From db168a398929a251f9c63b0270e93c4ee4ac0782 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:02:55 +0200 Subject: [PATCH 001/264] a minimal orchestrator is defined --- src/lib.rs | 1 + src/orch/mod.rs | 1 + src/orch/orchestrator.rs | 5 +++++ 3 files changed, 7 insertions(+) create mode 100644 src/orch/mod.rs create mode 100644 src/orch/orchestrator.rs diff --git a/src/lib.rs b/src/lib.rs index b20c2fb..3b1ce2f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,6 +21,7 @@ mod into_par_iter; /// Module for creating special iterators. pub mod iter; mod iter_into_par_iter; +pub mod orch; mod par_iter; mod par_iter_option; mod par_iter_result; diff --git a/src/orch/mod.rs b/src/orch/mod.rs new file mode 100644 index 0000000..9fe3d49 --- /dev/null +++ b/src/orch/mod.rs @@ -0,0 +1 @@ +mod orchestrator; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs new file mode 100644 index 0000000..575e11a --- /dev/null +++ b/src/orch/orchestrator.rs @@ -0,0 +1,5 @@ +use crate::ParallelRunner; + +pub trait Orchestrator { + type Runner: ParallelRunner; +} From bcf8e78d702ba13d80cdbef387c47401d49a4d8d Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:04:29 +0200 Subject: [PATCH 002/264] std orchestrator is defined --- src/orch/implementations/mod.rs | 1 + src/orch/implementations/std_orchestrator.rs | 9 +++++++++ src/orch/mod.rs | 1 + 3 files changed, 11 insertions(+) create mode 100644 src/orch/implementations/mod.rs create mode 100644 src/orch/implementations/std_orchestrator.rs diff --git a/src/orch/implementations/mod.rs b/src/orch/implementations/mod.rs new file mode 100644 index 0000000..99d476f --- /dev/null +++ b/src/orch/implementations/mod.rs @@ -0,0 +1 @@ +mod std_orchestrator; diff --git a/src/orch/implementations/std_orchestrator.rs b/src/orch/implementations/std_orchestrator.rs new file mode 100644 index 0000000..8a2f37f --- /dev/null +++ b/src/orch/implementations/std_orchestrator.rs @@ -0,0 +1,9 @@ +use crate::ParallelRunner; +use std::marker::PhantomData; + +pub struct StdOrchestrator<R> +where + R: ParallelRunner, +{ + r: PhantomData<R>, +} diff --git a/src/orch/mod.rs b/src/orch/mod.rs index 9fe3d49..36629cf 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -1 +1,2 @@ +mod implementations; mod orchestrator; From 5a36d35767af0357f0d341012f78cfba1206bff3 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:05:55 +0200 Subject: [PATCH 003/264] default orchestrator is defined --- src/orch/implementations/mod.rs | 2 ++ src/orch/mod.rs | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/src/orch/implementations/mod.rs b/src/orch/implementations/mod.rs index 99d476f..8e679a7 100644 --- a/src/orch/implementations/mod.rs +++ b/src/orch/implementations/mod.rs @@ -1 +1,3 @@ mod std_orchestrator; + +pub use std_orchestrator::StdOrchestrator; diff --git a/src/orch/mod.rs b/src/orch/mod.rs index 36629cf..e3ceb07 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -1,2 +1,6 @@ mod implementations; mod orchestrator; + +pub use crate::orch::implementations::StdOrchestrator; + +pub type DefaultOrchestrator = StdOrchestrator<crate::runner::DefaultRunner>; From c259936907d584bd0b2a405b891f4f2175171930 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:07:50 +0200 Subject: [PATCH 004/264] Orchestrator::new_runner is defined --- src/orch/orchestrator.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 575e11a..9c308db 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@
-1,5 +1,13 @@ -use crate::ParallelRunner; +use crate::{ParallelRunner, Params, runner::ComputationKind}; pub trait Orchestrator { type Runner: ParallelRunner; + + fn new_runner( + kind: ComputationKind, + params: Params, + initial_input_len: Option<usize>, + ) -> Self::Runner { + <Self::Runner as ParallelRunner>::new(kind, params, initial_input_len) + } } From 9a5299c96a62b3b6af6ee92a1b6b03e1fe75ad49 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:21:55 +0200 Subject: [PATCH 005/264] runner to orchestrator - first refactoring --- src/computational_variants/map.rs | 16 ++++++++-------- src/computational_variants/par.rs | 18 +++++++++--------- src/computational_variants/xap.rs | 16 ++++++++-------- src/iter/special_iterators.rs | 4 ++-- src/lib.rs | 1 + src/orch/mod.rs | 2 +- src/par_iter.rs | 8 ++++---- src/par_iter_option.rs | 13 ++++++------- src/par_iter_result.rs | 14 ++++++-------- src/parallel_drainable.rs | 4 ++-- src/parallelizable.rs | 4 ++-- src/parallelizable_collection.rs | 4 ++-- src/parallelizable_collection_mut.rs | 5 +++-- 13 files changed, 54 insertions(+), 55 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 09fff92..554ef55 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -2,20 +2,20 @@ use super::xap::ParXap; use crate::ParIterResult; use crate::computational_variants::fallible_result::ParMapResult; use crate::generic_values::{Vector, WhilstAtom}; +use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, computations::M, - runner::{DefaultRunner, ParallelRunner}, using::{UsingClone, UsingFun, computational_variants::UParMap}, }; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; /// A parallel iterator that maps inputs.
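Patches 001 through 004 pin down the entire orchestrator surface: an `Orchestrator` names a `ParallelRunner` type and, through the default `new_runner` method above, constructs it by forwarding to `<Self::Runner as ParallelRunner>::new`. A minimal sketch of a custom implementation against this trait; `SequentialRunner` is a hypothetical `ParallelRunner` implementation used purely for illustration:

use crate::{ParallelRunner, orch::Orchestrator};

// Hypothetical runner type; assumed to implement ParallelRunner.
struct SequentialOrchestrator;

impl Orchestrator for SequentialOrchestrator {
    // The only required item is the runner type.
    type Runner = SequentialRunner;
    // `new_runner` is inherited from the trait: it forwards `kind`, `params`
    // and `initial_input_len` to <SequentialRunner as ParallelRunner>::new.
}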
-pub struct ParMap +pub struct ParMap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { @@ -25,7 +25,7 @@ where impl ParMap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { @@ -43,7 +43,7 @@ where unsafe impl Send for ParMap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { @@ -51,7 +51,7 @@ where unsafe impl Sync for ParMap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { @@ -59,7 +59,7 @@ where impl ParIter for ParMap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { @@ -90,7 +90,7 @@ where self } - fn with_runner(self) -> impl ParIter { + fn with_runner(self) -> impl ParIter { let (params, iter, map) = self.destruct(); ParMap::new(params, iter, map) } diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index a158359..0ad86c2 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -1,11 +1,11 @@ use super::{map::ParMap, xap::ParXap}; use crate::computational_variants::fallible_result::ParResult; use crate::generic_values::{Vector, WhilstAtom}; +use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, computations::{M, map_self}, - runner::{DefaultRunner, ParallelRunner}, using::{UsingClone, UsingFun, computational_variants::UPar}, }; use crate::{IntoParIter, ParIterResult}; @@ -14,9 +14,9 @@ use orx_concurrent_iter::{ConcurrentIter, ExactSizeConcurrentIter}; use std::marker::PhantomData; /// A parallel iterator. -pub struct Par +pub struct Par where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { iter: I, @@ -26,7 +26,7 @@ where impl Par where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { pub(crate) fn new(params: Params, iter: I) -> Self { @@ -49,21 +49,21 @@ where unsafe impl Send for Par where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { } unsafe impl Sync for Par where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { } impl ParIter for Par where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { type Item = I::Item; @@ -93,7 +93,7 @@ where self } - fn with_runner(self) -> impl ParIter { + fn with_runner(self) -> impl ParIter { Par::new(self.params, self.iter) } @@ -206,7 +206,7 @@ where impl Par where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { /// Creates a chain of this and `other` parallel iterators. diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 12488f7..9e85591 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -2,11 +2,11 @@ use crate::ParIterResult; use crate::computational_variants::fallible_result::ParXapResult; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; +use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, computations::X, - runner::{DefaultRunner, ParallelRunner}, using::{UsingClone, UsingFun, computational_variants::UParXap}, }; use orx_concurrent_iter::ConcurrentIter; @@ -15,9 +15,9 @@ use std::marker::PhantomData; /// A parallel iterator that xaps inputs. 
/// /// *xap* is a generalization of one-to-one map, filter-map and flat-map operations. -pub struct ParXap +pub struct ParXap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, M1: Fn(I::Item) -> Vo + Sync, @@ -28,7 +28,7 @@ where impl ParXap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, M1: Fn(I::Item) -> Vo + Sync, @@ -47,7 +47,7 @@ where unsafe impl Send for ParXap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, M1: Fn(I::Item) -> Vo + Sync, @@ -56,7 +56,7 @@ where unsafe impl Sync for ParXap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, M1: Fn(I::Item) -> Vo + Sync, @@ -65,7 +65,7 @@ where impl ParIter for ParXap where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, M1: Fn(I::Item) -> Vo + Sync, @@ -97,7 +97,7 @@ where self } - fn with_runner(self) -> impl ParIter { + fn with_runner(self) -> impl ParIter { let (params, iter, map1) = self.destruct(); ParXap::new(params, iter, map1) } diff --git a/src/iter/special_iterators.rs b/src/iter/special_iterators.rs index 6d1753b..f1e4d22 100644 --- a/src/iter/special_iterators.rs +++ b/src/iter/special_iterators.rs @@ -1,8 +1,8 @@ -use crate::{computational_variants::Par, runner::DefaultRunner}; +use crate::{computational_variants::Par, orch::DefaultOrchestrator}; use orx_concurrent_iter::implementations::ConIterEmpty; /// An empty parallel iterator which does not yield any elements. -pub type ParEmpty = Par, R>; +pub type ParEmpty = Par, R>; /// Creates an empty parallel iterator which does not yield any elements. pub fn empty() -> ParEmpty { diff --git a/src/lib.rs b/src/lib.rs index 3b1ce2f..4d29fdd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,6 +21,7 @@ mod into_par_iter; /// Module for creating special iterators. pub mod iter; mod iter_into_par_iter; +/// Orchestrator for parallel execution and managing threads. pub mod orch; mod par_iter; mod par_iter_option; diff --git a/src/orch/mod.rs b/src/orch/mod.rs index e3ceb07..181e557 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -2,5 +2,5 @@ mod implementations; mod orchestrator; pub use crate::orch::implementations::StdOrchestrator; - +pub use orchestrator::Orchestrator; pub type DefaultOrchestrator = StdOrchestrator; diff --git a/src/par_iter.rs b/src/par_iter.rs index d709b94..b3b35d2 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -1,5 +1,6 @@ use crate::ParIterResult; use crate::computational_variants::fallible_option::ParOption; +use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_option::{IntoOption, ParIterOption}; use crate::par_iter_result::IntoResult; use crate::using::{UsingClone, UsingFun}; @@ -8,16 +9,15 @@ use crate::{ collect_into::ParCollectInto, computations::{map_clone, map_copy, map_count, reduce_sum, reduce_unit}, parameters::{ChunkSize, IterationOrder, NumThreads}, - runner::{DefaultRunner, ParallelRunner}, special_type_sets::Sum, }; use core::cmp::Ordering; use orx_concurrent_iter::ConcurrentIter; /// Parallel iterator. -pub trait ParIter: Sized + Send + Sync +pub trait ParIter: Sized + Send + Sync where - R: ParallelRunner, + R: Orchestrator, { /// Element type of the parallel iterator. 
type Item; @@ -262,7 +262,7 @@ where /// // uses the custom parallel runner MyParallelRunner: ParallelRunner /// let sum = inputs.par().with_runner::().sum(); /// ``` - fn with_runner(self) -> impl ParIter; + fn with_runner(self) -> impl ParIter; // using transformations diff --git a/src/par_iter_option.rs b/src/par_iter_option.rs index 42575d8..e579d2d 100644 --- a/src/par_iter_option.rs +++ b/src/par_iter_option.rs @@ -1,7 +1,6 @@ use crate::computations::{map_count, reduce_sum, reduce_unit}; -use crate::{ - ChunkSize, DefaultRunner, IterationOrder, NumThreads, ParCollectInto, ParallelRunner, Sum, -}; +use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Sum}; use core::cmp::Ordering; /// A parallel iterator for which the computation either completely succeeds, @@ -122,9 +121,9 @@ use core::cmp::Ordering; /// ``` /// /// [`ParIter`]: crate::ParIter -pub trait ParIterOption +pub trait ParIterOption where - R: ParallelRunner, + R: Orchestrator, { /// Type of the success element, to be received as the Some variant iff the entire computation succeeds. type Item; @@ -157,10 +156,10 @@ where /// See [`IterationOrder`] and [`crate::ParIter::iteration_order`] for details. fn iteration_order(self, order: IterationOrder) -> Self; - /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`]. + /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`Orchestrator`]. /// /// See [`crate::ParIter::with_runner`] for details. - fn with_runner(self) -> impl ParIterOption; + fn with_runner(self) -> impl ParIterOption; // computation transformations diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs index 64998af..8eddd1e 100644 --- a/src/par_iter_result.rs +++ b/src/par_iter_result.rs @@ -1,9 +1,7 @@ use crate::computations::{map_count, reduce_sum, reduce_unit}; +use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::{ChunkSize, IterationOrder, NumThreads, Sum}; -use crate::{ - DefaultRunner, ParCollectInto, ParIter, ParallelRunner, - generic_values::fallible_iterators::ResultOfIter, -}; +use crate::{ParCollectInto, ParIter, generic_values::fallible_iterators::ResultOfIter}; use core::cmp::Ordering; /// A parallel iterator for which the computation either completely succeeds, @@ -131,9 +129,9 @@ use core::cmp::Ordering; /// ``` /// /// [`ParIter`]: crate::ParIter -pub trait ParIterResult +pub trait ParIterResult where - R: ParallelRunner, + R: Orchestrator, { /// Type of the Ok element, to be received as the Ok variant iff the entire computation succeeds. type Item; @@ -199,10 +197,10 @@ where Self::from_regular_par(self.into_regular_par().iteration_order(order)) } - /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`]. + /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`Orchestrator`]. /// /// See [`ParIter::with_runner`] for details. 
- fn with_runner( + fn with_runner( self, ) -> impl ParIterResult; diff --git a/src/parallel_drainable.rs b/src/parallel_drainable.rs index e7b4244..23534ef 100644 --- a/src/parallel_drainable.rs +++ b/src/parallel_drainable.rs @@ -1,4 +1,4 @@ -use crate::{DefaultRunner, Params, computational_variants::Par}; +use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; use orx_concurrent_iter::ConcurrentDrainableOverSlice; use std::ops::RangeBounds; @@ -46,7 +46,7 @@ pub trait ParallelDrainableOverSlice: ConcurrentDrainableOverSlice { fn par_drain( &mut self, range: R, - ) -> Par<::DrainingIter<'_>, DefaultRunner> + ) -> Par<::DrainingIter<'_>, DefaultOrchestrator> where R: RangeBounds, { diff --git a/src/parallelizable.rs b/src/parallelizable.rs index 69c93ad..8bcf1a8 100644 --- a/src/parallelizable.rs +++ b/src/parallelizable.rs @@ -1,4 +1,4 @@ -use crate::{computational_variants::Par, parameters::Params, runner::DefaultRunner}; +use crate::{computational_variants::Par, orch::DefaultOrchestrator, parameters::Params}; use orx_concurrent_iter::ConcurrentIterable; /// `Parallelizable` types are those from which parallel iterators can be created @@ -61,7 +61,7 @@ pub trait Parallelizable: ConcurrentIterable { /// assert_eq!(range.par().sum(), 10); /// assert_eq!(range.par().max(), Some(4)); /// ``` - fn par(&self) -> Par<::Iter, DefaultRunner> { + fn par(&self) -> Par<::Iter, DefaultOrchestrator> { Par::new(Params::default(), self.con_iter()) } } diff --git a/src/parallelizable_collection.rs b/src/parallelizable_collection.rs index f2304a3..5710458 100644 --- a/src/parallelizable_collection.rs +++ b/src/parallelizable_collection.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, runner::DefaultRunner}; +use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; use orx_concurrent_iter::{ConcurrentCollection, ConcurrentIterable}; /// A type implementing [`ParallelizableCollection`] is a collection owning the elements such that @@ -75,7 +75,7 @@ pub trait ParallelizableCollection: ConcurrentCollection { &self, ) -> Par< <::Iterable<'_> as ConcurrentIterable>::Iter, - DefaultRunner, + DefaultOrchestrator, > { Par::new(Params::default(), self.con_iter()) } diff --git a/src/parallelizable_collection_mut.rs b/src/parallelizable_collection_mut.rs index d0d103e..162c676 100644 --- a/src/parallelizable_collection_mut.rs +++ b/src/parallelizable_collection_mut.rs @@ -1,5 +1,6 @@ use crate::{ - DefaultRunner, ParIter, ParallelizableCollection, Params, computational_variants::Par, + ParIter, ParallelizableCollection, Params, computational_variants::Par, + orch::DefaultOrchestrator, }; use orx_concurrent_iter::ConcurrentCollectionMut; @@ -59,7 +60,7 @@ pub trait ParallelizableCollectionMut: ConcurrentCollectionMut + ParallelizableC /// /// assert_eq!(&vec, &[1, 2, 13, 14]); /// ``` - fn par_mut(&mut self) -> impl ParIter { + fn par_mut(&mut self) -> impl ParIter { Par::new(Params::default(), self.con_iter_mut()) } } From 257010b21f03e2656d4edbaff9a396be7ffdfe0c Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:22:37 +0200 Subject: [PATCH 006/264] impl orchestrator for std orchestrator --- src/orch/implementations/std_orchestrator.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/orch/implementations/std_orchestrator.rs b/src/orch/implementations/std_orchestrator.rs index 8a2f37f..b08d2cf 100644 --- a/src/orch/implementations/std_orchestrator.rs +++ b/src/orch/implementations/std_orchestrator.rs @@ -1,4 +1,4 
@@ -use crate::ParallelRunner; +use crate::{ParallelRunner, orch::Orchestrator}; use std::marker::PhantomData; pub struct StdOrchestrator<R> @@ -7,3 +7,10 @@ where { r: PhantomData<R>, } + +impl<R> Orchestrator for StdOrchestrator<R> +where + R: ParallelRunner, +{ + type Runner = R; +} From aea3d9550770ad5e8c08de0d56ffab604f20758d Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:23:45 +0200 Subject: [PATCH 007/264] clean up xap-filter-xap --- .../u_xap_filter_xap.rs | 223 ------------------ 1 file changed, 223 deletions(-) delete mode 100644 src/using/computational_variants/u_xap_filter_xap.rs diff --git a/src/using/computational_variants/u_xap_filter_xap.rs b/src/using/computational_variants/u_xap_filter_xap.rs deleted file mode 100644 index de87f63..0000000 --- a/src/using/computational_variants/u_xap_filter_xap.rs +++ /dev/null @@ -1,223 +0,0 @@ -use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, - computations::{Values, Vector}, - runner::{DefaultRunner, ParallelRunner}, - using::Using, - using::computations::UXfx, - using::u_par_iter::ParIterUsing, -}; -use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; - -/// A parallel iterator that xaps, then filters and finally xaps again. -/// -/// *xap* is a generalization of one-to-one map, filter-map and flat-map operations. -pub struct UParXapFilterXap -where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vt: Values, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vt + Sync, - F: Fn(&mut U::Item, &Vt::Item) -> bool + Sync, - M2: Fn(&mut U::Item, Vt::Item) -> Vo + Sync, -{ - u_xfx: UXfx, - phantom: PhantomData<R>, -} - -impl UParXapFilterXap -where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vt: Values, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vt + Sync, - F: Fn(&mut U::Item, &Vt::Item) -> bool + Sync, - M2: Fn(&mut U::Item, Vt::Item) -> Vo + Sync, -{ - pub(crate) fn new(using: U, params: Params, iter: I, x1: M1, f: F, x2: M2) -> Self { - Self { - u_xfx: UXfx::new(using, params, iter, x1, f, x2), - phantom: PhantomData, - } - } - - fn destruct(self) -> (U, Params, I, M1, F, M2) { - self.u_xfx.destruct() - } -} - -unsafe impl Send for UParXapFilterXap -where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vt: Values, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vt + Sync, - F: Fn(&mut U::Item, &Vt::Item) -> bool + Sync, - M2: Fn(&mut U::Item, Vt::Item) -> Vo + Sync, -{ -} - -unsafe impl Sync for UParXapFilterXap -where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vt: Values, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vt + Sync, - F: Fn(&mut U::Item, &Vt::Item) -> bool + Sync, - M2: Fn(&mut U::Item, Vt::Item) -> Vo + Sync, -{ -} - -impl ParIterUsing for UParXapFilterXap -where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vt: Values, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vt + Sync, - F: Fn(&mut U::Item, &Vt::Item) -> bool + Sync, - M2: Fn(&mut U::Item, Vt::Item) -> Vo + Sync, -{ - type Item = Vo::Item; - - fn con_iter(&self) -> &impl ConcurrentIter { - self.u_xfx.iter() - } - - fn params(&self) -> Params { - self.u_xfx.params() - } - - // params transformations - - fn num_threads(mut self, num_threads: impl Into<NumThreads>) -> Self { - self.u_xfx.num_threads(num_threads); - self - } - - fn chunk_size(mut self, chunk_size: impl Into<ChunkSize>) -> Self { - self.u_xfx.chunk_size(chunk_size); - self - } - - fn iteration_order(mut self, collect: IterationOrder) -> Self { - self.u_xfx.iteration_order(collect); - self - } - - fn with_runner(self)
-> impl ParIterUsing { - let (using, params, iter, map1, filter, map2) = self.destruct(); - UParXapFilterXap::new(using, params, iter, map1, filter, map2) - } - - // computation transformations - - fn map(self, map: Map) -> impl ParIterUsing - where - Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone, - { - let (using, params, iter, x1, f, x2) = self.destruct(); - let x2 = move |u: &mut U::Item, t: Vt::Item| { - // TODO: avoid allocation - let vo: Vec<_> = x2(u, t).values().into_iter().map(|x| map(u, x)).collect(); - Vector(vo) - }; - - UParXapFilterXap::new(using, params, iter, x1, f, x2) - } - - fn filter(self, filter: Filter) -> impl ParIterUsing - where - Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, - { - let (using, params, iter, x1, f, x2) = self.destruct(); - let x2 = move |u: &mut U::Item, t: Vt::Item| { - // TODO: avoid allocation - let vo: Vec<_> = x2(u, t) - .values() - .into_iter() - .filter(|x| filter(u, x)) - .collect(); - Vector(vo) - }; - - UParXapFilterXap::new(using, params, iter, x1, f, x2) - } - - fn flat_map( - self, - flat_map: FlatMap, - ) -> impl ParIterUsing - where - IOut: IntoIterator, - FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone, - { - let (using, params, iter, x1, f, x2) = self.destruct(); - let x2 = move |u: &mut U::Item, t: Vt::Item| { - // TODO: avoid allocation - let vo: Vec<_> = x2(u, t).values().into_iter().collect(); - let vo: Vec<_> = vo.into_iter().flat_map(|x| flat_map(u, x)).collect(); - Vector(vo) - }; - - UParXapFilterXap::new(using, params, iter, x1, f, x2) - } - - fn filter_map( - self, - filter_map: FilterMap, - ) -> impl ParIterUsing - where - FilterMap: Fn(&mut U::Item, Self::Item) -> Option + Sync + Clone, - { - let (using, params, iter, x1, f, x2) = self.destruct(); - let x2 = move |u: &mut U::Item, t: Vt::Item| { - // TODO: avoid allocation - let vo: Vec<_> = x2(u, t).values().into_iter().collect(); - let vo: Vec<_> = vo.into_iter().filter_map(|x| filter_map(u, x)).collect(); - Vector(vo) - }; - - UParXapFilterXap::new(using, params, iter, x1, f, x2) - } - - // collect - - fn collect_into(self, output: C) -> C - where - C: ParCollectInto, - { - output.u_xfx_collect_into::(self.u_xfx) - } - - // reduce - - fn reduce(self, reduce: Reduce) -> Option - where - Self::Item: Send, - Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync, - { - self.u_xfx.reduce::(reduce).1 - } - - // early exit - - fn first(self) -> Option - where - Self::Item: Send, - { - match self.params().iteration_order { - IterationOrder::Ordered => self.u_xfx.next::().1, - IterationOrder::Arbitrary => self.u_xfx.next_any::().1, - } - } -} From 13aece45b732e8d5aa56c4ae7560b5f23adfcdd5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:24:59 +0200 Subject: [PATCH 008/264] using refactoring for orchestration --- src/using/computational_variants/u_map.rs | 21 +++++++++++---------- src/using/computational_variants/u_par.rs | 18 +++++++++--------- src/using/computational_variants/u_xap.rs | 16 ++++++++-------- src/using/u_par_iter.rs | 12 ++++++------ 4 files changed, 34 insertions(+), 33 deletions(-) diff --git a/src/using/computational_variants/u_map.rs b/src/using/computational_variants/u_map.rs index ae099e4..06682e0 100644 --- a/src/using/computational_variants/u_map.rs +++ b/src/using/computational_variants/u_map.rs @@ -1,17 +1,18 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, generic_values::Vector, - runner::{DefaultRunner, ParallelRunner}, - 
using::u_par_iter::ParIterUsing, - using::{Using, computational_variants::u_xap::UParXap, computations::UM}, + orch::{DefaultOrchestrator, Orchestrator}, + using::{ + Using, computational_variants::u_xap::UParXap, computations::UM, u_par_iter::ParIterUsing, + }, }; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; /// A parallel iterator that maps inputs. -pub struct UParMap +pub struct UParMap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync, @@ -22,7 +23,7 @@ where impl UParMap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync, @@ -41,7 +42,7 @@ where unsafe impl Send for UParMap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync, @@ -50,7 +51,7 @@ where unsafe impl Sync for UParMap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync, @@ -59,7 +60,7 @@ where impl ParIterUsing for UParMap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync, @@ -91,7 +92,7 @@ where self } - fn with_runner(self) -> impl ParIterUsing { + fn with_runner(self) -> impl ParIterUsing { let (using, params, iter, map) = self.destruct(); UParMap::new(using, params, iter, map) } diff --git a/src/using/computational_variants/u_par.rs b/src/using/computational_variants/u_par.rs index bdb0dfc..7d3b8e0 100644 --- a/src/using/computational_variants/u_par.rs +++ b/src/using/computational_variants/u_par.rs @@ -1,22 +1,22 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, generic_values::Vector, - runner::{DefaultRunner, ParallelRunner}, - using::u_par_iter::ParIterUsing, + orch::{DefaultOrchestrator, Orchestrator}, using::{ Using, computational_variants::{u_map::UParMap, u_xap::UParXap}, computations::{UM, u_map_self}, + u_par_iter::ParIterUsing, }, }; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; /// A parallel iterator. 
-pub struct UPar +pub struct UPar where U: Using, - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { using: U, @@ -28,7 +28,7 @@ where impl UPar where U: Using, - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { pub(crate) fn new(using: U, params: Params, iter: I) -> Self { @@ -54,7 +54,7 @@ where unsafe impl Send for UPar where U: Using, - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { } @@ -62,7 +62,7 @@ where unsafe impl Sync for UPar where U: Using, - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { } @@ -70,7 +70,7 @@ where impl ParIterUsing for UPar where U: Using, - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, { type Item = I::Item; @@ -100,7 +100,7 @@ where self } - fn with_runner(self) -> impl ParIterUsing { + fn with_runner(self) -> impl ParIterUsing { UPar::new(self.using, self.params, self.iter) } diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index cc10d4c..9fe335d 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -1,7 +1,7 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, generic_values::{TransformableValues, runner_results::Infallible}, - runner::{DefaultRunner, ParallelRunner}, + orch::{DefaultOrchestrator, Orchestrator}, using::{Using, computations::UX, u_par_iter::ParIterUsing}, }; use orx_concurrent_iter::ConcurrentIter; @@ -10,9 +10,9 @@ use std::marker::PhantomData; /// A parallel iterator that xaps inputs. /// /// *xap* is a generalization of one-to-one map, filter-map and flat-map operations. -pub struct UParXap +pub struct UParXap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, Vo: TransformableValues, @@ -24,7 +24,7 @@ where impl UParXap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, Vo: TransformableValues, @@ -44,7 +44,7 @@ where unsafe impl Send for UParXap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, Vo: TransformableValues, @@ -54,7 +54,7 @@ where unsafe impl Sync for UParXap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, Vo: TransformableValues, @@ -64,7 +64,7 @@ where impl ParIterUsing for UParXap where - R: ParallelRunner, + R: Orchestrator, U: Using, I: ConcurrentIter, Vo: TransformableValues, @@ -97,7 +97,7 @@ where self } - fn with_runner(self) -> impl ParIterUsing { + fn with_runner(self) -> impl ParIterUsing { let (using, params, iter, map1) = self.destruct(); UParXap::new(using, params, iter, map1) } diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs index 315a929..bc555b2 100644 --- a/src/using/u_par_iter.rs +++ b/src/using/u_par_iter.rs @@ -1,6 +1,6 @@ use crate::{ - ChunkSize, DefaultRunner, IterationOrder, NumThreads, ParCollectInto, ParallelRunner, Params, - Sum, + ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum, + orch::{DefaultOrchestrator, Orchestrator}, using::{ Using, computations::{u_map_clone, u_map_copy, u_map_count, u_reduce_sum, u_reduce_unit}, @@ -12,9 +12,9 @@ use orx_concurrent_iter::ConcurrentIter; /// Parallel iterator which allows mutable access to a variable of type `U` within its iterator methods. /// /// Note that one variable will be created per thread used by the parallel computation. -pub trait ParIterUsing: Sized + Send + Sync +pub trait ParIterUsing: Sized + Send + Sync where - R: ParallelRunner, + R: Orchestrator, U: Using, { /// Element type of the parallel iterator. 
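The `using` family gives every worker thread its own mutable value, created once per thread and passed as the first argument of each closure. A hedged sketch of how this surface is driven, assuming only the `using`, `map` and `collect_into` signatures visible in the surrounding diffs; the buffer-reuse scenario itself is illustrative:

// One scratch buffer per thread; `map` receives `&mut Vec<u8>` plus the element.
let lengths: Vec<usize> = (0..1024usize)
    .into_par()
    .using(|_thread_idx| Vec::<u8>::with_capacity(64))
    .map(|buf, i| {
        buf.clear(); // reuse the per-thread allocation across elements
        buf.extend(core::iter::repeat(0u8).take(i % 8));
        buf.len()
    })
    .collect_into(Vec::new());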
@@ -58,10 +58,10 @@ where /// See [crate::ParIter::iteration_order] for details. fn iteration_order(self, collect: IterationOrder) -> Self; - /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`]. + /// Rather than the [`DefaultOrchestrator`], uses the parallel runner `Q` which implements [`Orchestrator`]. /// /// See [crate::ParIter::with_runner] for details. - fn with_runner(self) -> impl ParIterUsing; + fn with_runner(self) -> impl ParIterUsing; // computation transformations From e42dcf792442c16378c4f74afec2177f112c4f29 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:28:24 +0200 Subject: [PATCH 009/264] further orchestrator refactoring --- src/computational_variants/fallible_option.rs | 14 +++++++------- .../fallible_result/map_result.rs | 12 ++++++------ .../fallible_result/par_result.rs | 12 ++++++------ .../fallible_result/xap_result.rs | 12 ++++++------ src/into_par_iter.rs | 8 ++++---- src/iter_into_par_iter.rs | 6 +++--- 6 files changed, 32 insertions(+), 32 deletions(-) diff --git a/src/computational_variants/fallible_option.rs b/src/computational_variants/fallible_option.rs index 5cec087..521dc48 100644 --- a/src/computational_variants/fallible_option.rs +++ b/src/computational_variants/fallible_option.rs @@ -1,13 +1,13 @@ use crate::{ - ChunkSize, DefaultRunner, IterationOrder, NumThreads, ParCollectInto, ParIterResult, - ParallelRunner, + ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterResult, + orch::{DefaultOrchestrator, Orchestrator}, par_iter_option::{ParIterOption, ResultIntoOption}, }; use std::marker::PhantomData; -pub struct ParOption +pub struct ParOption where - R: ParallelRunner, + R: Orchestrator, F: ParIterResult, { par: F, @@ -16,7 +16,7 @@ where impl ParOption where - R: ParallelRunner, + R: Orchestrator, F: ParIterResult, { pub(crate) fn new(par: F) -> Self { @@ -29,7 +29,7 @@ where impl ParIterOption for ParOption where - R: ParallelRunner, + R: Orchestrator, F: ParIterResult, { type Item = T; @@ -48,7 +48,7 @@ where Self::new(self.par.iteration_order(order)) } - fn with_runner(self) -> impl ParIterOption { + fn with_runner(self) -> impl ParIterOption { ParOption::new(self.par.with_runner()) } diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 1b6e351..d7c7e00 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,14 +1,14 @@ use crate::computational_variants::ParMap; use crate::computations::X; +use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::{DefaultRunner, ParallelRunner}; use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; -pub struct ParMapResult +pub struct ParMapResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, O: IntoResult, M1: Fn(I::Item) -> O + Sync, @@ -19,7 +19,7 @@ where impl ParMapResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, O: IntoResult, M1: Fn(I::Item) -> O + Sync, @@ -34,7 +34,7 @@ where impl ParIterResult for ParMapResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, O: IntoResult, M1: Fn(I::Item) -> O + Sync, @@ -64,7 +64,7 @@ where // params transformations - fn with_runner( + fn with_runner( self, ) -> impl ParIterResult { let (params, iter, m1) = self.par.destruct(); diff 
--git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 3af9357..27b0e48 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,14 +1,14 @@ use crate::computational_variants::Par; use crate::computations::X; +use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::{DefaultRunner, ParallelRunner}; use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; -pub struct ParResult +pub struct ParResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, I::Item: IntoResult, { @@ -18,7 +18,7 @@ where impl ParResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, I::Item: IntoResult, { @@ -32,7 +32,7 @@ where impl ParIterResult for ParResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, I::Item: IntoResult, { @@ -61,7 +61,7 @@ where // params transformations - fn with_runner( + fn with_runner( self, ) -> impl ParIterResult { let (params, iter) = self.par.destruct(); diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 9ec7d15..d798cee 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -2,15 +2,15 @@ use crate::computational_variants::ParXap; use crate::computations::X; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; +use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::{DefaultRunner, ParallelRunner}; use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; -pub struct ParXapResult +pub struct ParXapResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, Vo::Item: IntoResult, @@ -22,7 +22,7 @@ where impl ParXapResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, Vo::Item: IntoResult, @@ -38,7 +38,7 @@ where impl ParIterResult for ParXapResult where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, Vo::Item: IntoResult, @@ -69,7 +69,7 @@ where // params transformations - fn with_runner( + fn with_runner( self, ) -> impl ParIterResult { let (params, iter, m1) = self.par.destruct(); diff --git a/src/into_par_iter.rs b/src/into_par_iter.rs index 441eb81..11d368b 100644 --- a/src/into_par_iter.rs +++ b/src/into_par_iter.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, runner::DefaultRunner}; +use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; use orx_concurrent_iter::{ConcurrentIter, IntoConcurrentIter}; /// Trait to convert a source (collection or generator) into a parallel iterator; i.e., [`ParIter`], @@ -47,19 +47,19 @@ pub trait IntoParIter: IntoConcurrentIter { /// let range = 1..5; /// assert_eq!(range.into_par().max(), Some(4)); /// ``` - fn into_par(self) -> Par; + fn into_par(self) -> Par; } impl IntoParIter for I where I: IntoConcurrentIter, { - fn into_par(self) -> Par { + fn into_par(self) -> Par { Par::new(Params::default(), self.into_con_iter()) } } -impl IntoConcurrentIter for Par { +impl IntoConcurrentIter for Par { type 
Item = I::Item; type IntoIter = I; diff --git a/src/iter_into_par_iter.rs b/src/iter_into_par_iter.rs index 03724d4..228d882 100644 --- a/src/iter_into_par_iter.rs +++ b/src/iter_into_par_iter.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, runner::DefaultRunner}; +use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; use orx_concurrent_iter::{IterIntoConcurrentIter, implementations::ConIterOfIter}; /// Any regular iterator implements [`IterIntoParIter`] trait allowing them to be used @@ -116,7 +116,7 @@ pub trait IterIntoParIter: Iterator { /// /// assert_eq!(sum_evens, 3782); /// ``` - fn iter_into_par(self) -> Par, DefaultRunner> + fn iter_into_par(self) -> Par, DefaultOrchestrator> where Self: Sized, Self::Item: Send; @@ -127,7 +127,7 @@ where I: Iterator, I::Item: Send + Sync, { - fn iter_into_par(self) -> Par, DefaultRunner> { + fn iter_into_par(self) -> Par, DefaultOrchestrator> { Par::new(Params::default(), self.iter_into_con_iter()) } } From 10e4e80416dcb164dc6f39a7cd5720808bb6b718 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:37:43 +0200 Subject: [PATCH 010/264] orchestrator is added as a field to regular parallel iteators --- src/computational_variants/map.rs | 31 +++++++++++++++++-------------- src/computational_variants/par.rs | 30 ++++++++++++++++-------------- src/computational_variants/xap.rs | 29 ++++++++++++++++------------- src/par_iter.rs | 2 +- 4 files changed, 50 insertions(+), 42 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 554ef55..8a1dfa2 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -19,6 +19,7 @@ where I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { + orchestrator: R, m: M, phantom: PhantomData, } @@ -29,15 +30,17 @@ where I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { - pub(crate) fn new(params: Params, iter: I, m1: M1) -> Self { + pub(crate) fn new(orchestrator: R, params: Params, iter: I, m1: M1) -> Self { Self { + orchestrator, m: M::new(params, iter, m1), phantom: PhantomData, } } - pub(crate) fn destruct(self) -> (Params, I, M1) { - self.m.destruct() + pub(crate) fn destruct(self) -> (R, Params, I, M1) { + let (params, iter, m1) = self.m.destruct(); + (self.orchestrator, params, iter, m1) } } @@ -90,9 +93,9 @@ where self } - fn with_runner(self) -> impl ParIter { - let (params, iter, map) = self.destruct(); - ParMap::new(params, iter, map) + fn with_runner(self, orchestrator: Q) -> impl ParIter { + let (_, params, iter, map) = self.destruct(); + ParMap::new(self.orchestrator, params, iter, map) } // using transformations @@ -106,7 +109,7 @@ where F: FnMut(usize) -> U, { let using = UsingFun::new(using); - let (params, iter, m1) = self.destruct(); + let (orchestrator, params, iter, m1) = self.destruct(); let m1 = move |_: &mut U, t: I::Item| m1(t); UParMap::new(using, params, iter, m1) } @@ -119,7 +122,7 @@ where U: Clone + Send + 'static, { let using = UsingClone::new(using); - let (params, iter, m1) = self.destruct(); + let (orchestrator, params, iter, m1) = self.destruct(); let m1 = move |_: &mut U, t: I::Item| m1(t); UParMap::new(using, params, iter, m1) } @@ -130,16 +133,16 @@ where where Map: Fn(Self::Item) -> Out + Sync, { - let (params, iter, m1) = self.destruct(); + let (orchestrator, params, iter, m1) = self.destruct(); let m1 = move |x| map(m1(x)); - ParMap::new(params, iter, m1) + ParMap::new(orchestrator, params, iter, m1) } fn filter(self, filter: Filter) -> impl ParIter where 
Filter: Fn(&Self::Item) -> bool + Sync, { - let (params, iter, m1) = self.destruct(); + let (orchestrator, params, iter, m1) = self.destruct(); let x1 = move |i: I::Item| { let value = m1(i); @@ -153,7 +156,7 @@ where IOut: IntoIterator, FlatMap: Fn(Self::Item) -> IOut + Sync, { - let (params, iter, m1) = self.destruct(); + let (orchestrator, params, iter, m1) = self.destruct(); let x1 = move |i: I::Item| Vector(flat_map(m1(i))); ParXap::new(params, iter, x1) } @@ -162,7 +165,7 @@ where where FilterMap: Fn(Self::Item) -> Option + Sync, { - let (params, iter, m1) = self.destruct(); + let (orchestrator, params, iter, m1) = self.destruct(); let x1 = move |i: I::Item| filter_map(m1(i)); ParXap::new(params, iter, x1) } @@ -171,7 +174,7 @@ where where While: Fn(&Self::Item) -> bool + Sync, { - let (params, iter, m1) = self.destruct(); + let (orchestrator, params, iter, m1) = self.destruct(); let x1 = move |value: I::Item| WhilstAtom::new(m1(value), &take_while); ParXap::new(params, iter, x1) } diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 0ad86c2..246fbb4 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -19,8 +19,9 @@ where R: Orchestrator, I: ConcurrentIter, { - iter: I, + orchestrator: R, params: Params, + iter: I, phantom: PhantomData, } @@ -29,20 +30,21 @@ where R: Orchestrator, I: ConcurrentIter, { - pub(crate) fn new(params: Params, iter: I) -> Self { + pub(crate) fn new(orchestrator: R, params: Params, iter: I) -> Self { Self { + orchestrator, iter, params, phantom: PhantomData, } } - pub(crate) fn destruct(self) -> (Params, I) { - (self.params, self.iter) + pub(crate) fn destruct(self) -> (R, Params, I) { + (self.orchestrator, self.params, self.iter) } fn m(self) -> M I::Item> { - let (params, iter) = self.destruct(); + let (orchestrator, params, iter) = self.destruct(); M::new(params, iter, map_self) } } @@ -93,8 +95,8 @@ where self } - fn with_runner(self) -> impl ParIter { - Par::new(self.params, self.iter) + fn with_runner(self, orchestrator: Q) -> impl ParIter { + Par::new(orchestrator, self.params, self.iter) } // using transformations @@ -128,15 +130,15 @@ where where Map: Fn(Self::Item) -> Out + Sync, { - let (params, iter) = self.destruct(); - ParMap::new(params, iter, map) + let (orchestrator, params, iter) = self.destruct(); + ParMap::new(orchestrator, params, iter, map) } fn filter(self, filter: Filter) -> impl ParIter where Filter: Fn(&Self::Item) -> bool + Sync, { - let (params, iter) = self.destruct(); + let (orchestrator, params, iter) = self.destruct(); let x1 = move |i: Self::Item| filter(&i).then_some(i); ParXap::new(params, iter, x1) } @@ -146,7 +148,7 @@ where IOut: IntoIterator, FlatMap: Fn(Self::Item) -> IOut + Sync, { - let (params, iter) = self.destruct(); + let (orchestrator, params, iter) = self.destruct(); let x1 = move |i: Self::Item| Vector(flat_map(i)); // TODO: inline ParXap::new(params, iter, x1) } @@ -155,7 +157,7 @@ where where FilterMap: Fn(Self::Item) -> Option + Sync, { - let (params, iter) = self.destruct(); + let (orchestrator, params, iter) = self.destruct(); ParXap::new(params, iter, filter_map) } @@ -163,7 +165,7 @@ where where While: Fn(&Self::Item) -> bool + Sync, { - let (params, iter) = self.destruct(); + let (orchestrator, params, iter) = self.destruct(); let x1 = move |value: Self::Item| WhilstAtom::new(value, &take_while); ParXap::new(params, iter, x1) } @@ -232,7 +234,7 @@ where I: ExactSizeConcurrentIter, C: IntoParIter, { - let (params, iter) = 
self.destruct(); + let (orchestrator, params, iter) = self.destruct(); let iter = iter.chain(other.into_con_iter()); Par::new(params, iter) } diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 9e85591..412f64b 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -22,6 +22,7 @@ where Vo: TransformableValues, M1: Fn(I::Item) -> Vo + Sync, { + orchestrator: R, x: X, phantom: PhantomData, } @@ -33,15 +34,17 @@ where Vo: TransformableValues, M1: Fn(I::Item) -> Vo + Sync, { - pub(crate) fn new(params: Params, iter: I, x1: M1) -> Self { + pub(crate) fn new(orchestrator: R, params: Params, iter: I, x1: M1) -> Self { Self { + orchestrator, x: X::new(params, iter, x1), phantom: PhantomData, } } - pub(crate) fn destruct(self) -> (Params, I, M1) { - self.x.destruct() + pub(crate) fn destruct(self) -> (R, Params, I, M1) { + let (params, iter, x1) = self.x.destruct(); + (self.orchestrator, params, iter, x1) } } @@ -97,9 +100,9 @@ where self } - fn with_runner(self) -> impl ParIter { - let (params, iter, map1) = self.destruct(); - ParXap::new(params, iter, map1) + fn with_runner(self, orchestrator: Q) -> impl ParIter { + let (_, params, iter, x1) = self.destruct(); + ParXap::new(orchestrator, params, iter, x1) } // using transformations @@ -113,7 +116,7 @@ where F: FnMut(usize) -> U, { let using = UsingFun::new(using); - let (params, iter, x1) = self.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let m1 = move |_: &mut U, t: I::Item| x1(t); UParXap::new(using, params, iter, m1) } @@ -126,7 +129,7 @@ where U: Clone + Send + 'static, { let using = UsingClone::new(using); - let (params, iter, x1) = self.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let m1 = move |_: &mut U, t: I::Item| x1(t); UParXap::new(using, params, iter, m1) } @@ -137,7 +140,7 @@ where where Map: Fn(Self::Item) -> Out + Sync + Clone, { - let (params, iter, x1) = self.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let x1 = move |i: I::Item| { let vo = x1(i); vo.map(map.clone()) @@ -150,7 +153,7 @@ where where Filter: Fn(&Self::Item) -> bool + Sync + Clone, { - let (params, iter, x1) = self.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let x1 = move |i: I::Item| { let values = x1(i); values.filter(filter.clone()) @@ -163,7 +166,7 @@ where IOut: IntoIterator, FlatMap: Fn(Self::Item) -> IOut + Sync + Clone, { - let (params, iter, x1) = self.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let x1 = move |i: I::Item| { let vo = x1(i); vo.flat_map(flat_map.clone()) @@ -175,7 +178,7 @@ where where FilterMap: Fn(Self::Item) -> Option + Sync + Clone, { - let (params, iter, x1) = self.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let x1 = move |i: I::Item| { let vo = x1(i); vo.filter_map(filter_map.clone()) @@ -187,7 +190,7 @@ where where While: Fn(&Self::Item) -> bool + Sync + Clone, { - let (params, iter, x1) = self.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let x1 = move |i: I::Item| { let vo = x1(i); vo.whilst(take_while.clone()) diff --git a/src/par_iter.rs b/src/par_iter.rs index b3b35d2..c42b5d0 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -262,7 +262,7 @@ where /// // uses the custom parallel runner MyParallelRunner: ParallelRunner /// let sum = inputs.par().with_runner::().sum(); /// ``` - fn with_runner(self) -> impl ParIter; + fn with_runner(self, orchestrator: Q) -> impl ParIter; // using 
transformations From 67a0839ea09ee242eccf9ee2ed9ee7579db82bc4 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:42:20 +0200 Subject: [PATCH 011/264] orchestrator is added to fallible iterator fields --- .../fallible_result/map_result.rs | 11 ++++++----- .../fallible_result/par_result.rs | 11 ++++++----- .../fallible_result/xap_result.rs | 11 ++++++----- src/par_iter_result.rs | 1 + 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index d7c7e00..b149411 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -66,10 +66,11 @@ where fn with_runner( self, + orchestrator: Q, ) -> impl ParIterResult { - let (params, iter, m1) = self.par.destruct(); + let (_, params, iter, m1) = self.par.destruct(); ParMapResult { - par: ParMap::new(params, iter, m1), + par: ParMap::new(orchestrator, params, iter, m1), phantom: PhantomData, } } @@ -82,7 +83,7 @@ where Self::Item: Send, Self::Err: Send, { - let (params, iter, m1) = self.par.destruct(); + let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); let x = X::new(params, iter, x1); output.x_try_collect_into::(x) @@ -96,7 +97,7 @@ where Self::Err: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - let (params, iter, m1) = self.par.destruct(); + let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); let x = X::new(params, iter, x1); x.try_reduce::(reduce).1 @@ -109,7 +110,7 @@ where Self::Item: Send, Self::Err: Send, { - let (params, iter, m1) = self.par.destruct(); + let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); let x = X::new(params, iter, x1); match params.iteration_order { diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 27b0e48..1a5c78c 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -63,10 +63,11 @@ where fn with_runner( self, + orchestrator: Q, ) -> impl ParIterResult { - let (params, iter) = self.par.destruct(); + let (orchestrator, params, iter) = self.par.destruct(); ParResult { - par: Par::new(params, iter), + par: Par::new(orchestrator, params, iter), phantom: PhantomData, } } @@ -79,7 +80,7 @@ where Self::Item: Send, Self::Err: Send, { - let (params, iter) = self.par.destruct(); + let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); let x = X::new(params, iter, x1); output.x_try_collect_into::(x) @@ -93,7 +94,7 @@ where Self::Err: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - let (params, iter) = self.par.destruct(); + let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); let x = X::new(params, iter, x1); x.try_reduce::(reduce).1 @@ -106,7 +107,7 @@ where Self::Item: Send, Self::Err: Send, { - let (params, iter) = self.par.destruct(); + let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); let x = X::new(params, iter, x1); match params.iteration_order { diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index d798cee..77ea2f0 100644 --- 
a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -71,10 +71,11 @@ where fn with_runner<Q: Orchestrator>( self, + orchestrator: Q, ) -> impl ParIterResult<Q, Item = Self::Item, Err = Self::Err> { - let (params, iter, m1) = self.par.destruct(); + let (orchestrator, params, iter, m1) = self.par.destruct(); ParXapResult { - par: ParXap::new(params, iter, m1), + par: ParXap::new(orchestrator, params, iter, m1), phantom: PhantomData, } } @@ -88,7 +89,7 @@ where Self::Err: Send, Self::Err: Send, { - let (params, iter, x1) = self.par.destruct(); + let (orchestrator, params, iter, x1) = self.par.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = X::new(params, iter, x1); output.x_try_collect_into::(x) @@ -102,7 +103,7 @@ where Self::Err: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - let (params, iter, x1) = self.par.destruct(); + let (orchestrator, params, iter, x1) = self.par.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = X::new(params, iter, x1); x.try_reduce::(reduce).1 @@ -115,7 +116,7 @@ where Self::Item: Send, Self::Err: Send, { - let (params, iter, x1) = self.par.destruct(); + let (orchestrator, params, iter, x1) = self.par.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = X::new(params, iter, x1); match params.iteration_order { diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs index 8eddd1e..c23647e 100644 --- a/src/par_iter_result.rs +++ b/src/par_iter_result.rs @@ -202,6 +202,7 @@ where /// See [`ParIter::with_runner`] for details. fn with_runner<Q: Orchestrator>( self, + orchestrator: Q, ) -> impl ParIterResult<Q, Item = Self::Item, Err = Self::Err>; // computation transformations From 8d2dbc6fc67ca8e69358a2a899688049343e8bee Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:42:57 +0200 Subject: [PATCH 012/264] impl Default for StdOrchestrator --- src/orch/implementations/std_orchestrator.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/orch/implementations/std_orchestrator.rs b/src/orch/implementations/std_orchestrator.rs index b08d2cf..9851236 100644 --- a/src/orch/implementations/std_orchestrator.rs +++ b/src/orch/implementations/std_orchestrator.rs @@ -8,6 +8,15 @@ where r: PhantomData<R>, } +impl<R> Default for StdOrchestrator<R> +where + R: ParallelRunner, +{ + fn default() -> Self { + Self { r: PhantomData } + } +} + impl<R> Orchestrator for StdOrchestrator<R> where R: ParallelRunner, From cacd82d1aaef11a16efec8391c53074da9993c81 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:43:20 +0200 Subject: [PATCH 013/264] empty iterator is fixed --- src/iter/special_iterators.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/iter/special_iterators.rs b/src/iter/special_iterators.rs index f1e4d22..3f0a60f 100644 --- a/src/iter/special_iterators.rs +++ b/src/iter/special_iterators.rs @@ -6,5 +6,5 @@ pub type ParEmpty<T, R = DefaultOrchestrator> = Par<ConIterEmpty<T>, R>; /// Creates an empty parallel iterator which does not yield any elements.
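With `Default` implemented for `StdOrchestrator`, a `DefaultOrchestrator` can be conjured wherever a parallel iterator is built, which is what the three-field `Par` constructor introduced in patch 010 relies on. A sketch of that construction path from inside the crate; `Par::new` is `pub(crate)`, so this is illustrative rather than public API:

// (orchestrator, params, iter): the three fields carried by Par after patch 010.
use orx_concurrent_iter::IntoConcurrentIter;

let par = Par::new(
    DefaultOrchestrator::default(), // i.e., StdOrchestrator over the default runner
    Params::default(),
    vec![1, 2, 3].into_con_iter(),
);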
pub fn empty<T: Send + Sync>() -> ParEmpty<T> { - ParEmpty::new(Default::default(), Default::default()) + ParEmpty::new(Default::default(), Default::default(), Default::default()) } From 18df627a021326718185d6ad089f1f29602110a9 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:43:58 +0200 Subject: [PATCH 014/264] parxap ctors fixed --- src/computational_variants/xap.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 412f64b..b3fca83 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -146,7 +146,7 @@ where vo.map(map.clone()) }; - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn filter<Filter>(self, filter: Filter) -> impl ParIter<R, Item = Self::Item> @@ -158,7 +158,7 @@ where let values = x1(i); values.filter(filter.clone()) }; - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn flat_map<IOut, FlatMap>(self, flat_map: FlatMap) -> impl ParIter<R, Item = IOut::Item> @@ -171,7 +171,7 @@ where let vo = x1(i); vo.flat_map(flat_map.clone()) }; - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn filter_map<Out, FilterMap>(self, filter_map: FilterMap) -> impl ParIter<R, Item = Out> @@ -183,7 +183,7 @@ where let vo = x1(i); vo.filter_map(filter_map.clone()) }; - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn take_while<While>(self, take_while: While) -> impl ParIter<R, Item = Self::Item> @@ -195,7 +195,7 @@ where let vo = x1(i); vo.whilst(take_while.clone()) }; - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn into_fallible_result<T, E>(self) -> impl ParIterResult<R, Item = T, Err = E> From f00a97792189138779414af6e891b9f327a148de Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:45:02 +0200 Subject: [PATCH 015/264] par transformations fixed for orchestrator --- src/computational_variants/par.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 246fbb4..645badc 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -140,7 +140,7 @@ where { let (orchestrator, params, iter) = self.destruct(); let x1 = move |i: Self::Item| filter(&i).then_some(i); - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn flat_map<IOut, FlatMap>(self, flat_map: FlatMap) -> impl ParIter<R, Item = IOut::Item> @@ -150,7 +150,7 @@ where { let (orchestrator, params, iter) = self.destruct(); let x1 = move |i: Self::Item| Vector(flat_map(i)); // TODO: inline - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn filter_map<Out, FilterMap>(self, filter_map: FilterMap) -> impl ParIter<R, Item = Out> @@ -158,7 +158,7 @@ where FilterMap: Fn(Self::Item) -> Option<Out> + Sync, { let (orchestrator, params, iter) = self.destruct(); - ParXap::new(params, iter, filter_map) + ParXap::new(orchestrator, params, iter, filter_map) } fn take_while<While>(self, take_while: While) -> impl ParIter<R, Item = Self::Item> @@ -167,7 +167,7 @@ where { let (orchestrator, params, iter) = self.destruct(); let x1 = move |value: Self::Item| WhilstAtom::new(value, &take_while); - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn into_fallible_result<T, E>(self) -> impl ParIterResult<R, Item = T, Err = E> @@ -236,6 +236,6 @@ where { let (orchestrator, params, iter) = self.destruct(); let iter = iter.chain(other.into_con_iter()); - Par::new(params, iter) + Par::new(orchestrator, params, iter) } } From b704650115766680976dc681b11c5f6b29acfa36 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:46:04
+0200 Subject: [PATCH 016/264] option to result transformation is fixed for orchestrator --- src/computational_variants/fallible_option.rs | 7 +++++-- src/par_iter_option.rs | 5 ++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/computational_variants/fallible_option.rs b/src/computational_variants/fallible_option.rs index 521dc48..3bd3670 100644 --- a/src/computational_variants/fallible_option.rs +++ b/src/computational_variants/fallible_option.rs @@ -48,8 +48,11 @@ where Self::new(self.par.iteration_order(order)) } - fn with_runner(self) -> impl ParIterOption { - ParOption::new(self.par.with_runner()) + fn with_runner( + self, + orchestrator: Q, + ) -> impl ParIterOption { + ParOption::new(self.par.with_runner(orchestrator)) } // computation transformations diff --git a/src/par_iter_option.rs b/src/par_iter_option.rs index e579d2d..6ac92bb 100644 --- a/src/par_iter_option.rs +++ b/src/par_iter_option.rs @@ -159,7 +159,10 @@ where /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`Orchestrator`]. /// /// See [`crate::ParIter::with_runner`] for details. - fn with_runner(self) -> impl ParIterOption; + fn with_runner( + self, + orchestrator: Q, + ) -> impl ParIterOption; // computation transformations From 9866df8cc97e3904ce76215c80e0a74e2a9e8311 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:48:10 +0200 Subject: [PATCH 017/264] par computations fixed --- src/computational_variants/map.rs | 2 +- src/computational_variants/par.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 8a1dfa2..b26b369 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -95,7 +95,7 @@ where fn with_runner(self, orchestrator: Q) -> impl ParIter { let (_, params, iter, map) = self.destruct(); - ParMap::new(self.orchestrator, params, iter, map) + ParMap::new(orchestrator, params, iter, map) } // using transformations diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 645badc..14fb50b 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -183,7 +183,7 @@ where where C: ParCollectInto, { - output.m_collect_into::(self.m()) + output.m_collect_into::(self.m()) } // reduce @@ -193,15 +193,15 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - self.m().reduce::(reduce).1 + self.m().reduce::(reduce).1 } // early exit fn first(self) -> Option { match self.params().iteration_order { - IterationOrder::Ordered => self.m().next::().1, - IterationOrder::Arbitrary => self.m().next_any::().1, + IterationOrder::Ordered => self.m().next::().1, + IterationOrder::Arbitrary => self.m().next_any::().1, } } } From 79535bf4fc8c84432183b290e845f77abe86ade2 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:49:00 +0200 Subject: [PATCH 018/264] map computations fixed with orchestrator --- src/computational_variants/map.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index b26b369..a56e263 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -148,7 +148,7 @@ where let value = m1(i); filter(&value).then_some(value) }; - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn flat_map(self, flat_map: FlatMap) -> impl ParIter @@ -158,7 +158,7 @@ where 
{ let (orchestrator, params, iter, m1) = self.destruct(); let x1 = move |i: I::Item| Vector(flat_map(m1(i))); - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn filter_map(self, filter_map: FilterMap) -> impl ParIter @@ -167,7 +167,7 @@ where { let (orchestrator, params, iter, m1) = self.destruct(); let x1 = move |i: I::Item| filter_map(m1(i)); - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn take_while(self, take_while: While) -> impl ParIter @@ -176,7 +176,7 @@ where { let (orchestrator, params, iter, m1) = self.destruct(); let x1 = move |value: I::Item| WhilstAtom::new(m1(value), &take_while); - ParXap::new(params, iter, x1) + ParXap::new(orchestrator, params, iter, x1) } fn into_fallible_result(self) -> impl ParIterResult @@ -192,7 +192,7 @@ where where C: ParCollectInto, { - output.m_collect_into::(self.m) + output.m_collect_into::(self.m) } // reduce @@ -202,7 +202,7 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - self.m.reduce::(reduce).1 + self.m.reduce::(reduce).1 } // early exit @@ -212,8 +212,8 @@ where Self::Item: Send, { match self.params().iteration_order { - IterationOrder::Ordered => self.m.next::().1, - IterationOrder::Arbitrary => self.m.next_any::().1, + IterationOrder::Ordered => self.m.next::().1, + IterationOrder::Arbitrary => self.m.next_any::().1, } } } From ef85d7d5cb77e2c053d1b49c767bc52fadfdf1d7 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:49:31 +0200 Subject: [PATCH 019/264] xap computations fixed for orchestrator --- src/computational_variants/xap.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index b3fca83..f63b214 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -211,7 +211,7 @@ where where C: ParCollectInto, { - output.x_collect_into::(self.x) + output.x_collect_into::(self.x) } // reduce @@ -221,7 +221,7 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - self.x.reduce::(reduce).1 + self.x.reduce::(reduce).1 } // early exit @@ -231,8 +231,8 @@ where Self::Item: Send, { match self.params().iteration_order { - IterationOrder::Ordered => self.x.next::().1, - IterationOrder::Arbitrary => self.x.next_any::().1, + IterationOrder::Ordered => self.x.next::().1, + IterationOrder::Arbitrary => self.x.next_any::().1, } } } From 7322eaff60eec2001cc83d5296368baa095007fc Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:50:37 +0200 Subject: [PATCH 020/264] fallible computations fixed for orchestrator --- .../fallible_result/map_result.rs | 8 ++++---- .../fallible_result/par_result.rs | 10 +++++----- .../fallible_result/xap_result.rs | 10 +++++----- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index b149411..3148475 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -86,7 +86,7 @@ where let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); let x = X::new(params, iter, x1); - output.x_try_collect_into::(x) + output.x_try_collect_into::(x) } // reduce @@ -100,7 +100,7 @@ where let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); let x = X::new(params, 
iter, x1); - x.try_reduce::(reduce).1 + x.try_reduce::(reduce).1 } // early exit @@ -114,8 +114,8 @@ where let x1 = |i: I::Item| m1(i).into_result(); let x = X::new(params, iter, x1); match params.iteration_order { - IterationOrder::Ordered => x.try_next::().1, - IterationOrder::Arbitrary => x.try_next_any::().1, + IterationOrder::Ordered => x.try_next::().1, + IterationOrder::Arbitrary => x.try_next_any::().1, } } } diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 1a5c78c..7facb75 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -65,7 +65,7 @@ where self, orchestrator: Q, ) -> impl ParIterResult { - let (orchestrator, params, iter) = self.par.destruct(); + let (_, params, iter) = self.par.destruct(); ParResult { par: Par::new(orchestrator, params, iter), phantom: PhantomData, @@ -83,7 +83,7 @@ where let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); let x = X::new(params, iter, x1); - output.x_try_collect_into::(x) + output.x_try_collect_into::(x) } // reduce @@ -97,7 +97,7 @@ where let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); let x = X::new(params, iter, x1); - x.try_reduce::(reduce).1 + x.try_reduce::(reduce).1 } // early exit @@ -111,8 +111,8 @@ where let x1 = |i: I::Item| i.into_result(); let x = X::new(params, iter, x1); match params.iteration_order { - IterationOrder::Ordered => x.try_next::().1, - IterationOrder::Arbitrary => x.try_next_any::().1, + IterationOrder::Ordered => x.try_next::().1, + IterationOrder::Arbitrary => x.try_next_any::().1, } } } diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 77ea2f0..1fb08dd 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -73,7 +73,7 @@ where self, orchestrator: Q, ) -> impl ParIterResult { - let (orchestrator, params, iter, m1) = self.par.destruct(); + let (_, params, iter, m1) = self.par.destruct(); ParXapResult { par: ParXap::new(orchestrator, params, iter, m1), phantom: PhantomData, @@ -92,7 +92,7 @@ where let (orchestrator, params, iter, x1) = self.par.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = X::new(params, iter, x1); - output.x_try_collect_into::(x) + output.x_try_collect_into::(x) } // reduce @@ -106,7 +106,7 @@ where let (orchestrator, params, iter, x1) = self.par.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = X::new(params, iter, x1); - x.try_reduce::(reduce).1 + x.try_reduce::(reduce).1 } // early exit @@ -120,8 +120,8 @@ where let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = X::new(params, iter, x1); match params.iteration_order { - IterationOrder::Ordered => x.try_next::().1, - IterationOrder::Arbitrary => x.try_next_any::().1, + IterationOrder::Ordered => x.try_next::().1, + IterationOrder::Arbitrary => x.try_next_any::().1, } } } From 6eb7451e9f2aca6106484776bf5d4441030f574e Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:51:35 +0200 Subject: [PATCH 021/264] using computations fixed for orchestrator --- src/using/computational_variants/u_map.rs | 6 +++--- src/using/computational_variants/u_par.rs | 6 +++--- src/using/computational_variants/u_xap.rs | 6 +++--- 3 files changed, 9 
insertions(+), 9 deletions(-) diff --git a/src/using/computational_variants/u_map.rs b/src/using/computational_variants/u_map.rs index 06682e0..661da72 100644 --- a/src/using/computational_variants/u_map.rs +++ b/src/using/computational_variants/u_map.rs @@ -162,7 +162,7 @@ where where C: ParCollectInto, { - output.u_m_collect_into::(self.um) + output.u_m_collect_into::(self.um) } // reduce @@ -172,7 +172,7 @@ where Self::Item: Send, Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync, { - self.um.reduce::(reduce).1 + self.um.reduce::(reduce).1 } // early exit @@ -181,6 +181,6 @@ where where Self::Item: Send, { - self.um.next::().1 + self.um.next::().1 } } diff --git a/src/using/computational_variants/u_par.rs b/src/using/computational_variants/u_par.rs index 7d3b8e0..fd901f9 100644 --- a/src/using/computational_variants/u_par.rs +++ b/src/using/computational_variants/u_par.rs @@ -155,7 +155,7 @@ where where C: ParCollectInto, { - output.u_m_collect_into::(self.u_m()) + output.u_m_collect_into::(self.u_m()) } // reduce @@ -165,7 +165,7 @@ where Self::Item: Send, Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync, { - self.u_m().reduce::(reduce).1 + self.u_m().reduce::(reduce).1 } // early exit @@ -174,6 +174,6 @@ where where Self::Item: Send, { - self.u_m().next::().1 + self.u_m().next::().1 } } diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index 9fe335d..458ec7b 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -203,7 +203,7 @@ where where C: ParCollectInto, { - output.u_x_collect_into::(self.ux) + output.u_x_collect_into::(self.ux) } // reduce @@ -213,7 +213,7 @@ where Self::Item: Send, Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync, { - self.ux.reduce::(reduce).1 + self.ux.reduce::(reduce).1 } // early exit @@ -222,6 +222,6 @@ where where Self::Item: Send, { - self.ux.next::().1 + self.ux.next::().1 } } From 59b733d8601da6ed8208624cbc6ddbc439c4bca5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:53:09 +0200 Subject: [PATCH 022/264] par traits fixed to include orchestrator on constructor --- src/into_par_iter.rs | 4 ++-- src/iter_into_par_iter.rs | 6 +++++- src/parallel_drainable.rs | 2 +- src/parallelizable.rs | 2 +- src/parallelizable_collection.rs | 2 +- src/parallelizable_collection_mut.rs | 2 +- 6 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/into_par_iter.rs b/src/into_par_iter.rs index 11d368b..c50c53e 100644 --- a/src/into_par_iter.rs +++ b/src/into_par_iter.rs @@ -55,7 +55,7 @@ where I: IntoConcurrentIter, { fn into_par(self) -> Par { - Par::new(Params::default(), self.into_con_iter()) + Par::new(Default::default(), Params::default(), self.into_con_iter()) } } @@ -65,6 +65,6 @@ impl IntoConcurrentIter for Par { type IntoIter = I; fn into_con_iter(self) -> Self::IntoIter { - self.destruct().1 + self.destruct().2 } } diff --git a/src/iter_into_par_iter.rs b/src/iter_into_par_iter.rs index 228d882..696b628 100644 --- a/src/iter_into_par_iter.rs +++ b/src/iter_into_par_iter.rs @@ -128,6 +128,10 @@ where I::Item: Send + Sync, { fn iter_into_par(self) -> Par, DefaultOrchestrator> { - Par::new(Params::default(), self.iter_into_con_iter()) + Par::new( + Default::default(), + Params::default(), + self.iter_into_con_iter(), + ) } } diff --git a/src/parallel_drainable.rs b/src/parallel_drainable.rs index 23534ef..1f0ffe5 100644 --- a/src/parallel_drainable.rs +++ 
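
Patch 022 keeps the public entry points (into_par, iter_into_par, par, par_mut, par_drain) source-compatible by passing Default::default() for the new orchestrator argument. A hedged sketch of why a defaulted type parameter makes that work; Par2 and DefaultOrch are illustrative names, not the crate's API:

    #[derive(Default)]
    struct DefaultOrch;

    // The defaulted type parameter mirrors Par<I, R = DefaultOrchestrator>.
    struct Par2<I, R = DefaultOrch> {
        orchestrator: R,
        iter: I,
    }

    impl<I: Iterator> Par2<I> {
        // Callers that never name an orchestrator still get the default one,
        // mirroring Par::new(Default::default(), Params::default(), iter).
        fn from_iter(iter: I) -> Self {
            Par2 { orchestrator: DefaultOrch, iter }
        }
    }

    impl<I: Iterator, R> Par2<I, R> {
        // Swapping the orchestrator consumes self and rebuilds with the new one,
        // the same shape as the reworked with_runner(orchestrator).
        fn with_orchestrator<Q>(self, orchestrator: Q) -> Par2<I, Q> {
            Par2 { orchestrator, iter: self.iter }
        }
    }

    fn main() {
        let par = Par2::from_iter(0..10);
        let _par = par.with_orchestrator(DefaultOrch); // or any custom orchestrator
    }
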
b/src/parallel_drainable.rs @@ -50,7 +50,7 @@ pub trait ParallelDrainableOverSlice: ConcurrentDrainableOverSlice { where R: RangeBounds, { - Par::new(Params::default(), self.con_drain(range)) + Par::new(Default::default(), Params::default(), self.con_drain(range)) } } diff --git a/src/parallelizable.rs b/src/parallelizable.rs index 8bcf1a8..fc01aca 100644 --- a/src/parallelizable.rs +++ b/src/parallelizable.rs @@ -62,7 +62,7 @@ pub trait Parallelizable: ConcurrentIterable { /// assert_eq!(range.par().max(), Some(4)); /// ``` fn par(&self) -> Par<::Iter, DefaultOrchestrator> { - Par::new(Params::default(), self.con_iter()) + Par::new(Default::default(), Params::default(), self.con_iter()) } } diff --git a/src/parallelizable_collection.rs b/src/parallelizable_collection.rs index 5710458..883dbcb 100644 --- a/src/parallelizable_collection.rs +++ b/src/parallelizable_collection.rs @@ -77,7 +77,7 @@ pub trait ParallelizableCollection: ConcurrentCollection { <::Iterable<'_> as ConcurrentIterable>::Iter, DefaultOrchestrator, > { - Par::new(Params::default(), self.con_iter()) + Par::new(Default::default(), Params::default(), self.con_iter()) } } diff --git a/src/parallelizable_collection_mut.rs b/src/parallelizable_collection_mut.rs index 162c676..063b546 100644 --- a/src/parallelizable_collection_mut.rs +++ b/src/parallelizable_collection_mut.rs @@ -61,7 +61,7 @@ pub trait ParallelizableCollectionMut: ConcurrentCollectionMut + ParallelizableC /// assert_eq!(&vec, &[1, 2, 13, 14]); /// ``` fn par_mut(&mut self) -> impl ParIter { - Par::new(Params::default(), self.con_iter_mut()) + Par::new(Default::default(), Params::default(), self.con_iter_mut()) } } From 3197d5c62cfa838a05cb698be38e7b1d5e0c041b Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 10:54:23 +0200 Subject: [PATCH 023/264] orchestrator checkpoint: compiles --- src/into_par_iter.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/into_par_iter.rs b/src/into_par_iter.rs index c50c53e..be77677 100644 --- a/src/into_par_iter.rs +++ b/src/into_par_iter.rs @@ -65,6 +65,7 @@ impl IntoConcurrentIter for Par { type IntoIter = I; fn into_con_iter(self) -> Self::IntoIter { - self.destruct().2 + let (_, _, iter) = self.destruct(); + iter } } From 4908653f1beaffe4c13995fab895b03e485f2ac7 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:02:39 +0200 Subject: [PATCH 024/264] collect-ordered m2 is implemented with orchestrator --- src/orch/orchestrator.rs | 1 + .../collect_ordered.rs | 43 ++++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 9c308db..d54741a 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -4,6 +4,7 @@ pub trait Orchestrator { type Runner: ParallelRunner; fn new_runner( + &self, kind: ComputationKind, params: Params, initial_input_len: Option, diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index f33d00f..5cdec70 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,6 +1,8 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; -use crate::runner::thread_runner_compute as thread; +use crate::orch::Orchestrator; +use crate::runner::parallel_runner::ParallelRunner; +use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ 
computations::{M, X}, runner::ParallelRunnerCompute, @@ -11,6 +13,45 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m +pub fn m2(orchestrator: &mut C, m: M, pinned_vec: P) -> (usize, P) +where + C: Orchestrator, + I: ConcurrentIter, + O: Send, + M1: Fn(I::Item) -> O + Sync, + P: IntoConcurrentPinnedVec, +{ + let offset = pinned_vec.len(); + let (params, iter, map1) = m.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + + let o_bag: ConcurrentOrderedBag = pinned_vec.into(); + + // compute + let state = runner.new_shared_state(); + let shared_state = &state; + + let mut num_spawned = 0; + std::thread::scope(|s| { + while runner.do_spawn_new(num_spawned, shared_state, &iter) { + num_spawned += 1; + s.spawn(|| { + thread::collect_ordered::m( + runner.new_thread_runner(shared_state), + &iter, + shared_state, + &map1, + &o_bag, + offset, + ); + }); + } + }); + + let values = unsafe { o_bag.into_inner().unwrap_only_if_counts_match() }; + (num_spawned, values) +} + pub fn m(runner: C, m: M, pinned_vec: P) -> (usize, P) where C: ParallelRunnerCompute, From 9b0f71c1e1194c7672c4092080a8bed3894af532 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:17:45 +0200 Subject: [PATCH 025/264] m computations use orchestrator --- src/computational_variants/map.rs | 9 ++-- src/computations/map/collect.rs | 13 +++--- src/computations/map/m.rs | 22 ++++++---- src/computations/map/next.rs | 18 ++++---- src/computations/map/reduce.rs | 8 ++-- src/computations/map/transformations.rs | 11 +++-- .../collect_arbitrary.rs | 13 +++--- .../collect_ordered.rs | 42 +------------------ src/runner/parallel_runner_compute/next.rs | 13 +++--- .../parallel_runner_compute/next_any.rs | 13 +++--- src/runner/parallel_runner_compute/reduce.rs | 11 +++-- 11 files changed, 77 insertions(+), 96 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index a56e263..1a31fd3 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -19,8 +19,7 @@ where I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { - orchestrator: R, - m: M, + m: M, phantom: PhantomData, } @@ -32,15 +31,13 @@ where { pub(crate) fn new(orchestrator: R, params: Params, iter: I, m1: M1) -> Self { Self { - orchestrator, - m: M::new(params, iter, m1), + m: M::new(orchestrator, params, iter, m1), phantom: PhantomData, } } pub(crate) fn destruct(self) -> (R, Params, I, M1) { - let (params, iter, m1) = self.m.destruct(); - (self.orchestrator, params, iter, m1) + self.m.destruct() } } diff --git a/src/computations/map/collect.rs b/src/computations/map/collect.rs index 6b797ca..bea55ed 100644 --- a/src/computations/map/collect.rs +++ b/src/computations/map/collect.rs @@ -1,6 +1,7 @@ use super::m::M; #[cfg(test)] use crate::IterationOrder; +use crate::orch::Orchestrator; #[cfg(test)] use crate::runner::parallel_runner_compute::collect_arbitrary; use crate::runner::parallel_runner_compute::collect_ordered; @@ -8,15 +9,15 @@ use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; use orx_pinned_vec::IntoConcurrentPinnedVec; -impl M +impl M where + R: Orchestrator, I: ConcurrentIter, O: Send, M1: Fn(I::Item) -> O + Sync, { - pub fn collect_into(self, pinned_vec: P) -> (usize, P) + pub fn collect_into
<P>
(self, pinned_vec: P) -> (usize, P) where - R: ParallelRunner, P: IntoConcurrentPinnedVec, { let (len, p) = self.len_and_params(); @@ -24,9 +25,9 @@ where (true, _) => (0, self.sequential(pinned_vec)), #[cfg(test)] (false, IterationOrder::Arbitrary) => { - collect_arbitrary::m(R::collection(p, len), self, pinned_vec) + collect_arbitrary::m(R::Runner::collection(p, len), self, pinned_vec) } - (false, _) => collect_ordered::m(R::collection(p, len), self, pinned_vec), + (false, _) => collect_ordered::m(R::Runner::collection(p, len), self, pinned_vec), } } @@ -34,7 +35,7 @@ where where P: IntoConcurrentPinnedVec, { - let (_, iter, map1) = self.destruct(); + let (_, _, iter, map1) = self.destruct(); let iter = iter.into_seq_iter(); for i in iter { diff --git a/src/computations/map/m.rs b/src/computations/map/m.rs index 455d1a4..3c19f72 100644 --- a/src/computations/map/m.rs +++ b/src/computations/map/m.rs @@ -1,27 +1,35 @@ -use crate::{ChunkSize, IterationOrder, NumThreads, Params}; +use crate::{ChunkSize, IterationOrder, NumThreads, Params, orch::Orchestrator}; use orx_concurrent_iter::ConcurrentIter; -pub struct M +pub struct M where + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O, { + orchestrator: R, params: Params, iter: I, map1: M1, } -impl M +impl M where + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O, { - pub fn new(params: Params, iter: I, map1: M1) -> Self { - Self { params, iter, map1 } + pub fn new(orchestrator: R, params: Params, iter: I, map1: M1) -> Self { + Self { + orchestrator, + params, + iter, + map1, + } } - pub fn destruct(self) -> (Params, I, M1) { - (self.params, self.iter, self.map1) + pub fn destruct(self) -> (R, Params, I, M1) { + (self.orchestrator, self.params, self.iter, self.map1) } pub fn params(&self) -> Params { diff --git a/src/computations/map/next.rs b/src/computations/map/next.rs index 41112ba..5c5fa73 100644 --- a/src/computations/map/next.rs +++ b/src/computations/map/next.rs @@ -1,27 +1,23 @@ use super::m::M; +use crate::orch::Orchestrator; use crate::runner::parallel_runner_compute::{next, next_any}; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; -impl M +impl M where + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, O: Send, { - pub fn next(self) -> (usize, Option) - where - R: ParallelRunner, - { + pub fn next(self) -> (usize, Option) { let (len, p) = self.len_and_params(); - next::m(R::early_return(p, len), self) + next::m(R::Runner::early_return(p, len), self) } - pub fn next_any(self) -> (usize, Option) - where - R: ParallelRunner, - { + pub fn next_any(self) -> (usize, Option) { let (len, p) = self.len_and_params(); - next_any::m(R::early_return(p, len), self) + next_any::m(R::Runner::early_return(p, len), self) } } diff --git a/src/computations/map/reduce.rs b/src/computations/map/reduce.rs index 55740c2..df2e128 100644 --- a/src/computations/map/reduce.rs +++ b/src/computations/map/reduce.rs @@ -1,20 +1,22 @@ use super::m::M; +use crate::orch::Orchestrator; use crate::runner::parallel_runner_compute::reduce; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; -impl M +impl M where + R: Orchestrator, I: ConcurrentIter, O: Send, M1: Fn(I::Item) -> O + Sync, { - pub fn reduce(self, reduce: X) -> (usize, Option) + pub fn reduce(self, reduce: X) -> (usize, Option) where R: ParallelRunner, X: Fn(O, O) -> O + Sync, { let (len, p) = self.len_and_params(); - reduce::m(R::reduce(p, len), self, reduce) + 
reduce::m(R::Runner::reduce(p, len), self, reduce) } } diff --git a/src/computations/map/transformations.rs b/src/computations/map/transformations.rs index c646589..b42e9e3 100644 --- a/src/computations/map/transformations.rs +++ b/src/computations/map/transformations.rs @@ -1,18 +1,21 @@ +use crate::orch::Orchestrator; + use super::m::M; use orx_concurrent_iter::ConcurrentIter; -impl M +impl M where + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O, { - pub fn map(self, map: M2) -> M Q> + pub fn map(self, map: M2) -> M Q> where M2: Fn(O) -> Q, Q: Send, { - let (params, iter, map1) = self.destruct(); + let (orchestrator, params, iter, map1) = self.destruct(); let map2 = move |t| map(map1(t)); - M::new(params, iter, map2) + M::new(orchestrator, params, iter, map2) } } diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 7f7595c..10a3958 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,8 +1,8 @@ -#[cfg(test)] -use crate::computations::M; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; use crate::runner::thread_runner_compute as thread; +#[cfg(test)] +use crate::{computations::M, orch::Orchestrator, runner::ParallelRunner}; use crate::{computations::X, runner::ParallelRunnerCompute}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; @@ -11,17 +11,20 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m #[cfg(test)] -pub fn m(runner: C, m: M, pinned_vec: P) -> (usize, P) +pub fn m(m: M, pinned_vec: P) -> (usize, P) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, O: Send, M1: Fn(I::Item) -> O + Sync, P: IntoConcurrentPinnedVec, { + use crate::runner::ComputationKind; + let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); - let (_, iter, map1) = m.destruct(); + let (orchestrator, params, iter, map1) = m.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let mut bag: ConcurrentBag = pinned_vec.into(); match iter.try_get_len() { diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 5cdec70..f269a0e 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -13,7 +13,7 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m -pub fn m2(orchestrator: &mut C, m: M, pinned_vec: P) -> (usize, P) +pub fn m(m: M, pinned_vec: P) -> (usize, P) where C: Orchestrator, I: ConcurrentIter, @@ -22,7 +22,7 @@ where P: IntoConcurrentPinnedVec, { let offset = pinned_vec.len(); - let (params, iter, map1) = m.destruct(); + let (orchestrator, params, iter, map1) = m.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let o_bag: ConcurrentOrderedBag = pinned_vec.into(); @@ -52,44 +52,6 @@ where (num_spawned, values) } -pub fn m(runner: C, m: M, pinned_vec: P) -> (usize, P) -where - C: ParallelRunnerCompute, - I: ConcurrentIter, - O: Send, - M1: Fn(I::Item) -> O + Sync, - P: IntoConcurrentPinnedVec, -{ - let offset = pinned_vec.len(); - let (_, iter, map1) = m.destruct(); - - let o_bag: ConcurrentOrderedBag = pinned_vec.into(); - - // compute - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = 0; - 
std::thread::scope(|s| { - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; - s.spawn(|| { - thread::collect_ordered::m( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &map1, - &o_bag, - offset, - ); - }); - } - }); - - let values = unsafe { o_bag.into_inner().unwrap_only_if_counts_match() }; - (num_spawned, values) -} - // x pub fn x( diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index b75d9d2..3d08cde 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,17 +1,20 @@ +use crate::ParallelRunner; use crate::computations::{M, X}; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::runner::thread_runner_compute as thread; +use crate::orch::Orchestrator; +use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(runner: C, m: M) -> (usize, Option) +pub fn m(m: M) -> (usize, Option) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, O: Send, M1: Fn(I::Item) -> O + Sync, { - let (_, iter, xap1) = m.destruct(); + let (orchestrator, params, iter, map1) = m.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; @@ -27,7 +30,7 @@ where runner.new_thread_runner(shared_state), &iter, shared_state, - &xap1, + &map1, ) })) } diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 181e573..1b4e978 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,17 +1,20 @@ +use crate::ParallelRunner; use crate::computations::{M, X}; use crate::generic_values::runner_results::Fallibility; -use crate::runner::thread_runner_compute as thread; +use crate::orch::Orchestrator; +use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(runner: C, m: M) -> (usize, Option) +pub fn m(m: M) -> (usize, Option) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, O: Send, M1: Fn(I::Item) -> O + Sync, { - let (_, iter, xap1) = m.destruct(); + let (orchestrator, params, iter, map1) = m.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; @@ -27,7 +30,7 @@ where runner.new_thread_runner(shared_state), &iter, shared_state, - &xap1, + &map1, ) })); } diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 80f685e..ba366c0 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,20 +1,23 @@ +use crate::ParallelRunner; use crate::computations::{M, X}; use crate::generic_values::runner_results::{Fallibility, Reduce}; -use crate::runner::thread_runner_compute as thread; +use crate::orch::Orchestrator; +use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; // m -pub fn m(runner: C, m: M, reduce: Red) -> (usize, Option) +pub fn m(m: M, reduce: 
Red) -> (usize, Option) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, Red: Fn(O, O) -> O + Sync, O: Send, { - let (_, iter, map1) = m.destruct(); + let (orchestrator, params, iter, map1) = m.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; From e0b3ede7c8f97e4875e26ed294f652e9a88c32ed Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:21:10 +0200 Subject: [PATCH 026/264] par computations fixed to accept orchestrator --- src/computational_variants/par.rs | 10 +++++----- src/computations/map/collect.rs | 7 ++----- src/computations/map/next.rs | 7 ++----- src/computations/map/reduce.rs | 5 +---- 4 files changed, 10 insertions(+), 19 deletions(-) diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 14fb50b..da6f4f9 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -43,9 +43,9 @@ where (self.orchestrator, self.params, self.iter) } - fn m(self) -> M I::Item> { + fn m(self) -> M I::Item> { let (orchestrator, params, iter) = self.destruct(); - M::new(params, iter, map_self) + M::new(orchestrator, params, iter, map_self) } } @@ -193,15 +193,15 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - self.m().reduce::(reduce).1 + self.m().reduce(reduce).1 } // early exit fn first(self) -> Option { match self.params().iteration_order { - IterationOrder::Ordered => self.m().next::().1, - IterationOrder::Arbitrary => self.m().next_any::().1, + IterationOrder::Ordered => self.m().next().1, + IterationOrder::Arbitrary => self.m().next_any().1, } } } diff --git a/src/computations/map/collect.rs b/src/computations/map/collect.rs index bea55ed..9a64967 100644 --- a/src/computations/map/collect.rs +++ b/src/computations/map/collect.rs @@ -5,7 +5,6 @@ use crate::orch::Orchestrator; #[cfg(test)] use crate::runner::parallel_runner_compute::collect_arbitrary; use crate::runner::parallel_runner_compute::collect_ordered; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; use orx_pinned_vec::IntoConcurrentPinnedVec; @@ -24,10 +23,8 @@ where match (p.is_sequential(), p.iteration_order) { (true, _) => (0, self.sequential(pinned_vec)), #[cfg(test)] - (false, IterationOrder::Arbitrary) => { - collect_arbitrary::m(R::Runner::collection(p, len), self, pinned_vec) - } - (false, _) => collect_ordered::m(R::Runner::collection(p, len), self, pinned_vec), + (false, IterationOrder::Arbitrary) => collect_arbitrary::m(self, pinned_vec), + (false, _) => collect_ordered::m(self, pinned_vec), } } diff --git a/src/computations/map/next.rs b/src/computations/map/next.rs index 5c5fa73..2f93670 100644 --- a/src/computations/map/next.rs +++ b/src/computations/map/next.rs @@ -1,7 +1,6 @@ use super::m::M; use crate::orch::Orchestrator; use crate::runner::parallel_runner_compute::{next, next_any}; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; impl M @@ -12,12 +11,10 @@ where O: Send, { pub fn next(self) -> (usize, Option) { - let (len, p) = self.len_and_params(); - next::m(R::Runner::early_return(p, len), self) + next::m(self) } pub fn next_any(self) -> (usize, Option) { - let (len, p) = self.len_and_params(); - next_any::m(R::Runner::early_return(p, len), self) + next_any::m(self) } } diff --git a/src/computations/map/reduce.rs 
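
Patches 024–026 converge on one compute-loop shape: the orchestrator builds a runner for the computation kind, the runner decides how many scoped threads to spawn, and per-thread results are folded at the end. Reduced to a self-contained sketch below; chunking, shared state, and the real do_spawn_new heuristics are elided, and MiniRunner with its max_threads knob is an assumed stand-in:

    use std::sync::atomic::{AtomicUsize, Ordering};

    struct MiniRunner {
        max_threads: usize, // assumed stand-in for the runner's spawn heuristics
    }

    impl MiniRunner {
        fn do_spawn_new(&self, already_spawned: usize) -> bool {
            already_spawned < self.max_threads
        }
    }

    // Mirrors the reduce::m skeleton: spawn scoped worker threads while the
    // runner allows, then fold the per-thread partial results.
    fn reduce_m(input: &[u64], runner: MiniRunner) -> Option<u64> {
        let cursor = AtomicUsize::new(0);
        let mut results: Vec<u64> = Vec::new();

        std::thread::scope(|s| {
            let mut handles = Vec::new();
            let mut num_spawned = 0;
            while runner.do_spawn_new(num_spawned) {
                num_spawned += 1;
                handles.push(s.spawn(|| {
                    let mut acc: Option<u64> = None;
                    loop {
                        let i = cursor.fetch_add(1, Ordering::Relaxed);
                        match input.get(i) {
                            Some(x) => acc = Some(acc.map_or(*x, |a| a + x)),
                            None => break,
                        }
                    }
                    acc
                }));
            }
            for h in handles {
                if let Some(x) = h.join().expect("worker panicked") {
                    results.push(x);
                }
            }
        });

        results.into_iter().reduce(|a, b| a + b)
    }

    fn main() {
        let input: Vec<u64> = (1..=100).collect();
        assert_eq!(reduce_m(&input, MiniRunner { max_threads: 4 }), Some(5050));
    }

The fetch_add cursor stands in for the ConcurrentIter that the real thread runners pull items from.
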
b/src/computations/map/reduce.rs index df2e128..02adc0b 100644 --- a/src/computations/map/reduce.rs +++ b/src/computations/map/reduce.rs @@ -1,7 +1,6 @@ use super::m::M; use crate::orch::Orchestrator; use crate::runner::parallel_runner_compute::reduce; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; impl M @@ -13,10 +12,8 @@ where { pub fn reduce(self, reduce: X) -> (usize, Option) where - R: ParallelRunner, X: Fn(O, O) -> O + Sync, { - let (len, p) = self.len_and_params(); - reduce::m(R::Runner::reduce(p, len), self, reduce) + reduce::m(self, reduce) } } From b1e2065e344cb4e0b5f5dbbbb3b0e9c97c7b22c6 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:21:43 +0200 Subject: [PATCH 027/264] map computations fixed to use orchestrator --- src/computational_variants/map.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 1a31fd3..dbb4abf 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -199,7 +199,7 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - self.m.reduce::(reduce).1 + self.m.reduce(reduce).1 } // early exit @@ -209,8 +209,8 @@ where Self::Item: Send, { match self.params().iteration_order { - IterationOrder::Ordered => self.m.next::().1, - IterationOrder::Arbitrary => self.m.next_any::().1, + IterationOrder::Ordered => self.m.next().1, + IterationOrder::Arbitrary => self.m.next_any().1, } } } From 4c27f90f814a435385bfa147b94b44e5960628cd Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:23:44 +0200 Subject: [PATCH 028/264] collect into methods are fixed for orchestrator for the m variant --- src/collect_into/fixed_vec.rs | 5 +++-- src/collect_into/par_collect_into.rs | 5 +++-- src/collect_into/split_vec.rs | 7 ++++--- src/collect_into/vec.rs | 9 +++++---- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index e248ef9..e101b5b 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -2,6 +2,7 @@ use super::par_collect_into::ParCollectIntoCore; use crate::computations::{M, X}; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::orch::Orchestrator; use crate::runner::ParallelRunner; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; @@ -19,9 +20,9 @@ where vec.into() } - fn m_collect_into(self, m: M) -> Self + fn m_collect_into(self, m: M) -> Self where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, O: Send, diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index d140fed..61c595f 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,6 +1,7 @@ use crate::computations::{M, X}; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::orch::Orchestrator; use crate::runner::ParallelRunner; use crate::using::UParCollectIntoCore; use orx_concurrent_iter::ConcurrentIter; @@ -12,9 +13,9 @@ pub trait ParCollectIntoCore: Collection { fn empty(iter_len: Option) -> Self; - fn m_collect_into(self, m: M) -> Self + fn m_collect_into(self, m: M) -> Self where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync; diff --git a/src/collect_into/split_vec.rs 
b/src/collect_into/split_vec.rs index 898c950..d4dfabd 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,6 +1,7 @@ use super::par_collect_into::ParCollectIntoCore; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::orch::Orchestrator; use crate::{ collect_into::utils::split_vec_reserve, computations::{M, X}, @@ -25,15 +26,15 @@ where vec } - fn m_collect_into(mut self, m: M) -> Self + fn m_collect_into(mut self, m: M) -> Self where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, O: Send, { split_vec_reserve(&mut self, m.par_len()); - let (_num_spawned, pinned_vec) = m.collect_into::(self); + let (_num_spawned, pinned_vec) = m.collect_into(self); pinned_vec } diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index c5a9862..2678ba7 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -3,6 +3,7 @@ use crate::collect_into::utils::extend_vec_from_split; use crate::computations::{M, X}; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::orch::Orchestrator; use crate::runner::ParallelRunner; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; @@ -21,9 +22,9 @@ where } } - fn m_collect_into(mut self, m: M) -> Self + fn m_collect_into(mut self, m: M) -> Self where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, O: Send, @@ -31,13 +32,13 @@ where match m.par_len() { None => { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let split_vec = split_vec.m_collect_into::(m); + let split_vec = split_vec.m_collect_into(m); extend_vec_from_split(self, split_vec) } Some(len) => { self.reserve(len); let fixed_vec = FixedVec::from(self); - let (_num_spawned, fixed_vec) = m.collect_into::(fixed_vec); + let (_num_spawned, fixed_vec) = m.collect_into(fixed_vec); Vec::from(fixed_vec) } } From 4c3467e721ab7af86c995d9db4336de72a512ea3 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:24:41 +0200 Subject: [PATCH 029/264] par and map collections use orchestrator --- src/computational_variants/map.rs | 2 +- src/computational_variants/par.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index dbb4abf..f2728e4 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -189,7 +189,7 @@ where where C: ParCollectInto, { - output.m_collect_into::(self.m) + output.m_collect_into(self.m) } // reduce diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index da6f4f9..5b3f76b 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -183,7 +183,7 @@ where where C: ParCollectInto, { - output.m_collect_into::(self.m()) + output.m_collect_into(self.m()) } // reduce From d484929f2c3355dc21cb5c777fb060bf1deb827f Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:27:17 +0200 Subject: [PATCH 030/264] tests fixed --- src/computations/map/tests/collect.rs | 6 +++--- src/computations/map/tests/find.rs | 11 ++++++----- src/computations/map/tests/reduce.rs | 10 +++++----- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/computations/map/tests/collect.rs b/src/computations/map/tests/collect.rs index 9ae4f7c..ff2680b 100644 --- a/src/computations/map/tests/collect.rs +++ 
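
Patch 030 rewires the tests to build computations as M::new(DefaultOrchestrator::default(), params, iter, map) and to call the compute entry points without turbofish runner arguments. The shape of that construction as a self-contained illustration; MiniM and MiniOrchestrator are stand-ins, and the collect here is sequential:

    #[derive(Default)]
    struct MiniOrchestrator;

    struct MiniM<I, F> {
        _orchestrator: MiniOrchestrator, // carried, as in M::new(orchestrator, ..)
        iter: I,
        map: F,
    }

    impl<I, O, F> MiniM<I, F>
    where
        I: Iterator,
        F: Fn(I::Item) -> O,
    {
        fn new(orchestrator: MiniOrchestrator, iter: I, map: F) -> Self {
            Self { _orchestrator: orchestrator, iter, map }
        }

        // Sequential fallback, enough to express the tests' expectations.
        fn collect_into(self, mut out: Vec<O>) -> Vec<O> {
            out.extend(self.iter.map(self.map));
            out
        }
    }

    fn main() {
        let m = MiniM::new(MiniOrchestrator::default(), 0..5, |x: i32| x.to_string());
        let out = m.collect_into(Vec::new());
        assert_eq!(out, vec!["0", "1", "2", "3", "4"]);
    }
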
b/src/computations/map/tests/collect.rs @@ -1,4 +1,4 @@ -use crate::{IterationOrder, Params, computations::map::m::M, runner::DefaultRunner}; +use crate::{IterationOrder, Params, computations::map::m::M, orch::DefaultOrchestrator}; use orx_concurrent_iter::IntoConcurrentIter; use orx_pinned_vec::PinnedVec; use orx_split_vec::SplitVec; @@ -33,9 +33,9 @@ fn m_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let m = M::new(params, iter, map); + let m = M::new(DefaultOrchestrator::default(), params, iter, map); - let (_, mut output) = m.collect_into::(output); + let (_, mut output) = m.collect_into(output); if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { expected.sort(); diff --git a/src/computations/map/tests/find.rs b/src/computations/map/tests/find.rs index 301e7bb..3677113 100644 --- a/src/computations/map/tests/find.rs +++ b/src/computations/map/tests/find.rs @@ -1,6 +1,7 @@ use crate::{ - DefaultRunner, Params, + Params, computations::{map::m::M, map_self}, + orch::DefaultOrchestrator, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; @@ -22,9 +23,9 @@ fn m_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = M::new(params, iter, map_self); + let m = M::new(DefaultOrchestrator::default(), params, iter, map_self); - let output = m.next::().1; + let output = m.next().1; assert_eq!(expected, output); } @@ -41,8 +42,8 @@ fn m_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = M::new(params, iter, map); - let output = m.next::().1; + let m = M::new(DefaultOrchestrator::default(), params, iter, map); + let output = m.next().1; assert_eq!(expected, output); } diff --git a/src/computations/map/tests/reduce.rs b/src/computations/map/tests/reduce.rs index 2b8cbeb..c7fc256 100644 --- a/src/computations/map/tests/reduce.rs +++ b/src/computations/map/tests/reduce.rs @@ -1,7 +1,7 @@ use crate::{ Params, computations::{map::m::M, map_self}, - runner::DefaultRunner, + orch::DefaultOrchestrator, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; @@ -27,8 +27,8 @@ fn m_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = M::new(params, iter, map_self); - let (_, output) = m.reduce::(reduce); + let m = M::new(DefaultOrchestrator::default(), params, iter, map_self); + let (_, output) = m.reduce(reduce); assert_eq!(expected, output); } @@ -50,8 +50,8 @@ fn m_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = M::new(params, iter, map); - let (_, output) = m.reduce::(reduce); + let m = M::new(DefaultOrchestrator::default(), params, iter, map); + let (_, output) = m.reduce(reduce); assert_eq!(expected, output); } From db96d0060be06df05cfcf4ece9076c1e2436de7c Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:27:56 +0200 Subject: [PATCH 031/264] checkpoint: orch compiles --- src/computations/map/collect.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/computations/map/collect.rs b/src/computations/map/collect.rs index 9a64967..0268779 100644 --- a/src/computations/map/collect.rs +++ 
b/src/computations/map/collect.rs @@ -19,7 +19,7 @@ where where P: IntoConcurrentPinnedVec, { - let (len, p) = self.len_and_params(); + let (_, p) = self.len_and_params(); match (p.is_sequential(), p.iteration_order) { (true, _) => (0, self.sequential(pinned_vec)), #[cfg(test)] From d49579da7f76de308887126031c71878881399ff Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:31:59 +0200 Subject: [PATCH 032/264] m reduce is simplified --- src/computational_variants/map.rs | 3 ++- src/computational_variants/par.rs | 3 ++- src/computations/map/mod.rs | 1 - src/computations/map/reduce.rs | 19 ------------------- src/computations/map/tests/reduce.rs | 5 +++-- 5 files changed, 7 insertions(+), 24 deletions(-) delete mode 100644 src/computations/map/reduce.rs diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index f2728e4..91e4dc5 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -4,6 +4,7 @@ use crate::computational_variants::fallible_result::ParMapResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; +use crate::runner::parallel_runner_compute; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, computations::M, @@ -199,7 +200,7 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - self.m.reduce(reduce).1 + parallel_runner_compute::reduce::m(self.m, reduce).1 } // early exit diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 5b3f76b..efbb84a 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -3,6 +3,7 @@ use crate::computational_variants::fallible_result::ParResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; +use crate::runner::parallel_runner_compute; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, computations::{M, map_self}, @@ -193,7 +194,7 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - self.m().reduce(reduce).1 + parallel_runner_compute::reduce::m(self.m(), reduce).1 } // early exit diff --git a/src/computations/map/mod.rs b/src/computations/map/mod.rs index 3a6638b..4ead8ad 100644 --- a/src/computations/map/mod.rs +++ b/src/computations/map/mod.rs @@ -4,7 +4,6 @@ mod tests; mod collect; mod m; mod next; -mod reduce; mod transformations; pub use m::M; diff --git a/src/computations/map/reduce.rs b/src/computations/map/reduce.rs deleted file mode 100644 index 02adc0b..0000000 --- a/src/computations/map/reduce.rs +++ /dev/null @@ -1,19 +0,0 @@ -use super::m::M; -use crate::orch::Orchestrator; -use crate::runner::parallel_runner_compute::reduce; -use orx_concurrent_iter::ConcurrentIter; - -impl M -where - R: Orchestrator, - I: ConcurrentIter, - O: Send, - M1: Fn(I::Item) -> O + Sync, -{ - pub fn reduce(self, reduce: X) -> (usize, Option) - where - X: Fn(O, O) -> O + Sync, - { - reduce::m(self, reduce) - } -} diff --git a/src/computations/map/tests/reduce.rs b/src/computations/map/tests/reduce.rs index c7fc256..8fa5dc1 100644 --- a/src/computations/map/tests/reduce.rs +++ b/src/computations/map/tests/reduce.rs @@ -2,6 +2,7 @@ use crate::{ Params, computations::{map::m::M, map_self}, orch::DefaultOrchestrator, + runner::parallel_runner_compute, }; use 
orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; @@ -28,7 +29,7 @@ fn m_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); let m = M::new(DefaultOrchestrator::default(), params, iter, map_self); - let (_, output) = m.reduce(reduce); + let (_, output) = parallel_runner_compute::reduce::m(m, reduce); assert_eq!(expected, output); } @@ -51,7 +52,7 @@ fn m_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); let m = M::new(DefaultOrchestrator::default(), params, iter, map); - let (_, output) = m.reduce(reduce); + let (_, output) = parallel_runner_compute::reduce::m(m, reduce); assert_eq!(expected, output); } From 0428a7451a90bc3743d5f0a5799cc91e58a982a5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:34:37 +0200 Subject: [PATCH 033/264] M next and next_any simplified --- src/computational_variants/map.rs | 4 ++-- src/computational_variants/par.rs | 4 ++-- src/computations/map/mod.rs | 1 - src/computations/map/next.rs | 20 -------------------- src/computations/map/tests/find.rs | 5 +++-- 5 files changed, 7 insertions(+), 27 deletions(-) delete mode 100644 src/computations/map/next.rs diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 91e4dc5..8dc426a 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -210,8 +210,8 @@ where Self::Item: Send, { match self.params().iteration_order { - IterationOrder::Ordered => self.m.next().1, - IterationOrder::Arbitrary => self.m.next_any().1, + IterationOrder::Ordered => parallel_runner_compute::next::m(self.m).1, + IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self.m).1, } } } diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index efbb84a..9b34093 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -201,8 +201,8 @@ where fn first(self) -> Option { match self.params().iteration_order { - IterationOrder::Ordered => self.m().next().1, - IterationOrder::Arbitrary => self.m().next_any().1, + IterationOrder::Ordered => parallel_runner_compute::next::m(self.m()).1, + IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self.m()).1, } } } diff --git a/src/computations/map/mod.rs b/src/computations/map/mod.rs index 4ead8ad..02a4a0b 100644 --- a/src/computations/map/mod.rs +++ b/src/computations/map/mod.rs @@ -3,7 +3,6 @@ mod tests; mod collect; mod m; -mod next; mod transformations; pub use m::M; diff --git a/src/computations/map/next.rs b/src/computations/map/next.rs deleted file mode 100644 index 2f93670..0000000 --- a/src/computations/map/next.rs +++ /dev/null @@ -1,20 +0,0 @@ -use super::m::M; -use crate::orch::Orchestrator; -use crate::runner::parallel_runner_compute::{next, next_any}; -use orx_concurrent_iter::ConcurrentIter; - -impl M -where - R: Orchestrator, - I: ConcurrentIter, - M1: Fn(I::Item) -> O + Sync, - O: Send, -{ - pub fn next(self) -> (usize, Option) { - next::m(self) - } - - pub fn next_any(self) -> (usize, Option) { - next_any::m(self) - } -} diff --git a/src/computations/map/tests/find.rs b/src/computations/map/tests/find.rs index 3677113..06c85dd 100644 --- a/src/computations/map/tests/find.rs +++ b/src/computations/map/tests/find.rs @@ -2,6 +2,7 @@ use crate::{ Params, computations::{map::m::M, map_self}, orch::DefaultOrchestrator, + 
runner::parallel_runner_compute, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; @@ -25,7 +26,7 @@ fn m_find(n: usize, nt: usize, chunk: usize) { let iter = input.into_con_iter(); let m = M::new(DefaultOrchestrator::default(), params, iter, map_self); - let output = m.next().1; + let output = parallel_runner_compute::next::m(m).1; assert_eq!(expected, output); } @@ -43,7 +44,7 @@ fn m_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); let m = M::new(DefaultOrchestrator::default(), params, iter, map); - let output = m.next().1; + let output = parallel_runner_compute::next::m(m).1; assert_eq!(expected, output); } From 2deae402d3262adac65063b37327b177e00b5185 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:35:17 +0200 Subject: [PATCH 034/264] clean up --- src/computations/map/mod.rs | 1 - src/computations/map/transformations.rs | 21 --------------------- 2 files changed, 22 deletions(-) delete mode 100644 src/computations/map/transformations.rs diff --git a/src/computations/map/mod.rs b/src/computations/map/mod.rs index 02a4a0b..f21734c 100644 --- a/src/computations/map/mod.rs +++ b/src/computations/map/mod.rs @@ -3,6 +3,5 @@ mod tests; mod collect; mod m; -mod transformations; pub use m::M; diff --git a/src/computations/map/transformations.rs b/src/computations/map/transformations.rs deleted file mode 100644 index b42e9e3..0000000 --- a/src/computations/map/transformations.rs +++ /dev/null @@ -1,21 +0,0 @@ -use crate::orch::Orchestrator; - -use super::m::M; -use orx_concurrent_iter::ConcurrentIter; - -impl M -where - R: Orchestrator, - I: ConcurrentIter, - M1: Fn(I::Item) -> O, -{ - pub fn map(self, map: M2) -> M Q> - where - M2: Fn(O) -> Q, - Q: Send, - { - let (orchestrator, params, iter, map1) = self.destruct(); - let map2 = move |t| map(map1(t)); - M::new(orchestrator, params, iter, map2) - } -} From 2022194211b02ecd00b5ca1e0818e3917330022f Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:42:13 +0200 Subject: [PATCH 035/264] collect into takes par-map directly --- src/collect_into/fixed_vec.rs | 3 ++- src/collect_into/par_collect_into.rs | 3 ++- src/collect_into/split_vec.rs | 3 ++- src/collect_into/vec.rs | 3 ++- src/computational_variants/map.rs | 2 +- src/computational_variants/par.rs | 7 ++++++- 6 files changed, 15 insertions(+), 6 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index e101b5b..e1845af 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,4 +1,5 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::computational_variants::ParMap; use crate::computations::{M, X}; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; @@ -20,7 +21,7 @@ where vec.into() } - fn m_collect_into(self, m: M) -> Self + fn m_collect_into(self, m: ParMap) -> Self where R: Orchestrator, I: ConcurrentIter, diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index 61c595f..ae0237b 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,3 +1,4 @@ +use crate::computational_variants::ParMap; use crate::computations::{M, X}; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; @@ -13,7 +14,7 @@ pub trait ParCollectIntoCore: Collection { fn empty(iter_len: Option) -> Self; - fn m_collect_into(self, m: 
M) -> Self + fn m_collect_into(self, m: ParMap) -> Self where R: Orchestrator, I: ConcurrentIter, diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index d4dfabd..7334f3c 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,4 +1,5 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::computational_variants::ParMap; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::orch::Orchestrator; @@ -26,7 +27,7 @@ where vec } - fn m_collect_into(mut self, m: M) -> Self + fn m_collect_into(mut self, m: ParMap) -> Self where R: Orchestrator, I: ConcurrentIter, diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 2678ba7..132798e 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,5 +1,6 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::extend_vec_from_split; +use crate::computational_variants::ParMap; use crate::computations::{M, X}; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; @@ -22,7 +23,7 @@ where } } - fn m_collect_into(mut self, m: M) -> Self + fn m_collect_into(mut self, m: ParMap) -> Self where R: Orchestrator, I: ConcurrentIter, diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 8dc426a..d05c241 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -190,7 +190,7 @@ where where C: ParCollectInto, { - output.m_collect_into(self.m) + output.m_collect_into(self) } // reduce diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 9b34093..9547a3e 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -48,6 +48,11 @@ where let (orchestrator, params, iter) = self.destruct(); M::new(orchestrator, params, iter, map_self) } + + fn into_map(self) -> ParMap I::Item, R> { + let (orchestrator, params, iter) = self.destruct(); + ParMap::new(orchestrator, params, iter, map_self) + } } unsafe impl Send for Par @@ -184,7 +189,7 @@ where where C: ParCollectInto, { - output.m_collect_into(self.m()) + output.m_collect_into(self.into_map()) } // reduce From e3a6857bb45b80cb53b6a68e8e5e9618eb03dc99 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:52:35 +0200 Subject: [PATCH 036/264] m computation flattened and simplified --- src/collect_into/fixed_vec.rs | 2 +- src/collect_into/split_vec.rs | 8 +-- src/collect_into/vec.rs | 4 +- src/computational_variants/map.rs | 71 ++++++++++++++----- src/computational_variants/par.rs | 6 +- src/computations/map/collect.rs | 15 ++-- src/computations/map/tests/find.rs | 8 +-- src/computations/map/tests/reduce.rs | 8 +-- .../collect_arbitrary.rs | 8 ++- .../collect_ordered.rs | 8 +-- src/runner/parallel_runner_compute/next.rs | 3 +- .../parallel_runner_compute/next_any.rs | 3 +- src/runner/parallel_runner_compute/reduce.rs | 3 +- 13 files changed, 91 insertions(+), 56 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index e1845af..460ef44 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,6 +1,6 @@ use super::par_collect_into::ParCollectIntoCore; use crate::computational_variants::ParMap; -use crate::computations::{M, X}; +use crate::computations::X; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::orch::Orchestrator; diff --git 
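
Patch 036 (below) flattens M into ParMap's own fields; its par_len decides whether the collecting pinned vec may pre-reserve capacity: only a known-length, genuinely parallel run returns Some. That rule in isolation, as an illustrative free function rather than the crate's method:

    // Mirrors ParMap::par_len: a sequential run reserves lazily, and an
    // unknown input length cannot be reserved for at all.
    fn par_len(is_sequential: bool, input_len: Option<usize>) -> Option<usize> {
        match (is_sequential, input_len) {
            (true, _) => None, // sequential path: no concurrent reservation
            (false, len) => len,
        }
    }

    fn main() {
        assert_eq!(par_len(true, Some(1_000)), None);
        assert_eq!(par_len(false, Some(1_000)), Some(1_000));
        assert_eq!(par_len(false, None), None);
    }
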
a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index 7334f3c..5070115 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -3,11 +3,7 @@ use crate::computational_variants::ParMap; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::orch::Orchestrator; -use crate::{ - collect_into::utils::split_vec_reserve, - computations::{M, X}, - runner::ParallelRunner, -}; +use crate::{collect_into::utils::split_vec_reserve, computations::X, runner::ParallelRunner}; use orx_concurrent_iter::ConcurrentIter; #[cfg(test)] use orx_pinned_vec::PinnedVec; @@ -35,7 +31,7 @@ where O: Send, { split_vec_reserve(&mut self, m.par_len()); - let (_num_spawned, pinned_vec) = m.collect_into(self); + let (_, pinned_vec) = m.par_collect_into(self); pinned_vec } diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 132798e..3a2e7a9 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,7 +1,7 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::extend_vec_from_split; use crate::computational_variants::ParMap; -use crate::computations::{M, X}; +use crate::computations::X; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::orch::Orchestrator; @@ -39,7 +39,7 @@ where Some(len) => { self.reserve(len); let fixed_vec = FixedVec::from(self); - let (_num_spawned, fixed_vec) = m.collect_into(fixed_vec); + let (_, fixed_vec) = m.par_collect_into(fixed_vec); Vec::from(fixed_vec) } } diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index d05c241..7f2dfda 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -7,11 +7,10 @@ use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, - computations::M, using::{UsingClone, UsingFun, computational_variants::UParMap}, }; use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; +use orx_fixed_vec::IntoConcurrentPinnedVec; /// A parallel iterator that maps inputs. pub struct ParMap @@ -20,8 +19,10 @@ where I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { - m: M, - phantom: PhantomData, + orchestrator: R, + params: Params, + iter: I, + map1: M1, } impl ParMap @@ -30,15 +31,53 @@ where I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, { - pub(crate) fn new(orchestrator: R, params: Params, iter: I, m1: M1) -> Self { + pub(crate) fn new(orchestrator: R, params: Params, iter: I, map1: M1) -> Self { Self { - m: M::new(orchestrator, params, iter, m1), - phantom: PhantomData, + orchestrator, + params, + iter, + map1, } } pub(crate) fn destruct(self) -> (R, Params, I, M1) { - self.m.destruct() + (self.orchestrator, self.params, self.iter, self.map1) + } + + pub(crate) fn par_len(&self) -> Option { + match (self.params.is_sequential(), self.iter.try_get_len()) { + (true, _) => None, // not required to concurrent reserve when seq + (false, x) => x, + } + } + + pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, P) + where + P: IntoConcurrentPinnedVec, + O: Send, + { + match (self.params.is_sequential(), self.params.iteration_order) { + (true, _) => (0, self.seq_collect_into(pinned_vec)), + #[cfg(test)] + (false, IterationOrder::Arbitrary) => { + parallel_runner_compute::collect_arbitrary::m(self, pinned_vec) + } + (false, _) => parallel_runner_compute::collect_ordered::m(self, pinned_vec), + } + } + + fn seq_collect_into
<P>
(self, mut pinned_vec: P) -> P + where + P: IntoConcurrentPinnedVec, + { + let (_, _, iter, map1) = self.destruct(); + + let iter = iter.into_seq_iter(); + for i in iter { + pinned_vec.push(map1(i)); + } + + pinned_vec } } @@ -67,27 +106,27 @@ where type Item = O; fn con_iter(&self) -> &impl ConcurrentIter { - self.m.iter() + &self.iter } fn params(&self) -> Params { - self.m.params() + self.params } // params transformations fn num_threads(mut self, num_threads: impl Into) -> Self { - self.m.num_threads(num_threads); + self.params = self.params.with_num_threads(num_threads); self } fn chunk_size(mut self, chunk_size: impl Into) -> Self { - self.m.chunk_size(chunk_size); + self.params = self.params.with_chunk_size(chunk_size); self } fn iteration_order(mut self, collect: IterationOrder) -> Self { - self.m.iteration_order(collect); + self.params = self.params.with_collect_ordering(collect); self } @@ -200,7 +239,7 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - parallel_runner_compute::reduce::m(self.m, reduce).1 + parallel_runner_compute::reduce::m(self, reduce).1 } // early exit @@ -210,8 +249,8 @@ where Self::Item: Send, { match self.params().iteration_order { - IterationOrder::Ordered => parallel_runner_compute::next::m(self.m).1, - IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self.m).1, + IterationOrder::Ordered => parallel_runner_compute::next::m(self).1, + IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self).1, } } } diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 9547a3e..7ca7c4d 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -199,15 +199,15 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - parallel_runner_compute::reduce::m(self.m(), reduce).1 + parallel_runner_compute::reduce::m(self.into_map(), reduce).1 } // early exit fn first(self) -> Option { match self.params().iteration_order { - IterationOrder::Ordered => parallel_runner_compute::next::m(self.m()).1, - IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self.m()).1, + IterationOrder::Ordered => parallel_runner_compute::next::m(self.into_map()).1, + IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self.into_map()).1, } } } diff --git a/src/computations/map/collect.rs b/src/computations/map/collect.rs index 0268779..073648c 100644 --- a/src/computations/map/collect.rs +++ b/src/computations/map/collect.rs @@ -19,13 +19,14 @@ where where P: IntoConcurrentPinnedVec, { - let (_, p) = self.len_and_params(); - match (p.is_sequential(), p.iteration_order) { - (true, _) => (0, self.sequential(pinned_vec)), - #[cfg(test)] - (false, IterationOrder::Arbitrary) => collect_arbitrary::m(self, pinned_vec), - (false, _) => collect_ordered::m(self, pinned_vec), - } + // let (_, p) = self.len_and_params(); + // match (p.is_sequential(), p.iteration_order) { + // (true, _) => (0, self.sequential(pinned_vec)), + // #[cfg(test)] + // (false, IterationOrder::Arbitrary) => collect_arbitrary::m(self, pinned_vec), + // (false, _) => collect_ordered::m(self, pinned_vec), + // } + todo!() } fn sequential
<P>
(self, mut pinned_vec: P) -> P diff --git a/src/computations/map/tests/find.rs b/src/computations/map/tests/find.rs index 06c85dd..a727b4e 100644 --- a/src/computations/map/tests/find.rs +++ b/src/computations/map/tests/find.rs @@ -1,7 +1,5 @@ use crate::{ - Params, - computations::{map::m::M, map_self}, - orch::DefaultOrchestrator, + Params, computational_variants::ParMap, computations::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, }; use orx_concurrent_iter::IntoConcurrentIter; @@ -24,7 +22,7 @@ fn m_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = M::new(DefaultOrchestrator::default(), params, iter, map_self); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map_self); let output = parallel_runner_compute::next::m(m).1; assert_eq!(expected, output); @@ -43,7 +41,7 @@ fn m_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = M::new(DefaultOrchestrator::default(), params, iter, map); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); let output = parallel_runner_compute::next::m(m).1; assert_eq!(expected, output); diff --git a/src/computations/map/tests/reduce.rs b/src/computations/map/tests/reduce.rs index 8fa5dc1..266f3ca 100644 --- a/src/computations/map/tests/reduce.rs +++ b/src/computations/map/tests/reduce.rs @@ -1,7 +1,5 @@ use crate::{ - Params, - computations::{map::m::M, map_self}, - orch::DefaultOrchestrator, + Params, computational_variants::ParMap, computations::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, }; use orx_concurrent_iter::IntoConcurrentIter; @@ -28,7 +26,7 @@ fn m_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = M::new(DefaultOrchestrator::default(), params, iter, map_self); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map_self); let (_, output) = parallel_runner_compute::reduce::m(m, reduce); assert_eq!(expected, output); @@ -51,7 +49,7 @@ fn m_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = M::new(DefaultOrchestrator::default(), params, iter, map); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); let (_, output) = parallel_runner_compute::reduce::m(m, reduce); assert_eq!(expected, output); diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 10a3958..5c7b8cc 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,9 +1,11 @@ +#[cfg(test)] +use crate::computational_variants::ParMap; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; use crate::runner::thread_runner_compute as thread; -#[cfg(test)] -use crate::{computations::M, orch::Orchestrator, runner::ParallelRunner}; use crate::{computations::X, runner::ParallelRunnerCompute}; +#[cfg(test)] +use crate::{orch::Orchestrator, runner::ParallelRunner}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; @@ -11,7 +13,7 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m #[cfg(test)] -pub fn m(m: 
M, pinned_vec: P) -> (usize, P) +pub fn m(m: ParMap, pinned_vec: P) -> (usize, P) where C: Orchestrator, I: ConcurrentIter, diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index f269a0e..c4b5791 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,19 +1,17 @@ +use crate::computational_variants::ParMap; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; use crate::orch::Orchestrator; use crate::runner::parallel_runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; -use crate::{ - computations::{M, X}, - runner::ParallelRunnerCompute, -}; +use crate::{computations::X, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; // m -pub fn m(m: M, pinned_vec: P) -> (usize, P) +pub fn m(m: ParMap, pinned_vec: P) -> (usize, P) where C: Orchestrator, I: ConcurrentIter, diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 3d08cde..36b9107 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,4 +1,5 @@ use crate::ParallelRunner; +use crate::computational_variants::ParMap; use crate::computations::{M, X}; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::Orchestrator; @@ -6,7 +7,7 @@ use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(m: M) -> (usize, Option) +pub fn m(m: ParMap) -> (usize, Option) where C: Orchestrator, I: ConcurrentIter, diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 1b4e978..a5dfb08 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,4 +1,5 @@ use crate::ParallelRunner; +use crate::computational_variants::ParMap; use crate::computations::{M, X}; use crate::generic_values::runner_results::Fallibility; use crate::orch::Orchestrator; @@ -6,7 +7,7 @@ use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(m: M) -> (usize, Option) +pub fn m(m: ParMap) -> (usize, Option) where C: Orchestrator, I: ConcurrentIter, diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index ba366c0..d25f4b4 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,4 +1,5 @@ use crate::ParallelRunner; +use crate::computational_variants::ParMap; use crate::computations::{M, X}; use crate::generic_values::runner_results::{Fallibility, Reduce}; use crate::orch::Orchestrator; @@ -8,7 +9,7 @@ use orx_concurrent_iter::ConcurrentIter; // m -pub fn m(m: M, reduce: Red) -> (usize, Option) +pub fn m(m: ParMap, reduce: Red) -> (usize, Option) where C: Orchestrator, I: ConcurrentIter, From f64614c7281dd0c7a82f63bce80c0dbdc134d391 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:53:15 +0200 Subject: [PATCH 037/264] collect map tests fixed --- 
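A hand-reconstructed sketch of the compute entry points as the preceding patch ([PATCH 036]) leaves them. The extracted diff text in this series drops every angle-bracketed generic list, so the list below, its ordering, and the commented body are assumptions rather than verbatim source:

use crate::computational_variants::ParMap;
use crate::orch::Orchestrator;
use crate::runner::ComputationKind;
use orx_concurrent_iter::ConcurrentIter;

// reduce::m after the refactoring: the ParMap computation carries its own
// orchestrator, so the caller no longer passes a runner in.
pub fn m<C, I, O, M1, Red>(m: ParMap<I, O, M1, C>, reduce: Red) -> (usize, Option<O>)
where
    C: Orchestrator,
    I: ConcurrentIter,
    O: Send,
    M1: Fn(I::Item) -> O + Sync,
    Red: Fn(O, O) -> O + Sync,
{
    // destruct the computation into orchestrator, params, input and closure
    let (orchestrator, params, iter, map1) = m.destruct();
    // the orchestrator, not the caller, builds the runner; the visible x
    // variants in these patches pass ComputationKind::Collect, mirrored
    // here as-is (exact kind is an assumption)
    let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len());
    // thread computations are spawned through `runner` and their partial
    // results folded with `reduce`; elided in this sketch
    let _ = (runner, map1, reduce);
    todo!()
}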
src/computations/map/collect.rs | 2 +- src/computations/map/tests/collect.rs | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/computations/map/collect.rs b/src/computations/map/collect.rs index 073648c..2f7c640 100644 --- a/src/computations/map/collect.rs +++ b/src/computations/map/collect.rs @@ -15,7 +15,7 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - pub fn collect_into
<P>
(self, pinned_vec: P) -> (usize, P) + pub fn collect_into2
<P>
(self, pinned_vec: P) -> (usize, P) where P: IntoConcurrentPinnedVec, { diff --git a/src/computations/map/tests/collect.rs b/src/computations/map/tests/collect.rs index ff2680b..3939362 100644 --- a/src/computations/map/tests/collect.rs +++ b/src/computations/map/tests/collect.rs @@ -1,4 +1,7 @@ -use crate::{IterationOrder, Params, computations::map::m::M, orch::DefaultOrchestrator}; +use crate::{ + IterationOrder, Params, computational_variants::ParMap, computations::map::m::M, + orch::DefaultOrchestrator, +}; use orx_concurrent_iter::IntoConcurrentIter; use orx_pinned_vec::PinnedVec; use orx_split_vec::SplitVec; @@ -33,9 +36,9 @@ fn m_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let m = M::new(DefaultOrchestrator::default(), params, iter, map); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); - let (_, mut output) = m.collect_into(output); + let (_, mut output) = m.par_collect_into(output); if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { expected.sort(); From b19fb1e8cad1c7613ff674e18f797b2e20975534 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:54:28 +0200 Subject: [PATCH 038/264] m computation made redundant --- src/collect_into/par_collect_into.rs | 2 +- src/computational_variants/par.rs | 7 +------ src/computations/mod.rs | 2 +- src/runner/parallel_runner_compute/next.rs | 2 +- src/runner/parallel_runner_compute/next_any.rs | 2 +- src/runner/parallel_runner_compute/reduce.rs | 2 +- 6 files changed, 6 insertions(+), 11 deletions(-) diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index ae0237b..a0172c8 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,5 +1,5 @@ use crate::computational_variants::ParMap; -use crate::computations::{M, X}; +use crate::computations::X; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::orch::Orchestrator; diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 7ca7c4d..a2e32a5 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -6,7 +6,7 @@ use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, - computations::{M, map_self}, + computations::map_self, using::{UsingClone, UsingFun, computational_variants::UPar}, }; use crate::{IntoParIter, ParIterResult}; @@ -44,11 +44,6 @@ where (self.orchestrator, self.params, self.iter) } - fn m(self) -> M I::Item> { - let (orchestrator, params, iter) = self.destruct(); - M::new(orchestrator, params, iter, map_self) - } - fn into_map(self) -> ParMap I::Item, R> { let (orchestrator, params, iter) = self.destruct(); ParMap::new(orchestrator, params, iter, map_self) diff --git a/src/computations/mod.rs b/src/computations/mod.rs index a1a5a7f..8120bee 100644 --- a/src/computations/mod.rs +++ b/src/computations/mod.rs @@ -5,5 +5,5 @@ mod xap; pub(crate) use default_fns::*; pub(crate) use heap_sort::heap_sort_into; -pub(crate) use map::M; +// pub(crate) use map::M; pub(crate) use xap::X; diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 36b9107..0560b8f 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ 
b/src/runner/parallel_runner_compute/next.rs @@ -1,6 +1,6 @@ use crate::ParallelRunner; use crate::computational_variants::ParMap; -use crate::computations::{M, X}; +use crate::computations::X; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index a5dfb08..f93f4c6 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,6 +1,6 @@ use crate::ParallelRunner; use crate::computational_variants::ParMap; -use crate::computations::{M, X}; +use crate::computations::X; use crate::generic_values::runner_results::Fallibility; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index d25f4b4..b55afdc 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,6 +1,6 @@ use crate::ParallelRunner; use crate::computational_variants::ParMap; -use crate::computations::{M, X}; +use crate::computations::X; use crate::generic_values::runner_results::{Fallibility, Reduce}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; From e5ffff91228df7757cc7177693dbc2839c4e64f7 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:56:01 +0200 Subject: [PATCH 039/264] m tests moved to computational variants --- .../tests/map/collect.rs | 46 +++++++++++++++ src/computational_variants/tests/map/find.rs | 48 ++++++++++++++++ src/computational_variants/tests/map/mod.rs | 3 + .../tests/map/reduce.rs | 56 +++++++++++++++++++ src/computational_variants/tests/mod.rs | 1 + src/computations/mod.rs | 2 - 6 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 src/computational_variants/tests/map/collect.rs create mode 100644 src/computational_variants/tests/map/find.rs create mode 100644 src/computational_variants/tests/map/mod.rs create mode 100644 src/computational_variants/tests/map/reduce.rs diff --git a/src/computational_variants/tests/map/collect.rs b/src/computational_variants/tests/map/collect.rs new file mode 100644 index 0000000..c9e6715 --- /dev/null +++ b/src/computational_variants/tests/map/collect.rs @@ -0,0 +1,46 @@ +use crate::{IterationOrder, Params, computational_variants::ParMap, orch::DefaultOrchestrator}; +use orx_concurrent_iter::IntoConcurrentIter; +use orx_pinned_vec::PinnedVec; +use orx_split_vec::SplitVec; +use test_case::test_matrix; + +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64], + [IterationOrder::Ordered, IterationOrder::Arbitrary]) +] +fn m_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { + let offset = 33; + + let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); + let map = |x: String| format!("{}!", x); + + let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + let mut expected = Vec::new(); + + for i in 0..offset { + let value = || map(i.to_string()); + output.push(value()); + expected.push(value()); + } + expected.extend(input.clone().into_iter().map(|x| map(x))); + + let params = Params::new(nt, chunk, ordering); + let iter = 
input.into_con_iter(); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); + + let (_, mut output) = m.par_collect_into(output); + + if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { + expected.sort(); + output.sort(); + } + + assert_eq!(expected, output.to_vec()); +} diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs new file mode 100644 index 0000000..a727b4e --- /dev/null +++ b/src/computational_variants/tests/map/find.rs @@ -0,0 +1,48 @@ +use crate::{ + Params, computational_variants::ParMap, computations::map_self, orch::DefaultOrchestrator, + runner::parallel_runner_compute, +}; +use orx_concurrent_iter::IntoConcurrentIter; +use test_case::test_matrix; + +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64]) +] +fn m_find(n: usize, nt: usize, chunk: usize) { + let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); + + let expected = input.clone().into_iter().next(); + + let params = Params::new(nt, chunk, Default::default()); + let iter = input.into_con_iter(); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map_self); + + let output = parallel_runner_compute::next::m(m).1; + assert_eq!(expected, output); +} + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64]) +] +fn m_map_find(n: usize, nt: usize, chunk: usize) { + let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); + let map = |x: String| format!("{}!", x); + + let expected = input.clone().into_iter().map(map).next(); + + let params = Params::new(nt, chunk, Default::default()); + let iter = input.into_con_iter(); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); + let output = parallel_runner_compute::next::m(m).1; + + assert_eq!(expected, output); +} diff --git a/src/computational_variants/tests/map/mod.rs b/src/computational_variants/tests/map/mod.rs new file mode 100644 index 0000000..5493e3c --- /dev/null +++ b/src/computational_variants/tests/map/mod.rs @@ -0,0 +1,3 @@ +mod collect; +mod find; +mod reduce; diff --git a/src/computational_variants/tests/map/reduce.rs b/src/computational_variants/tests/map/reduce.rs new file mode 100644 index 0000000..266f3ca --- /dev/null +++ b/src/computational_variants/tests/map/reduce.rs @@ -0,0 +1,56 @@ +use crate::{ + Params, computational_variants::ParMap, computations::map_self, orch::DefaultOrchestrator, + runner::parallel_runner_compute, +}; +use orx_concurrent_iter::IntoConcurrentIter; +use test_case::test_matrix; + +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64]) +] +fn m_reduce(n: usize, nt: usize, chunk: usize) { + let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); + let reduce = |x: String, y: String| match x < y { + true => y, + false => x, + }; + + let expected = input.clone().into_iter().reduce(reduce); + + let params = Params::new(nt, chunk, Default::default()); + let iter = input.into_con_iter(); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map_self); + let (_, output) = parallel_runner_compute::reduce::m(m, reduce); + + assert_eq!(expected, output); +} + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64]) +] +fn m_map_reduce(n: usize, nt: usize, chunk: usize) { + let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); + let map 
= |x: String| format!("{}!", x); + let reduce = |x: String, y: String| match x < y { + true => y, + false => x, + }; + + let expected = input.clone().into_iter().map(map).reduce(reduce); + + let params = Params::new(nt, chunk, Default::default()); + let iter = input.into_con_iter(); + let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); + let (_, output) = parallel_runner_compute::reduce::m(m, reduce); + + assert_eq!(expected, output); +} diff --git a/src/computational_variants/tests/mod.rs b/src/computational_variants/tests/mod.rs index b36eec8..a27cd6a 100644 --- a/src/computational_variants/tests/mod.rs +++ b/src/computational_variants/tests/mod.rs @@ -5,6 +5,7 @@ mod for_each; mod inspect; mod iter_consuming; mod iter_ref; +mod map; mod min_max; mod range; mod slice; diff --git a/src/computations/mod.rs b/src/computations/mod.rs index 8120bee..e7005df 100644 --- a/src/computations/mod.rs +++ b/src/computations/mod.rs @@ -1,9 +1,7 @@ mod default_fns; mod heap_sort; -mod map; mod xap; pub(crate) use default_fns::*; pub(crate) use heap_sort::heap_sort_into; -// pub(crate) use map::M; pub(crate) use xap::X; From 09457f774fc63cc98b93f440c73778de0fd8bf45 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 11:56:23 +0200 Subject: [PATCH 040/264] m computation clean up --- src/computations/map/collect.rs | 45 ------------------- src/computations/map/m.rs | 65 --------------------------- src/computations/map/mod.rs | 7 --- src/computations/map/tests/collect.rs | 49 -------------------- src/computations/map/tests/find.rs | 48 -------------------- src/computations/map/tests/mod.rs | 3 -- src/computations/map/tests/reduce.rs | 56 ----------------------- 7 files changed, 273 deletions(-) delete mode 100644 src/computations/map/collect.rs delete mode 100644 src/computations/map/m.rs delete mode 100644 src/computations/map/mod.rs delete mode 100644 src/computations/map/tests/collect.rs delete mode 100644 src/computations/map/tests/find.rs delete mode 100644 src/computations/map/tests/mod.rs delete mode 100644 src/computations/map/tests/reduce.rs diff --git a/src/computations/map/collect.rs b/src/computations/map/collect.rs deleted file mode 100644 index 2f7c640..0000000 --- a/src/computations/map/collect.rs +++ /dev/null @@ -1,45 +0,0 @@ -use super::m::M; -#[cfg(test)] -use crate::IterationOrder; -use crate::orch::Orchestrator; -#[cfg(test)] -use crate::runner::parallel_runner_compute::collect_arbitrary; -use crate::runner::parallel_runner_compute::collect_ordered; -use orx_concurrent_iter::ConcurrentIter; -use orx_pinned_vec::IntoConcurrentPinnedVec; - -impl M -where - R: Orchestrator, - I: ConcurrentIter, - O: Send, - M1: Fn(I::Item) -> O + Sync, -{ - pub fn collect_into2
<P>
(self, pinned_vec: P) -> (usize, P) - where - P: IntoConcurrentPinnedVec, - { - // let (_, p) = self.len_and_params(); - // match (p.is_sequential(), p.iteration_order) { - // (true, _) => (0, self.sequential(pinned_vec)), - // #[cfg(test)] - // (false, IterationOrder::Arbitrary) => collect_arbitrary::m(self, pinned_vec), - // (false, _) => collect_ordered::m(self, pinned_vec), - // } - todo!() - } - - fn sequential
<P>
(self, mut pinned_vec: P) -> P - where - P: IntoConcurrentPinnedVec, - { - let (_, _, iter, map1) = self.destruct(); - - let iter = iter.into_seq_iter(); - for i in iter { - pinned_vec.push(map1(i)); - } - - pinned_vec - } -} diff --git a/src/computations/map/m.rs b/src/computations/map/m.rs deleted file mode 100644 index 3c19f72..0000000 --- a/src/computations/map/m.rs +++ /dev/null @@ -1,65 +0,0 @@ -use crate::{ChunkSize, IterationOrder, NumThreads, Params, orch::Orchestrator}; -use orx_concurrent_iter::ConcurrentIter; - -pub struct M -where - R: Orchestrator, - I: ConcurrentIter, - M1: Fn(I::Item) -> O, -{ - orchestrator: R, - params: Params, - iter: I, - map1: M1, -} - -impl M -where - R: Orchestrator, - I: ConcurrentIter, - M1: Fn(I::Item) -> O, -{ - pub fn new(orchestrator: R, params: Params, iter: I, map1: M1) -> Self { - Self { - orchestrator, - params, - iter, - map1, - } - } - - pub fn destruct(self) -> (R, Params, I, M1) { - (self.orchestrator, self.params, self.iter, self.map1) - } - - pub fn params(&self) -> Params { - self.params - } - - pub fn len_and_params(&self) -> (Option, Params) { - (self.iter.try_get_len(), self.params) - } - - pub fn num_threads(&mut self, num_threads: impl Into) { - self.params = self.params.with_num_threads(num_threads); - } - - pub fn chunk_size(&mut self, chunk_size: impl Into) { - self.params = self.params.with_chunk_size(chunk_size); - } - - pub fn iteration_order(&mut self, collect: IterationOrder) { - self.params = self.params.with_collect_ordering(collect); - } - - pub fn iter(&self) -> &I { - &self.iter - } - - pub fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } -} diff --git a/src/computations/map/mod.rs b/src/computations/map/mod.rs deleted file mode 100644 index f21734c..0000000 --- a/src/computations/map/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[cfg(test)] -mod tests; - -mod collect; -mod m; - -pub use m::M; diff --git a/src/computations/map/tests/collect.rs b/src/computations/map/tests/collect.rs deleted file mode 100644 index 3939362..0000000 --- a/src/computations/map/tests/collect.rs +++ /dev/null @@ -1,49 +0,0 @@ -use crate::{ - IterationOrder, Params, computational_variants::ParMap, computations::map::m::M, - orch::DefaultOrchestrator, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use orx_pinned_vec::PinnedVec; -use orx_split_vec::SplitVec; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] -const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64], - [IterationOrder::Ordered, IterationOrder::Arbitrary]) -] -fn m_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { - let offset = 33; - - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let map = |x: String| format!("{}!", x); - - let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let mut expected = Vec::new(); - - for i in 0..offset { - let value = || map(i.to_string()); - output.push(value()); - expected.push(value()); - } - expected.extend(input.clone().into_iter().map(|x| map(x))); - - let params = Params::new(nt, chunk, ordering); - let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); - - let (_, mut output) = m.par_collect_into(output); - - if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { 
- expected.sort(); - output.sort(); - } - - assert_eq!(expected, output.to_vec()); -} diff --git a/src/computations/map/tests/find.rs b/src/computations/map/tests/find.rs deleted file mode 100644 index a727b4e..0000000 --- a/src/computations/map/tests/find.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::{ - Params, computational_variants::ParMap, computations::map_self, orch::DefaultOrchestrator, - runner::parallel_runner_compute, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] -const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn m_find(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - - let expected = input.clone().into_iter().next(); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map_self); - - let output = parallel_runner_compute::next::m(m).1; - assert_eq!(expected, output); -} - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn m_map_find(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let map = |x: String| format!("{}!", x); - - let expected = input.clone().into_iter().map(map).next(); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); - let output = parallel_runner_compute::next::m(m).1; - - assert_eq!(expected, output); -} diff --git a/src/computations/map/tests/mod.rs b/src/computations/map/tests/mod.rs deleted file mode 100644 index 5493e3c..0000000 --- a/src/computations/map/tests/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod collect; -mod find; -mod reduce; diff --git a/src/computations/map/tests/reduce.rs b/src/computations/map/tests/reduce.rs deleted file mode 100644 index 266f3ca..0000000 --- a/src/computations/map/tests/reduce.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::{ - Params, computational_variants::ParMap, computations::map_self, orch::DefaultOrchestrator, - runner::parallel_runner_compute, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] -const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn m_reduce(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let reduce = |x: String, y: String| match x < y { - true => y, - false => x, - }; - - let expected = input.clone().into_iter().reduce(reduce); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map_self); - let (_, output) = parallel_runner_compute::reduce::m(m, reduce); - - assert_eq!(expected, output); -} - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn m_map_reduce(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let map = |x: String| format!("{}!", x); - let reduce = |x: String, y: String| match x < y { - true => y, - false => x, - }; - - let expected = input.clone().into_iter().map(map).reduce(reduce); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let m = 
ParMap::new(DefaultOrchestrator::default(), params, iter, map); - let (_, output) = parallel_runner_compute::reduce::m(m, reduce); - - assert_eq!(expected, output); -} From f30f90dd07bdd8b23a77b4f385bfaadf7248d422 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 13:43:59 +0200 Subject: [PATCH 041/264] collect arbitrary receives ParXap --- .../collect_arbitrary.rs | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 5c7b8cc..0f5891e 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,10 +1,11 @@ #[cfg(test)] use crate::computational_variants::ParMap; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; -use crate::runner::thread_runner_compute as thread; -use crate::{computations::X, runner::ParallelRunnerCompute}; -#[cfg(test)] +use crate::computational_variants::ParXap; +use crate::generic_values::TransformableValues; +use crate::generic_values::runner_results::{ + Infallible, ParallelCollectArbitrary, ThreadCollectArbitrary, +}; +use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{orch::Orchestrator, runner::ParallelRunner}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; @@ -61,21 +62,22 @@ where // x pub fn x( - runner: C, - x: X, + x: ParXap, pinned_vec: P, ) -> (usize, ParallelCollectArbitrary) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, - Vo: Values, + Vo: TransformableValues, Vo::Item: Send, M1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); - let (_, iter, xap1) = x.destruct(); + + let (orchestrator, params, iter, xap1) = x.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let mut bag: ConcurrentBag = pinned_vec.into(); match iter.try_get_len() { From 72b1b2d1d84ac70fe23f7b6cefdcf37d9e95c36d Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 13:49:25 +0200 Subject: [PATCH 042/264] xap compute methods expect ParXap with orchestrator --- .../collect_ordered.rs | 24 +++++++++---------- src/runner/parallel_runner_compute/next.rs | 17 ++++++------- .../parallel_runner_compute/next_any.rs | 17 ++++++------- src/runner/parallel_runner_compute/reduce.rs | 16 +++++++------ 4 files changed, 38 insertions(+), 36 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index c4b5791..a40726d 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,10 +1,11 @@ -use crate::computational_variants::ParMap; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; +use crate::computational_variants::{ParMap, ParXap}; +use crate::generic_values::TransformableValues; +use crate::generic_values::runner_results::{ + Fallibility, Infallible, ParallelCollect, ThreadCollect, +}; use crate::orch::Orchestrator; use crate::runner::parallel_runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; -use crate::{computations::X, runner::ParallelRunnerCompute}; use 
orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; @@ -52,21 +53,18 @@ where // x -pub fn x( - runner: C, - x: X, - pinned_vec: P, -) -> (usize, ParallelCollect) +pub fn x(x: ParXap, pinned_vec: P) -> (usize, ParallelCollect) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, - Vo: Values, + Vo: TransformableValues, Vo::Item: Send, ::Error: Send, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { - let (_, iter, xap1) = x.destruct(); + let (orchestrator, params, iter, xap1) = x.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); // compute let state = runner.new_shared_state(); diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 0560b8f..3feac11 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,10 +1,10 @@ use crate::ParallelRunner; -use crate::computational_variants::ParMap; -use crate::computations::X; -use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; +use crate::computational_variants::{ParMap, ParXap}; +use crate::generic_values::TransformableValues; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Fallibility, Infallible, NextSuccess, NextWithIdx}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; -use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; pub fn m(m: ParMap) -> (usize, Option) @@ -55,15 +55,16 @@ type ResultNext = Result< <::Fallibility as Fallibility>::Error, >; -pub fn x(runner: C, x: X) -> (usize, ResultNext) +pub fn x(x: ParXap) -> (usize, ResultNext) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, - Vo: Values, + Vo: TransformableValues, Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let (_, iter, xap1) = x.destruct(); + let (orchestrator, params, iter, xap1) = x.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index f93f4c6..f523ad9 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,10 +1,10 @@ use crate::ParallelRunner; -use crate::computational_variants::ParMap; -use crate::computations::X; -use crate::generic_values::runner_results::Fallibility; +use crate::computational_variants::{ParMap, ParXap}; +use crate::generic_values::TransformableValues; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; -use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; pub fn m(m: ParMap) -> (usize, Option) @@ -48,15 +48,16 @@ where type ResultNextAny = Result::Item>, <::Fallibility as Fallibility>::Error>; -pub fn x(runner: C, x: X) -> (usize, ResultNextAny) +pub fn x(x: ParXap) -> (usize, ResultNextAny) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, - Vo: Values, + Vo: TransformableValues, Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let 
(_, iter, xap1) = x.destruct(); + let (orchestrator, params, iter, xap1) = x.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index b55afdc..c27be8a 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,7 +1,8 @@ use crate::ParallelRunner; -use crate::computational_variants::ParMap; +use crate::computational_variants::{ParMap, ParXap}; use crate::computations::X; -use crate::generic_values::runner_results::{Fallibility, Reduce}; +use crate::generic_values::TransformableValues; +use crate::generic_values::runner_results::{Fallibility, Infallible, Reduce}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{generic_values::Values, runner::ParallelRunnerCompute}; @@ -59,16 +60,17 @@ where type ResultReduce = Result::Item>, <::Fallibility as Fallibility>::Error>; -pub fn x(runner: C, x: X, reduce: Red) -> (usize, ResultReduce) +pub fn x(x: ParXap, reduce: Red) -> (usize, ResultReduce) where - C: ParallelRunnerCompute, + C: Orchestrator, I: ConcurrentIter, - Vo: Values, + Vo: TransformableValues, Vo::Item: Send, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { - let (_, iter, xap1) = x.destruct(); + let (orchestrator, params, iter, xap1) = x.destruct(); + let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; From fff239fa6a451259c3307a3afbc22651f3d46dfe Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 14:06:48 +0200 Subject: [PATCH 043/264] ParXap computations flattened and simplified --- src/collect_into/fixed_vec.rs | 12 +-- src/collect_into/par_collect_into.rs | 12 +-- src/collect_into/split_vec.rs | 14 +-- src/collect_into/vec.rs | 12 +-- src/computational_variants/map.rs | 2 +- src/computational_variants/xap.rs | 129 ++++++++++++++++++++------- src/computations/xap/x.rs | 6 +- 7 files changed, 127 insertions(+), 60 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index 460ef44..3f63219 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,8 +1,8 @@ use super::par_collect_into::ParCollectIntoCore; -use crate::computational_variants::ParMap; +use crate::computational_variants::{ParMap, ParXap}; use crate::computations::X; -use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; use crate::runner::ParallelRunner; use orx_concurrent_iter::ConcurrentIter; @@ -32,12 +32,12 @@ where FixedVec::from(vec.m_collect_into::(m)) } - fn x_collect_into(self, x: X) -> Self + fn x_collect_into(self, x: ParXap) -> Self where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync, + Vo: TransformableValues, + X1: Fn(I::Item) -> Vo + Sync, { let vec = Vec::from(self); FixedVec::from(vec.x_collect_into::(x)) diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index a0172c8..2b99979 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,7 +1,7 @@ -use 
crate::computational_variants::ParMap; +use crate::computational_variants::{ParMap, ParXap}; use crate::computations::X; -use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; use crate::runner::ParallelRunner; use crate::using::UParCollectIntoCore; @@ -20,12 +20,12 @@ pub trait ParCollectIntoCore: Collection { I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync; - fn x_collect_into(self, x: X) -> Self + fn x_collect_into(self, x: ParXap) -> Self where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync; + Vo: TransformableValues, + X1: Fn(I::Item) -> Vo + Sync; fn x_try_collect_into( self, diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index 5070115..51ea4fc 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,7 +1,7 @@ use super::par_collect_into::ParCollectIntoCore; -use crate::computational_variants::ParMap; -use crate::generic_values::Values; +use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; use crate::{collect_into::utils::split_vec_reserve, computations::X, runner::ParallelRunner}; use orx_concurrent_iter::ConcurrentIter; @@ -35,15 +35,15 @@ where pinned_vec } - fn x_collect_into(mut self, x: X) -> Self + fn x_collect_into(mut self, x: ParXap) -> Self where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync, + Vo: TransformableValues, + X1: Fn(I::Item) -> Vo + Sync, { split_vec_reserve(&mut self, x.par_len()); - let (_num_spawned, pinned_vec) = x.collect_into::(self); + let (_num_spawned, pinned_vec) = x.par_collect_into(self); pinned_vec } diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 3a2e7a9..1bc6397 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,9 +1,9 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::extend_vec_from_split; -use crate::computational_variants::ParMap; +use crate::computational_variants::{ParMap, ParXap}; use crate::computations::X; -use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; use crate::runner::ParallelRunner; use orx_concurrent_iter::ConcurrentIter; @@ -45,12 +45,12 @@ where } } - fn x_collect_into(self, x: X) -> Self + fn x_collect_into(self, x: ParXap) -> Self where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync, + Vo: TransformableValues, + X1: Fn(I::Item) -> Vo + Sync, { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); let split_vec = split_vec.x_collect_into::(x); diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 7f2dfda..da3a32e 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -248,7 +248,7 @@ where where Self::Item: Send, { - match self.params().iteration_order { + match self.params.iteration_order { IterationOrder::Ordered => parallel_runner_compute::next::m(self).1, IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self).1, } diff --git a/src/computational_variants/xap.rs 
b/src/computational_variants/xap.rs index f63b214..fc29d0a 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -1,102 +1,162 @@ use crate::ParIterResult; use crate::computational_variants::fallible_result::ParXapResult; use crate::generic_values::TransformableValues; -use crate::generic_values::runner_results::Infallible; +use crate::generic_values::runner_results::{ + Infallible, ParallelCollect, ParallelCollectArbitrary, +}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; +use crate::runner::parallel_runner_compute; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, - computations::X, using::{UsingClone, UsingFun, computational_variants::UParXap}, }; use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; +use orx_fixed_vec::IntoConcurrentPinnedVec; /// A parallel iterator that xaps inputs. /// /// *xap* is a generalization of one-to-one map, filter-map and flat-map operations. -pub struct ParXap +pub struct ParXap where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, { orchestrator: R, - x: X, - phantom: PhantomData, + params: Params, + iter: I, + xap1: X1, } -impl ParXap +impl ParXap where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, { - pub(crate) fn new(orchestrator: R, params: Params, iter: I, x1: M1) -> Self { + pub(crate) fn new(orchestrator: R, params: Params, iter: I, xap1: X1) -> Self { Self { orchestrator, - x: X::new(params, iter, x1), - phantom: PhantomData, + params, + iter, + xap1, + } + } + + pub(crate) fn destruct(self) -> (R, Params, I, X1) { + (self.orchestrator, self.params, self.iter, self.xap1) + } + + pub(crate) fn par_len(&self) -> Option { + match (self.params.is_sequential(), self.iter.try_get_len()) { + (true, _) => None, // not required to concurrent reserve when seq + (false, x) => x, + } + } + + pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, P) + where + P: IntoConcurrentPinnedVec, + Vo: TransformableValues, + Vo::Item: Send, + { + match (self.params.is_sequential(), self.params.iteration_order) { + (true, _) => (0, self.seq_collect_into(pinned_vec)), + (false, IterationOrder::Arbitrary) => { + let (num_threads, result) = + parallel_runner_compute::collect_arbitrary::x(self, pinned_vec); + let pinned_vec = match result { + ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, + ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, + }; + (num_threads, pinned_vec) + } + (false, IterationOrder::Ordered) => { + let (num_threads, result) = + parallel_runner_compute::collect_ordered::x(self, pinned_vec); + let pinned_vec = match result { + ParallelCollect::AllCollected { pinned_vec } => pinned_vec, + ParallelCollect::StoppedByWhileCondition { + pinned_vec, + stopped_idx: _, + } => pinned_vec, + }; + (num_threads, pinned_vec) + } } } - pub(crate) fn destruct(self) -> (R, Params, I, M1) { - let (params, iter, x1) = self.x.destruct(); - (self.orchestrator, params, iter, x1) + fn seq_collect_into
<P>
(self, mut pinned_vec: P) -> P + where + P: IntoConcurrentPinnedVec, + { + let (_, _, iter, xap1) = self.destruct(); + + let iter = iter.into_seq_iter(); + for i in iter { + let vt = xap1(i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if Vo::sequential_push_to_stop(done).is_some() { + break; + } + } + + pinned_vec } } -unsafe impl Send for ParXap +unsafe impl Send for ParXap where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, { } -unsafe impl Sync for ParXap +unsafe impl Sync for ParXap where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, { } -impl ParIter for ParXap +impl ParIter for ParXap where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, { type Item = Vo::Item; fn con_iter(&self) -> &impl ConcurrentIter { - self.x.iter() + &self.iter } fn params(&self) -> Params { - self.x.params() + self.params } // params transformations fn num_threads(mut self, num_threads: impl Into) -> Self { - self.x.num_threads(num_threads); + self.params = self.params.with_num_threads(num_threads); self } fn chunk_size(mut self, chunk_size: impl Into) -> Self { - self.x.chunk_size(chunk_size); + self.params = self.params.with_chunk_size(chunk_size); self } fn iteration_order(mut self, collect: IterationOrder) -> Self { - self.x.iteration_order(collect); + self.params = self.params.with_collect_ordering(collect); self } @@ -211,7 +271,7 @@ where where C: ParCollectInto, { - output.x_collect_into::(self.x) + output.x_collect_into(self) } // reduce @@ -221,7 +281,8 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - self.x.reduce::(reduce).1 + let (_, Ok(acc)) = parallel_runner_compute::reduce::x(self, reduce); + acc } // early exit @@ -230,9 +291,15 @@ where where Self::Item: Send, { - match self.params().iteration_order { - IterationOrder::Ordered => self.x.next::().1, - IterationOrder::Arbitrary => self.x.next_any::().1, + match self.params.iteration_order { + IterationOrder::Ordered => { + let (_num_threads, Ok(result)) = parallel_runner_compute::next::x(self); + result.map(|x| x.1) + } + IterationOrder::Arbitrary => { + let (_num_threads, Ok(result)) = parallel_runner_compute::next_any::x(self); + result + } } } } diff --git a/src/computations/xap/x.rs b/src/computations/xap/x.rs index dd0448d..6227787 100644 --- a/src/computations/xap/x.rs +++ b/src/computations/xap/x.rs @@ -1,15 +1,15 @@ use crate::{ChunkSize, IterationOrder, NumThreads, Params, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; -pub struct X +pub struct X where I: ConcurrentIter, Vo: Values, - M1: Fn(I::Item) -> Vo, + X1: Fn(I::Item) -> Vo, { params: Params, iter: I, - xap1: M1, + xap1: X1, } impl X From 8f81f4553ca06e0d766e7578f9bd5f6b26d69404 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 14:07:27 +0200 Subject: [PATCH 044/264] clean up --- src/computations/xap/collect.rs | 51 --------------------------------- src/computations/xap/next.rs | 20 ------------- src/computations/xap/reduce.rs | 11 ------- 3 files changed, 82 deletions(-) diff --git a/src/computations/xap/collect.rs b/src/computations/xap/collect.rs index beaf5cc..1edc761 100644 --- a/src/computations/xap/collect.rs +++ b/src/computations/xap/collect.rs @@ -18,39 +18,6 @@ where Vo::Item: Send, M1: Fn(I::Item) -> Vo + Sync, { - pub fn collect_into(self, pinned_vec: P) 
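The X::collect_into being deleted here moved into ParXap::par_collect_into in the previous patch. A condensed reconstruction of that destination with the generic lists (lost in extraction) restored by hand; parameter names, their order, and the element type of IntoConcurrentPinnedVec are assumptions:

use crate::computational_variants::ParXap;
use crate::generic_values::TransformableValues;
use crate::generic_values::runner_results::{ParallelCollect, ParallelCollectArbitrary};
use crate::orch::Orchestrator;
use crate::runner::parallel_runner_compute;
use crate::IterationOrder;
use orx_concurrent_iter::ConcurrentIter;
use orx_fixed_vec::IntoConcurrentPinnedVec;

impl<I, Vo, X1, R> ParXap<I, Vo, X1, R>
where
    R: Orchestrator,
    I: ConcurrentIter,
    Vo: TransformableValues,
    X1: Fn(I::Item) -> Vo + Sync,
{
    pub(crate) fn par_collect_into<P>(self, pinned_vec: P) -> (usize, P)
    where
        P: IntoConcurrentPinnedVec<Vo::Item>,
        Vo::Item: Send,
    {
        match (self.params.is_sequential(), self.params.iteration_order) {
            // sequential fallback: no runner, zero spawned threads
            (true, _) => (0, self.seq_collect_into(pinned_vec)),
            // arbitrary order: bag-based collection, then unwrap the result
            (false, IterationOrder::Arbitrary) => {
                let (n, r) = parallel_runner_compute::collect_arbitrary::x(self, pinned_vec);
                let v = match r {
                    ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec,
                    ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec,
                };
                (n, v)
            }
            // ordered: position-preserving collection, then unwrap the result
            (false, IterationOrder::Ordered) => {
                let (n, r) = parallel_runner_compute::collect_ordered::x(self, pinned_vec);
                let v = match r {
                    ParallelCollect::AllCollected { pinned_vec } => pinned_vec,
                    ParallelCollect::StoppedByWhileCondition { pinned_vec, stopped_idx: _ } => pinned_vec,
                };
                (n, v)
            }
        }
    }
}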
-> (usize, P) - where - R: ParallelRunner, - P: IntoConcurrentPinnedVec, - Vo: Values, - { - let (len, p) = self.len_and_params(); - match (p.is_sequential(), p.iteration_order) { - (true, _) => (0, self.sequential(pinned_vec)), - (false, IterationOrder::Arbitrary) => { - let (num_threads, result) = - collect_arbitrary::x(R::collection(p, len), self, pinned_vec); - let pinned_vec = match result { - ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, - ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, - }; - (num_threads, pinned_vec) - } - (false, IterationOrder::Ordered) => { - let (num_threads, result) = - collect_ordered::x(R::collection(p, len), self, pinned_vec); - let pinned_vec = match result { - ParallelCollect::AllCollected { pinned_vec } => pinned_vec, - ParallelCollect::StoppedByWhileCondition { - pinned_vec, - stopped_idx: _, - } => pinned_vec, - }; - (num_threads, pinned_vec) - } - } - } - pub fn try_collect_into( self, pinned_vec: P, @@ -73,24 +40,6 @@ where } } - fn sequential
<P>
(self, mut pinned_vec: P) -> P - where - P: IntoConcurrentPinnedVec, - { - let (_, iter, xap1) = self.destruct(); - - let iter = iter.into_seq_iter(); - for i in iter { - let vt = xap1(i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if Vo::sequential_push_to_stop(done).is_some() { - break; - } - } - - pinned_vec - } - fn try_sequential
<P>
( self, mut pinned_vec: P, diff --git a/src/computations/xap/next.rs b/src/computations/xap/next.rs index 2911bd5..af99117 100644 --- a/src/computations/xap/next.rs +++ b/src/computations/xap/next.rs @@ -12,26 +12,6 @@ where M1: Fn(I::Item) -> Vo + Sync, Vo::Item: Send, { - pub fn next(self) -> (usize, Option) - where - R: ParallelRunner, - Vo: Values, - { - let (len, p) = self.len_and_params(); - let (num_threads, Ok(result)) = next::x(R::early_return(p, len), self); - (num_threads, result.map(|x| x.1)) - } - - pub fn next_any(self) -> (usize, Option) - where - R: ParallelRunner, - Vo: Values, - { - let (len, p) = self.len_and_params(); - let (num_threads, Ok(next)) = next_any::x(R::early_return(p, len), self); - (num_threads, next) - } - pub fn try_next(self) -> (usize, ResultTryNext) where R: ParallelRunner, diff --git a/src/computations/xap/reduce.rs b/src/computations/xap/reduce.rs index 1c223ac..87e2a86 100644 --- a/src/computations/xap/reduce.rs +++ b/src/computations/xap/reduce.rs @@ -12,17 +12,6 @@ where Vo::Item: Send, M1: Fn(I::Item) -> Vo + Sync, { - pub fn reduce(self, reduce: Red) -> (usize, Option) - where - R: ParallelRunner, - Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, - Vo: Values, - { - let (len, p) = self.len_and_params(); - let (num_threads, Ok(acc)) = reduce::x(R::reduce(p, len), self, reduce); - (num_threads, acc) - } - pub fn try_reduce(self, reduce: Red) -> (usize, ResultTryReduce) where R: ParallelRunner, From 12a0983097b919c194fd53c4233cd75292c7ccc9 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 14:10:40 +0200 Subject: [PATCH 045/264] par-xap tests are fixed for orchestrator --- src/computations/xap/tests/collect.rs | 13 ++++++++----- src/computations/xap/tests/find.rs | 13 ++++++++----- src/computations/xap/tests/reduce.rs | 13 ++++++++----- 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/src/computations/xap/tests/collect.rs b/src/computations/xap/tests/collect.rs index 8257386..23f7f41 100644 --- a/src/computations/xap/tests/collect.rs +++ b/src/computations/xap/tests/collect.rs @@ -1,5 +1,8 @@ +use crate::ParIter; +use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use crate::{IterationOrder, Params, computations::X, runner::DefaultRunner}; +use crate::orch::DefaultOrchestrator; +use crate::{IterationOrder, Params}; use orx_concurrent_iter::IntoConcurrentIter; use orx_pinned_vec::PinnedVec; use orx_split_vec::SplitVec; @@ -37,9 +40,9 @@ fn x_flat_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrde let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let x = X::new(params, iter, xmap); + let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); - let (_, mut output) = x.collect_into::(output); + let mut output = x.collect_into(output); if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { expected.sort(); @@ -76,9 +79,9 @@ fn x_filter_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOr let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let x = X::new(params, iter, xmap); + let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); - let (_, mut output) = x.collect_into::(output); + let mut output = x.collect_into(output); if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { expected.sort(); diff --git a/src/computations/xap/tests/find.rs b/src/computations/xap/tests/find.rs index c57b36e..54ad109 
100644 --- a/src/computations/xap/tests/find.rs +++ b/src/computations/xap/tests/find.rs @@ -1,5 +1,8 @@ +use crate::ParIter; +use crate::Params; +use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use crate::{DefaultRunner, Params, computations::X}; +use crate::orch::DefaultOrchestrator; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; @@ -22,9 +25,9 @@ fn x_flat_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let x = X::new(params, iter, xmap); + let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); - let output = x.next::().1; + let output = x.first(); assert_eq!(expected, output); } @@ -43,9 +46,9 @@ fn x_filter_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let x = X::new(params, iter, xmap); + let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); - let output = x.next::().1; + let output = x.first(); assert_eq!(expected, output); } diff --git a/src/computations/xap/tests/reduce.rs b/src/computations/xap/tests/reduce.rs index cff8196..d5a5016 100644 --- a/src/computations/xap/tests/reduce.rs +++ b/src/computations/xap/tests/reduce.rs @@ -1,5 +1,8 @@ +use crate::ParIter; +use crate::Params; +use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use crate::{Params, computations::X, runner::DefaultRunner}; +use crate::orch::DefaultOrchestrator; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; @@ -26,9 +29,9 @@ fn x_flat_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let x = X::new(params, iter, xmap); + let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); - let (_, output) = x.reduce::(reduce); + let output = x.reduce(reduce); assert_eq!(expected, output); } @@ -51,9 +54,9 @@ fn x_filter_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let x = X::new(params, iter, xmap); + let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); - let (_, output) = x.reduce::(reduce); + let output = x.reduce(reduce); assert_eq!(expected, output); } From 627f38d61dcbe2001da58442c9a5f5285f8807f5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 14:15:51 +0200 Subject: [PATCH 046/264] xap uses direct reduce --- src/computational_variants/fallible_result/xap_result.rs | 5 +++-- src/computational_variants/xap.rs | 6 +++--- src/runner/parallel_runner_compute/reduce.rs | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 1fb08dd..c31bc61 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -4,6 +4,7 @@ use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; +use crate::runner::parallel_runner_compute; use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; @@ -105,8 +106,8 @@ where { let (orchestrator, params, iter, x1) = 
self.par.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = X::new(params, iter, x1); - x.try_reduce::(reduce).1 + let x = ParXap::new(orchestrator, params, iter, x1); + parallel_runner_compute::reduce::x(x, reduce).1 } // early exit diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index fc29d0a..773e196 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -1,9 +1,9 @@ use crate::ParIterResult; use crate::computational_variants::fallible_result::ParXapResult; -use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::{ Infallible, ParallelCollect, ParallelCollectArbitrary, }; +use crate::generic_values::{TransformableValues, Values}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute; @@ -21,7 +21,7 @@ pub struct ParXap where R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: Values, X1: Fn(I::Item) -> Vo + Sync, { orchestrator: R, @@ -34,7 +34,7 @@ impl ParXap where R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: Values, X1: Fn(I::Item) -> Vo + Sync, { pub(crate) fn new(orchestrator: R, params: Params, iter: I, xap1: X1) -> Self { diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index c27be8a..b494ac9 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -64,7 +64,7 @@ pub fn x(x: ParXap, reduce: Red) -> (usize, Res where C: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: Values, Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, From 3b681fbd3ca0a29b306731d383ed9978b7f208f5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 14:19:58 +0200 Subject: [PATCH 047/264] xap-result first flattened and simplified --- .../fallible_result/xap_result.rs | 12 +++++++++--- src/runner/parallel_runner_compute/next.rs | 2 +- src/runner/parallel_runner_compute/next_any.rs | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index c31bc61..c3c972f 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -119,10 +119,16 @@ where { let (orchestrator, params, iter, x1) = self.par.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = X::new(params, iter, x1); + let x = ParXap::new(orchestrator, params, iter, x1); match params.iteration_order { - IterationOrder::Ordered => x.try_next::().1, - IterationOrder::Arbitrary => x.try_next_any::().1, + IterationOrder::Ordered => { + let (_, result) = parallel_runner_compute::next::x(x); + result.map(|x| x.map(|y| y.1)) + } + IterationOrder::Arbitrary => { + let (_, result) = parallel_runner_compute::next_any::x(x); + result + } } } } diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 3feac11..9f5b624 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -59,7 +59,7 @@ pub fn x(x: ParXap) -> (usize, ResultNext) where C: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: Values, Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { diff --git 
a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index f523ad9..6dc5e02 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -52,7 +52,7 @@ pub fn x(x: ParXap) -> (usize, ResultNextAny) where C: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: Values, Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { From 343d61dee0646833633b9d6eab1715b9a16222fd Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 17:37:54 +0200 Subject: [PATCH 048/264] seq_try_collect_into is implemented for par iter result --- .../fallible_result/xap_result.rs | 84 +++++++++++++------ src/computational_variants/xap.rs | 5 +- src/computations/xap/collect.rs | 25 +++--- src/computations/xap/next.rs | 16 ++-- src/computations/xap/reduce.rs | 5 +- .../collect_arbitrary.rs | 4 +- 6 files changed, 87 insertions(+), 52 deletions(-) diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index c3c972f..2bfd4a0 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -1,49 +1,83 @@ use crate::computational_variants::ParXap; use crate::computations::X; -use crate::generic_values::TransformableValues; -use crate::generic_values::runner_results::Infallible; +use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible, Stop}; +use crate::generic_values::{TransformableValues, Values}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::runner::parallel_runner_compute; -use crate::{IterationOrder, ParCollectInto, ParIter}; +use crate::{IterationOrder, ParCollectInto, ParIter, Params}; use orx_concurrent_iter::ConcurrentIter; +use orx_fixed_vec::IntoConcurrentPinnedVec; use std::marker::PhantomData; -pub struct ParXapResult +pub struct ParXapResult where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, Vo::Item: IntoResult, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, { - par: ParXap, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, phantom: PhantomData<(T, E)>, } -impl ParXapResult +impl ParXapResult where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, Vo::Item: IntoResult, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, { - pub(crate) fn new(par: ParXap) -> Self { + pub(crate) fn new(orchestrator: R, params: Params, iter: I, xap1: X1) -> Self { Self { - par, + orchestrator, + params, + iter, + xap1, phantom: PhantomData, } } + + fn destruct(self) -> (R, Params, I, X1) { + (self.orchestrator, self.params, self.iter, self.xap1) + } + + fn seq_try_collect_into
<P>
(self, mut pinned_vec: P) -> Result + where + P: IntoConcurrentPinnedVec, + E: Send, + { + let (_, _, iter, x1) = self.destruct(); + let iter = iter.into_seq_iter(); + let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); + + for i in iter { + let vt = x1(i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if let Some(stop) = Fallible::::sequential_push_to_stop(done) { + match stop { + Stop::DueToWhile => return Ok(pinned_vec), + Stop::DueToError { error } => return Err(error), + } + } + } + + Ok(pinned_vec) + } } -impl ParIterResult for ParXapResult +impl ParIterResult for ParXapResult where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, Vo::Item: IntoResult, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, { type Item = T; @@ -51,21 +85,20 @@ where type RegularItem = Vo::Item; - type RegularParIter = ParXap; + type RegularParIter = ParXap; fn con_iter_len(&self) -> Option { - self.par.con_iter().try_get_len() + self.iter.try_get_len() } fn into_regular_par(self) -> Self::RegularParIter { - self.par + let (orchestrator, params, iter, x1) = self.destruct(); + ParXap::new(orchestrator, params, iter, x1) } fn from_regular_par(regular_par: Self::RegularParIter) -> Self { - Self { - par: regular_par, - phantom: PhantomData, - } + let (orchestrator, params, iter, x1) = regular_par.destruct(); + Self::new(orchestrator, params, iter, x1) } // params transformations @@ -74,11 +107,8 @@ where self, orchestrator: Q, ) -> impl ParIterResult { - let (_, params, iter, m1) = self.par.destruct(); - ParXapResult { - par: ParXap::new(orchestrator, params, iter, m1), - phantom: PhantomData, - } + let (_, params, iter, x1) = self.destruct(); + ParXapResult::new(orchestrator, params, iter, x1) } // collect @@ -90,7 +120,7 @@ where Self::Err: Send, Self::Err: Send, { - let (orchestrator, params, iter, x1) = self.par.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = X::new(params, iter, x1); output.x_try_collect_into::(x) @@ -104,7 +134,7 @@ where Self::Err: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - let (orchestrator, params, iter, x1) = self.par.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = ParXap::new(orchestrator, params, iter, x1); parallel_runner_compute::reduce::x(x, reduce).1 @@ -117,7 +147,7 @@ where Self::Item: Send, Self::Err: Send, { - let (orchestrator, params, iter, x1) = self.par.destruct(); + let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); let x = ParXap::new(orchestrator, params, iter, x1); match params.iteration_order { diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 773e196..5f2ce9c 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -89,7 +89,7 @@ where } } - fn seq_collect_into
<P>
(self, mut pinned_vec: P) -> P + pub(crate) fn seq_collect_into
<P>
(self, mut pinned_vec: P) -> P where P: IntoConcurrentPinnedVec, { @@ -262,7 +262,8 @@ where where Self::Item: IntoResult, { - ParXapResult::new(self) + let (orchestrator, params, iter, x1) = self.destruct(); + ParXapResult::new(orchestrator, params, iter, x1) } // collect diff --git a/src/computations/xap/collect.rs b/src/computations/xap/collect.rs index 1edc761..53825d7 100644 --- a/src/computations/xap/collect.rs +++ b/src/computations/xap/collect.rs @@ -26,18 +26,19 @@ where R: ParallelRunner, P: IntoConcurrentPinnedVec, { - let (len, p) = self.len_and_params(); - match (p.is_sequential(), p.iteration_order) { - (true, _) => (0, self.try_sequential(pinned_vec)), - (false, IterationOrder::Arbitrary) => { - let (nt, result) = collect_arbitrary::x(R::collection(p, len), self, pinned_vec); - (nt, result.into_result()) - } - (false, IterationOrder::Ordered) => { - let (nt, result) = collect_ordered::x(R::collection(p, len), self, pinned_vec); - (nt, result.into_result()) - } - } + todo!() + // let (len, p) = self.len_and_params(); + // match (p.is_sequential(), p.iteration_order) { + // (true, _) => (0, self.try_sequential(pinned_vec)), + // (false, IterationOrder::Arbitrary) => { + // let (nt, result) = collect_arbitrary::x(R::collection(p, len), self, pinned_vec); + // (nt, result.into_result()) + // } + // (false, IterationOrder::Ordered) => { + // let (nt, result) = collect_ordered::x(R::collection(p, len), self, pinned_vec); + // (nt, result.into_result()) + // } + // } } fn try_sequential
<P>
( diff --git a/src/computations/xap/next.rs b/src/computations/xap/next.rs index af99117..22cd2c7 100644 --- a/src/computations/xap/next.rs +++ b/src/computations/xap/next.rs @@ -16,19 +16,21 @@ where where R: ParallelRunner, { - let (len, p) = self.len_and_params(); - let (num_threads, result) = next::x(R::early_return(p, len), self); - let result = result.map(|x| x.map(|y| y.1)); - (num_threads, result) + todo!() + // let (len, p) = self.len_and_params(); + // let (num_threads, result) = next::x(R::early_return(p, len), self); + // let result = result.map(|x| x.map(|y| y.1)); + // (num_threads, result) } pub fn try_next_any(self) -> (usize, ResultTryNext) where R: ParallelRunner, { - let (len, p) = self.len_and_params(); - let (num_threads, result) = next_any::x(R::early_return(p, len), self); - (num_threads, result) + todo!() + // let (len, p) = self.len_and_params(); + // let (num_threads, result) = next_any::x(R::early_return(p, len), self); + // (num_threads, result) } } diff --git a/src/computations/xap/reduce.rs b/src/computations/xap/reduce.rs index 87e2a86..d9dc9c8 100644 --- a/src/computations/xap/reduce.rs +++ b/src/computations/xap/reduce.rs @@ -17,8 +17,9 @@ where R: ParallelRunner, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { - let (len, p) = self.len_and_params(); - reduce::x(R::reduce(p, len), self, reduce) + todo!() + // let (len, p) = self.len_and_params(); + // reduce::x(R::reduce(p, len), self, reduce) } } diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 0f5891e..294b7ba 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,10 +1,10 @@ #[cfg(test)] use crate::computational_variants::ParMap; use crate::computational_variants::ParXap; -use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::{ Infallible, ParallelCollectArbitrary, ThreadCollectArbitrary, }; +use crate::generic_values::{TransformableValues, Values}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{orch::Orchestrator, runner::ParallelRunner}; use orx_concurrent_bag::ConcurrentBag; @@ -68,7 +68,7 @@ pub fn x( where C: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: Values, Vo::Item: Send, M1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, From 21888b7e4b49ddc6758714882dc61e530cffdff8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 10 Sep 2025 19:00:28 +0200 Subject: [PATCH 049/264] par_collect_into for par-xap --- .../fallible_result/xap_result.rs | 28 +++++++++++++++++++ .../collect_ordered.rs | 4 +-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 2bfd4a0..28b8ecb 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -47,6 +47,34 @@ where (self.orchestrator, self.params, self.iter, self.xap1) } + fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, Result) + where + P: IntoConcurrentPinnedVec, + Vo::Item: Send, + E: Send, + T: Send, + { + match (self.params.is_sequential(), self.params.iteration_order) { + (true, _) => (0, self.seq_try_collect_into(pinned_vec)), + + (false, IterationOrder::Arbitrary) => { + let (orchestrator, params, iter, x1) = self.destruct(); + let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); + let x = ParXap::new(orchestrator, params, iter, x1); + let (nt, result) = parallel_runner_compute::collect_arbitrary::x(x, pinned_vec); + (nt, result.into_result()) + } + + (false, IterationOrder::Ordered) => { + let (orchestrator, params, iter, x1) = self.destruct(); + let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); + let x = ParXap::new(orchestrator, params, iter, x1); + let (nt, result) = parallel_runner_compute::collect_ordered::x(x, pinned_vec); + (nt, result.into_result()) + } + } + } + fn seq_try_collect_into
<P>
(self, mut pinned_vec: P) -> Result where P: IntoConcurrentPinnedVec, diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index a40726d..245e263 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,8 +1,8 @@ use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::{ Fallibility, Infallible, ParallelCollect, ThreadCollect, }; +use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; use crate::runner::parallel_runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; @@ -57,7 +57,7 @@ pub fn x(x: ParXap, pinned_vec: P) -> (usize, Par where C: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: Values, Vo::Item: Send, ::Error: Send, X1: Fn(I::Item) -> Vo + Sync, From b26d5cdd96cb8d290c95877a4184ca9b7c84aa16 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 10:20:34 +0200 Subject: [PATCH 050/264] rename --- src/computational_variants/fallible_result/xap_result.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 28b8ecb..50fe741 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -47,7 +47,7 @@ where (self.orchestrator, self.params, self.iter, self.xap1) } - fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, Result) + pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, Result) where P: IntoConcurrentPinnedVec, Vo::Item: Send, @@ -146,7 +146,6 @@ where C: ParCollectInto, Self::Item: Send, Self::Err: Send, - Self::Err: Send, { let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); From e638b3f950d9fa19efd7ed2c33ee6974b67ecfce Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 10:23:29 +0200 Subject: [PATCH 051/264] clean up --- src/generic_values/transformable_values.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/generic_values/transformable_values.rs b/src/generic_values/transformable_values.rs index 631aecc..2194b3e 100644 --- a/src/generic_values/transformable_values.rs +++ b/src/generic_values/transformable_values.rs @@ -51,16 +51,6 @@ pub trait TransformableValues: Values { where M: Fn(&mut U, Self::Item) -> O; - // fn u_map2( - // self, - // u: &mut U, - // map: M, - // ) -> impl TransformableValues + 'static - // where - // M: Fn(&mut U, Self::Item) -> O, - // { - // } - fn u_filter( self, u: &mut U, From eabd40fdaa17658c6c433b6a32a5024532c784bd Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 11:15:15 +0200 Subject: [PATCH 052/264] pub fallible modules --- src/computational_variants/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/computational_variants/mod.rs b/src/computational_variants/mod.rs index eea29b1..4dc451c 100644 --- a/src/computational_variants/mod.rs +++ b/src/computational_variants/mod.rs @@ -1,8 +1,8 @@ #[cfg(test)] mod tests; -pub(crate) mod fallible_option; -mod fallible_result; +pub mod fallible_option; +pub mod fallible_result; mod map; mod par; mod xap; From d0138f0d49998a5c0892b006ea38d943d24084d4 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 11:25:06 +0200 Subject: [PATCH 053/264] x_try_collect_into_2 is defined --- src/collect_into/par_collect_into.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index 2b99979..081523d 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,8 +1,10 @@ +use crate::computational_variants::fallible_result::ParXapResult; use crate::computational_variants::{ParMap, ParXap}; use crate::computations::X; -use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; +use crate::par_iter_result::IntoResult; use crate::runner::ParallelRunner; use crate::using::UParCollectIntoCore; use orx_concurrent_iter::ConcurrentIter; @@ -38,6 +40,22 @@ pub trait ParCollectIntoCore: Collection { Vo: Values, Self: Sized; + fn x_try_collect_into_2( + self, + x: ParXapResult, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues>, + X1: Fn(I::Item) -> Vo + Sync, + Vo::Item: IntoResult + Send, + E: Send, + Self: Sized, + { + todo!() + } + // test #[cfg(test)] From 26306c9b005e2eb56f697f658f8aeeb942e9534c Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 11:25:09 +0200 Subject: [PATCH 054/264] x_try_collect_into_2 --- src/collect_into/fixed_vec.rs | 22 ++++++++++++++++++- src/collect_into/split_vec.rs | 21 +++++++++++++++++- src/collect_into/vec.rs | 22 ++++++++++++++++++- .../fallible_result/xap_result.rs | 11 ++++++++-- 4 files changed, 71 
insertions(+), 5 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index 3f63219..e133142 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,9 +1,11 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::computational_variants::fallible_result::ParXapResult; use crate::computational_variants::{ParMap, ParXap}; use crate::computations::X; -use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; +use crate::par_iter_result::IntoResult; use crate::runner::ParallelRunner; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; @@ -59,6 +61,24 @@ where result.map(FixedVec::from) } + fn x_try_collect_into_2( + self, + x: ParXapResult, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues>, + X1: Fn(I::Item) -> Vo + Sync, + Vo::Item: IntoResult + Send, + E: Send, + Self: Sized, + { + let vec = Vec::from(self); + let result = vec.x_try_collect_into_2(x); + result.map(FixedVec::from) + } + // test #[cfg(test)] diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index 51ea4fc..b860564 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,8 +1,10 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::computational_variants::fallible_result::ParXapResult; use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; +use crate::par_iter_result::IntoResult; use crate::{collect_into::utils::split_vec_reserve, computations::X, runner::ParallelRunner}; use orx_concurrent_iter::ConcurrentIter; #[cfg(test)] @@ -63,6 +65,23 @@ where result } + fn x_try_collect_into_2( + mut self, + x: ParXapResult, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues>, + X1: Fn(I::Item) -> Vo + Sync, + Vo::Item: IntoResult + Send, + E: Send, + { + split_vec_reserve(&mut self, x.par_len()); + let (_num_spawned, result) = x.par_collect_into(self); + result + } + // test #[cfg(test)] diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 1bc6397..e482c62 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,10 +1,12 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::extend_vec_from_split; +use crate::computational_variants::fallible_result::ParXapResult; use crate::computational_variants::{ParMap, ParXap}; use crate::computations::X; -use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; +use crate::par_iter_result::IntoResult; use crate::runner::ParallelRunner; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; @@ -73,6 +75,24 @@ where result.map(|split_vec| extend_vec_from_split(self, split_vec)) } + fn x_try_collect_into_2( + self, + x: ParXapResult, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues>, + X1: Fn(I::Item) -> Vo + Sync, + Vo::Item: IntoResult + Send, + E: 
Send, + Self: Sized, + { + let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + let result = split_vec.x_try_collect_into_2(x); + result.map(|split_vec| extend_vec_from_split(self, split_vec)) + } + // test #[cfg(test)] diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 50fe741..41eb8ef 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -14,7 +14,7 @@ pub struct ParXapResult where R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: TransformableValues, Vo::Item: IntoResult, X1: Fn(I::Item) -> Vo + Sync, { @@ -29,7 +29,7 @@ impl ParXapResult where R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, + Vo: TransformableValues, Vo::Item: IntoResult, X1: Fn(I::Item) -> Vo + Sync, { @@ -47,6 +47,13 @@ where (self.orchestrator, self.params, self.iter, self.xap1) } + pub(crate) fn par_len(&self) -> Option { + match (self.params.is_sequential(), self.iter.try_get_len()) { + (true, _) => None, // not required to concurrent reserve when seq + (false, x) => x, + } + } + pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, Result) where P: IntoConcurrentPinnedVec, From 9799c5b581b5f877d5dcf6563ba84723c1d4ce5d Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 12:27:56 +0200 Subject: [PATCH 055/264] collect into is fixed to use orchestrator --- src/collect_into/fixed_vec.rs | 39 ++++-- src/collect_into/par_collect_into.rs | 38 ++++-- src/collect_into/split_vec.rs | 42 +++++-- src/collect_into/vec.rs | 39 ++++-- .../fallible_result/computations/collect.rs | 64 ++++++++++ .../computations/collect_into.rs | 111 ++++++++++++++++++ .../fallible_result/computations/mod.rs | 9 ++ .../fallible_result/computations/next.rs | 34 ++++++ .../fallible_result/computations/reduce.rs | 28 +++++ .../fallible_result/computations/x.rs | 69 +++++++++++ .../fallible_result/map_result.rs | 16 +-- .../fallible_result/mod.rs | 1 + .../fallible_result/par_result.rs | 16 +-- .../fallible_result/xap_result.rs | 64 +--------- .../runner_results/collect_sequential.rs | 23 +++- 15 files changed, 472 insertions(+), 121 deletions(-) create mode 100644 src/computational_variants/fallible_result/computations/collect.rs create mode 100644 src/computational_variants/fallible_result/computations/collect_into.rs create mode 100644 src/computational_variants/fallible_result/computations/mod.rs create mode 100644 src/computational_variants/fallible_result/computations/next.rs create mode 100644 src/computational_variants/fallible_result/computations/reduce.rs create mode 100644 src/computational_variants/fallible_result/computations/x.rs diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index e133142..c251399 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,7 +1,7 @@ use super::par_collect_into::ParCollectIntoCore; use crate::computational_variants::fallible_result::ParXapResult; +use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; use crate::computational_variants::{ParMap, ParXap}; -use crate::computations::X; use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; @@ -47,38 +47,57 @@ where fn x_try_collect_into( self, - x: X, + x: X, ) -> Result::Error> where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: Values, M1: Fn(I::Item) -> Vo + Sync, Self: Sized, { let vec = Vec::from(self); - let result = vec.x_try_collect_into::(x); + let result = vec.x_try_collect_into(x); result.map(FixedVec::from) } - fn x_try_collect_into_2( + fn x_try_collect_into_3( self, - x: ParXapResult, - ) -> Result::Error> + c: ParResultCollectInto, + ) -> Result where R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues>, + Vo: TransformableValues, + Vo::Item: IntoResult, X1: Fn(I::Item) -> Vo + Sync, - Vo::Item: IntoResult + Send, + O: Send, E: Send, Self: Sized, { let vec = Vec::from(self); - let result = vec.x_try_collect_into_2(x); + let result = vec.x_try_collect_into_3(c); result.map(FixedVec::from) } + // fn x_try_collect_into_2( + // self, + // x: ParXapResult, + // ) -> Result::Error> + // where + // R: Orchestrator, + // I: ConcurrentIter, + // Vo: TransformableValues>, + // X1: Fn(I::Item) -> Vo + Sync, + // Vo::Item: IntoResult + Send, + // E: Send, + // Self: Sized, + // { + // let vec = Vec::from(self); + // let result = vec.x_try_collect_into_2(x); + // result.map(FixedVec::from) + // } + // test #[cfg(test)] diff --git a/src/collect_into/par_collect_into.rs 
b/src/collect_into/par_collect_into.rs index 081523d..ea2e566 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,6 +1,6 @@ use crate::computational_variants::fallible_result::ParXapResult; +use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; use crate::computational_variants::{ParMap, ParXap}; -use crate::computations::X; use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; @@ -31,30 +31,44 @@ pub trait ParCollectIntoCore: Collection { fn x_try_collect_into( self, - x: X, + x: X, ) -> Result::Error> where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> Vo + Sync, Vo: Values, Self: Sized; - fn x_try_collect_into_2( + // fn x_try_collect_into_2( + // self, + // x: ParXapResult, + // ) -> Result::Error> + // where + // R: Orchestrator, + // I: ConcurrentIter, + // Vo: TransformableValues>, + // X1: Fn(I::Item) -> Vo + Sync, + // Vo::Item: IntoResult + Send, + // E: Send, + // Self: Sized, + // { + // todo!() + // } + + fn x_try_collect_into_3( self, - x: ParXapResult, - ) -> Result::Error> + c: ParResultCollectInto, + ) -> Result where R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues>, + Vo: TransformableValues, + Vo::Item: IntoResult, X1: Fn(I::Item) -> Vo + Sync, - Vo::Item: IntoResult + Send, + O: Send, E: Send, - Self: Sized, - { - todo!() - } + Self: Sized; // test diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index b860564..fafedea 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,11 +1,12 @@ use super::par_collect_into::ParCollectIntoCore; use crate::computational_variants::fallible_result::ParXapResult; +use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; use crate::par_iter_result::IntoResult; -use crate::{collect_into::utils::split_vec_reserve, computations::X, runner::ParallelRunner}; +use crate::{collect_into::utils::split_vec_reserve, runner::ParallelRunner}; use orx_concurrent_iter::ConcurrentIter; #[cfg(test)] use orx_pinned_vec::PinnedVec; @@ -51,37 +52,56 @@ where fn x_try_collect_into( mut self, - x: X, + x: X, ) -> Result::Error> where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: Values, M1: Fn(I::Item) -> Vo + Sync, Self: Sized, { split_vec_reserve(&mut self, x.par_len()); - let (_num_spawned, result) = x.try_collect_into::(self); + let (_num_spawned, result) = x.try_collect_into(self); result } - fn x_try_collect_into_2( + fn x_try_collect_into_3( mut self, - x: ParXapResult, - ) -> Result::Error> + c: ParResultCollectInto, + ) -> Result where R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues>, + Vo: TransformableValues, + Vo::Item: IntoResult, X1: Fn(I::Item) -> Vo + Sync, - Vo::Item: IntoResult + Send, + O: Send, E: Send, + Self: Sized, { - split_vec_reserve(&mut self, x.par_len()); - let (_num_spawned, result) = x.par_collect_into(self); + split_vec_reserve(&mut self, c.par_len()); + let (_num_spawned, result) = c.par_collect_into(self); result } + // fn x_try_collect_into_2( + // mut self, + // x: ParXapResult, + // ) -> Result::Error> + // where + // R: Orchestrator, + // 
I: ConcurrentIter, + // Vo: TransformableValues>, + // X1: Fn(I::Item) -> Vo + Sync, + // Vo::Item: IntoResult + Send, + // E: Send, + // { + // split_vec_reserve(&mut self, x.par_len()); + // let (_num_spawned, result) = x.par_collect_into(self); + // result + // } + // test #[cfg(test)] diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index e482c62..666ec38 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,8 +1,8 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::extend_vec_from_split; use crate::computational_variants::fallible_result::ParXapResult; +use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; use crate::computational_variants::{ParMap, ParXap}; -use crate::computations::X; use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; @@ -61,38 +61,57 @@ where fn x_try_collect_into( self, - x: X, + x: X, ) -> Result::Error> where - R: ParallelRunner, + R: Orchestrator, I: ConcurrentIter, Vo: Values, M1: Fn(I::Item) -> Vo + Sync, Self: Sized, { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let result = split_vec.x_try_collect_into::(x); + let result = split_vec.x_try_collect_into(x); result.map(|split_vec| extend_vec_from_split(self, split_vec)) } - fn x_try_collect_into_2( + fn x_try_collect_into_3( self, - x: ParXapResult, - ) -> Result::Error> + c: ParResultCollectInto, + ) -> Result where R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues>, + Vo: TransformableValues, + Vo::Item: IntoResult, X1: Fn(I::Item) -> Vo + Sync, - Vo::Item: IntoResult + Send, + O: Send, E: Send, Self: Sized, { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let result = split_vec.x_try_collect_into_2(x); + let result = split_vec.x_try_collect_into_3(c); result.map(|split_vec| extend_vec_from_split(self, split_vec)) } + // fn x_try_collect_into_2( + // self, + // x: ParXapResult, + // ) -> Result::Error> + // where + // R: Orchestrator, + // I: ConcurrentIter, + // Vo: TransformableValues>, + // X1: Fn(I::Item) -> Vo + Sync, + // Vo::Item: IntoResult + Send, + // E: Send, + // Self: Sized, + // { + // let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + // let result = split_vec.x_try_collect_into_2(x); + // result.map(|split_vec| extend_vec_from_split(self, split_vec)) + // } + // test #[cfg(test)] diff --git a/src/computational_variants/fallible_result/computations/collect.rs b/src/computational_variants/fallible_result/computations/collect.rs new file mode 100644 index 0000000..c2e728f --- /dev/null +++ b/src/computational_variants/fallible_result/computations/collect.rs @@ -0,0 +1,64 @@ +use super::x::X; +use crate::computational_variants::ParXap; +use crate::generic_values::runner_results::{Fallibility, Stop}; +use crate::orch::Orchestrator; +use crate::runner::parallel_runner_compute::{collect_arbitrary, collect_ordered}; +use crate::{IterationOrder, generic_values::Values}; +use orx_concurrent_iter::ConcurrentIter; +use orx_fixed_vec::IntoConcurrentPinnedVec; + +impl X +where + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(I::Item) -> Vo + Sync, +{ + pub fn try_collect_into
<P>
( + self, + pinned_vec: P, + ) -> (usize, Result::Error>) + where + P: IntoConcurrentPinnedVec, + { + let (orchestrator, params, iter, xap1) = self.destruct(); + + match (params.is_sequential(), params.iteration_order) { + (true, _) => (0, Self::try_sequential(iter, xap1, pinned_vec)), + (false, IterationOrder::Arbitrary) => { + let xap = ParXap::new(orchestrator, params, iter, xap1); + let (nt, result) = collect_arbitrary::x(xap, pinned_vec); + (nt, result.into_result()) + } + (false, IterationOrder::Ordered) => { + let xap = ParXap::new(orchestrator, params, iter, xap1); + let (nt, result) = collect_ordered::x(xap, pinned_vec); + (nt, result.into_result()) + } + } + } + + fn try_sequential
<P>
( + iter: I, + xap1: X1, + mut pinned_vec: P, + ) -> Result::Error> + where + P: IntoConcurrentPinnedVec, + { + let iter = iter.into_seq_iter(); + for i in iter { + let vt = xap1(i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if let Some(stop) = Vo::sequential_push_to_stop(done) { + match stop { + Stop::DueToWhile => return Ok(pinned_vec), + Stop::DueToError { error } => return Err(error), + } + } + } + + Ok(pinned_vec) + } +} diff --git a/src/computational_variants/fallible_result/computations/collect_into.rs b/src/computational_variants/fallible_result/computations/collect_into.rs new file mode 100644 index 0000000..55bfb7b --- /dev/null +++ b/src/computational_variants/fallible_result/computations/collect_into.rs @@ -0,0 +1,111 @@ +use std::marker::PhantomData; + +use crate::{ + IterationOrder, Params, + computational_variants::ParXap, + generic_values::{ + TransformableValues, Values, + runner_results::{Fallibility, Stop}, + }, + orch::Orchestrator, + par_iter_result::IntoResult, + runner::parallel_runner_compute, +}; +use orx_concurrent_iter::ConcurrentIter; +use orx_fixed_vec::IntoConcurrentPinnedVec; + +pub struct ParResultCollectInto +where + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + Vo::Item: IntoResult, + X1: Fn(I::Item) -> Vo + Sync, + T: Send, + E: Send, +{ + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + p: PhantomData<(T, E)>, +} + +impl ParResultCollectInto +where + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + Vo::Item: IntoResult, + X1: Fn(I::Item) -> Vo + Sync, + T: Send, + E: Send, +{ + pub fn new(orchestrator: R, params: Params, iter: I, xap1: X1) -> Self { + Self { + orchestrator, + params, + iter, + xap1, + p: PhantomData, + } + } + + pub fn par_len(&self) -> Option { + match (self.params.is_sequential(), self.iter.try_get_len()) { + (true, _) => None, // not required to concurrent reserve when seq + (false, x) => x, + } + } + + fn destruct(self) -> (R, Params, I, X1) { + (self.orchestrator, self.params, self.iter, self.xap1) + } + + fn seq_try_collect_into
<P>
(self, mut pinned_vec: P) -> Result + where + P: IntoConcurrentPinnedVec, + { + let (_, _, iter, x1) = self.destruct(); + let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); + + let iter = iter.into_seq_iter(); + for i in iter { + let vt = x1(i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if let Some(stop) = done.sequential_push_to_stop() { + match stop { + Stop::DueToWhile => return Ok(pinned_vec), + Stop::DueToError { error } => return Err(error), + } + } + } + + Ok(pinned_vec) + } + + pub fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, Result) + where + P: IntoConcurrentPinnedVec, + { + match (self.params.is_sequential(), self.params.iteration_order) { + (true, _) => (0, self.seq_try_collect_into(pinned_vec)), + + (false, IterationOrder::Arbitrary) => { + let (orchestrator, params, iter, x1) = self.destruct(); + let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); + let x = ParXap::new(orchestrator, params, iter, x1); + let (nt, result) = parallel_runner_compute::collect_arbitrary::x(x, pinned_vec); + (nt, result.into_result()) + } + + (false, IterationOrder::Ordered) => { + let (orchestrator, params, iter, x1) = self.destruct(); + let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); + let x = ParXap::new(orchestrator, params, iter, x1); + let (nt, result) = parallel_runner_compute::collect_ordered::x(x, pinned_vec); + (nt, result.into_result()) + } + } + } +} diff --git a/src/computational_variants/fallible_result/computations/mod.rs b/src/computational_variants/fallible_result/computations/mod.rs new file mode 100644 index 0000000..790362c --- /dev/null +++ b/src/computational_variants/fallible_result/computations/mod.rs @@ -0,0 +1,9 @@ +mod collect; +mod collect_into; +mod next; +mod reduce; +mod x; + +pub use x::X; + +pub use collect_into::ParResultCollectInto; diff --git a/src/computational_variants/fallible_result/computations/next.rs b/src/computational_variants/fallible_result/computations/next.rs new file mode 100644 index 0000000..82e2ef8 --- /dev/null +++ b/src/computational_variants/fallible_result/computations/next.rs @@ -0,0 +1,34 @@ +use super::x::X; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::orch::Orchestrator; +use crate::runner::parallel_runner_compute::{next, next_any}; +use crate::runner::{ParallelRunner, ParallelRunnerCompute}; +use orx_concurrent_iter::ConcurrentIter; + +impl X +where + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + M1: Fn(I::Item) -> Vo + Sync, + Vo::Item: Send, +{ + pub fn try_next(self) -> (usize, ResultTryNext) { + todo!() + // let (len, p) = self.len_and_params(); + // let (num_threads, result) = next::x(R::early_return(p, len), self); + // let result = result.map(|x| x.map(|y| y.1)); + // (num_threads, result) + } + + pub fn try_next_any(self) -> (usize, ResultTryNext) { + todo!() + // let (len, p) = self.len_and_params(); + // let (num_threads, result) = next_any::x(R::early_return(p, len), self); + // (num_threads, result) + } +} + +type ResultTryNext = + Result::Item>, <::Fallibility as Fallibility>::Error>; diff --git a/src/computational_variants/fallible_result/computations/reduce.rs b/src/computational_variants/fallible_result/computations/reduce.rs new file mode 100644 index 0000000..6bdfa27 --- /dev/null +++ b/src/computational_variants/fallible_result/computations/reduce.rs @@ -0,0 +1,28 @@ +use super::x::X; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::orch::Orchestrator; +use crate::runner::parallel_runner_compute::reduce; +use crate::runner::{ParallelRunner, ParallelRunnerCompute}; +use orx_concurrent_iter::ConcurrentIter; + +impl X +where + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(I::Item) -> Vo + Sync, +{ + pub fn try_reduce(self, reduce: Red) -> (usize, ResultTryReduce) + where + Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, + { + todo!() + // let (len, p) = self.len_and_params(); + // reduce::x(R::reduce(p, len), 
self, reduce) + } +} + +type ResultTryReduce = + Result::Item>, <::Fallibility as Fallibility>::Error>; diff --git a/src/computational_variants/fallible_result/computations/x.rs b/src/computational_variants/fallible_result/computations/x.rs new file mode 100644 index 0000000..f8a8b0c --- /dev/null +++ b/src/computational_variants/fallible_result/computations/x.rs @@ -0,0 +1,69 @@ +use crate::{ + ChunkSize, IterationOrder, NumThreads, Params, generic_values::Values, orch::Orchestrator, +}; +use orx_concurrent_iter::ConcurrentIter; + +pub struct X +where + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + X1: Fn(I::Item) -> Vo, +{ + orchestrator: R, + params: Params, + iter: I, + xap1: X1, +} + +impl X +where + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + M1: Fn(I::Item) -> Vo, +{ + pub fn new(orchestrator: R, params: Params, iter: I, xap1: M1) -> Self { + Self { + orchestrator, + params, + iter, + xap1, + } + } + + pub fn destruct(self) -> (R, Params, I, M1) { + (self.orchestrator, self.params, self.iter, self.xap1) + } + + pub fn params(&self) -> Params { + self.params + } + + pub fn len_and_params(&self) -> (Option, Params) { + (self.iter.try_get_len(), self.params) + } + + pub fn num_threads(&mut self, num_threads: impl Into) { + self.params = self.params.with_num_threads(num_threads); + } + + pub fn chunk_size(&mut self, chunk_size: impl Into) { + self.params = self.params.with_chunk_size(chunk_size); + } + + pub fn iteration_order(&mut self, collect: IterationOrder) { + self.params = self.params.with_collect_ordering(collect); + } + + pub fn iter(&self) -> &I { + &self.iter + } + + pub fn par_len(&self) -> Option { + match (self.params.is_sequential(), self.iter.try_get_len()) { + (true, _) => None, // not required to concurrent reserve when seq + (false, x) => x, + } + } +} diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 3148475..4ef88cf 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,5 +1,5 @@ use crate::computational_variants::ParMap; -use crate::computations::X; +use crate::computational_variants::fallible_result::computations::X; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::{IterationOrder, ParCollectInto, ParIter}; @@ -85,8 +85,8 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - let x = X::new(params, iter, x1); - output.x_try_collect_into::(x) + let x = X::new(orchestrator, params, iter, x1); + output.x_try_collect_into(x) } // reduce @@ -99,8 +99,8 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - let x = X::new(params, iter, x1); - x.try_reduce::(reduce).1 + let x = X::new(orchestrator, params, iter, x1); + x.try_reduce(reduce).1 } // early exit @@ -112,10 +112,10 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - let x = X::new(params, iter, x1); + let x = X::new(orchestrator, params, iter, x1); match params.iteration_order { - IterationOrder::Ordered => x.try_next::().1, - IterationOrder::Arbitrary => x.try_next_any::().1, + IterationOrder::Ordered => x.try_next().1, + IterationOrder::Arbitrary => x.try_next_any().1, } } } diff --git a/src/computational_variants/fallible_result/mod.rs 
b/src/computational_variants/fallible_result/mod.rs index 6197ee5..d2c9286 100644 --- a/src/computational_variants/fallible_result/mod.rs +++ b/src/computational_variants/fallible_result/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod computations; mod map_result; mod par_result; mod xap_result; diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 7facb75..4cb75fc 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,5 +1,5 @@ use crate::computational_variants::Par; -use crate::computations::X; +use crate::computational_variants::fallible_result::computations::X; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::{IterationOrder, ParCollectInto, ParIter}; @@ -82,8 +82,8 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - let x = X::new(params, iter, x1); - output.x_try_collect_into::(x) + let x = X::new(orchestrator, params, iter, x1); + output.x_try_collect_into(x) } // reduce @@ -96,8 +96,8 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - let x = X::new(params, iter, x1); - x.try_reduce::(reduce).1 + let x = X::new(orchestrator, params, iter, x1); + x.try_reduce(reduce).1 } // early exit @@ -109,10 +109,10 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - let x = X::new(params, iter, x1); + let x = X::new(orchestrator, params, iter, x1); match params.iteration_order { - IterationOrder::Ordered => x.try_next::().1, - IterationOrder::Arbitrary => x.try_next_any::().1, + IterationOrder::Ordered => x.try_next().1, + IterationOrder::Arbitrary => x.try_next_any().1, } } } diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 41eb8ef..81764d3 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -1,5 +1,5 @@ use crate::computational_variants::ParXap; -use crate::computations::X; +use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible, Stop}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::{DefaultOrchestrator, Orchestrator}; @@ -46,64 +46,6 @@ where fn destruct(self) -> (R, Params, I, X1) { (self.orchestrator, self.params, self.iter, self.xap1) } - - pub(crate) fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } - - pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, Result) - where - P: IntoConcurrentPinnedVec, - Vo::Item: Send, - E: Send, - T: Send, - { - match (self.params.is_sequential(), self.params.iteration_order) { - (true, _) => (0, self.seq_try_collect_into(pinned_vec)), - - (false, IterationOrder::Arbitrary) => { - let (orchestrator, params, iter, x1) = self.destruct(); - let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = ParXap::new(orchestrator, params, iter, x1); - let (nt, result) = parallel_runner_compute::collect_arbitrary::x(x, pinned_vec); - (nt, result.into_result()) - } - - (false, IterationOrder::Ordered) => { - let (orchestrator, params, iter, x1) = self.destruct(); - let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = ParXap::new(orchestrator, params, iter, x1); - let (nt, result) = parallel_runner_compute::collect_ordered::x(x, pinned_vec); - (nt, result.into_result()) - } - } - } - - fn seq_try_collect_into
<P>
(self, mut pinned_vec: P) -> Result - where - P: IntoConcurrentPinnedVec, - E: Send, - { - let (_, _, iter, x1) = self.destruct(); - let iter = iter.into_seq_iter(); - let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - - for i in iter { - let vt = x1(i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if let Some(stop) = Fallible::::sequential_push_to_stop(done) { - match stop { - Stop::DueToWhile => return Ok(pinned_vec), - Stop::DueToError { error } => return Err(error), - } - } - } - - Ok(pinned_vec) - } } impl ParIterResult for ParXapResult @@ -156,8 +98,8 @@ where { let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = X::new(params, iter, x1); - output.x_try_collect_into::(x) + let x = X::new(orchestrator, params, iter, x1); + output.x_try_collect_into(x) } // reduce diff --git a/src/generic_values/runner_results/collect_sequential.rs b/src/generic_values/runner_results/collect_sequential.rs index 3dcb4cf..c5b4cc4 100644 --- a/src/generic_values/runner_results/collect_sequential.rs +++ b/src/generic_values/runner_results/collect_sequential.rs @@ -1,7 +1,28 @@ -use crate::generic_values::runner_results::Fallibility; +use crate::generic_values::runner_results::{ + Fallibility, Fallible, Infallible, Stop, fallibility::Never, +}; pub enum SequentialPush { Done, StoppedByWhileCondition, StoppedByError { error: F::Error }, } + +impl SequentialPush { + pub fn sequential_push_to_stop(self) -> Option> { + match self { + SequentialPush::StoppedByWhileCondition => Some(Stop::DueToWhile), + _ => None, + } + } +} + +impl SequentialPush> { + pub fn sequential_push_to_stop(self) -> Option> { + match self { + SequentialPush::Done => None, + SequentialPush::StoppedByWhileCondition => Some(Stop::DueToWhile), + SequentialPush::StoppedByError { error } => Some(Stop::DueToError { error }), + } + } +} From eca8d9b80b1c83ef9e906a19a61ef3bc678a28fa Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 12:30:33 +0200 Subject: [PATCH 056/264] orchestrator checkpoint - passing --- .../fallible_result/map_result.rs | 19 +++++++++++++------ .../fallible_result/par_result.rs | 19 +++++++++++++------ 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 4ef88cf..632e581 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,7 +1,8 @@ -use crate::computational_variants::ParMap; use crate::computational_variants::fallible_result::computations::X; +use crate::computational_variants::{ParMap, ParXap}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; +use crate::runner::parallel_runner_compute; use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; @@ -99,8 +100,8 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - let x = X::new(orchestrator, params, iter, x1); - x.try_reduce(reduce).1 + let x = ParXap::new(orchestrator, params, iter, x1); + parallel_runner_compute::reduce::x(x, reduce).1 } // early exit @@ -112,10 +113,16 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - let x = X::new(orchestrator, params, iter, x1); + let x = 
ParXap::new(orchestrator, params, iter, x1); match params.iteration_order { - IterationOrder::Ordered => x.try_next().1, - IterationOrder::Arbitrary => x.try_next_any().1, + IterationOrder::Ordered => { + let (_, result) = parallel_runner_compute::next::x(x); + result.map(|x| x.map(|y| y.1)) + } + IterationOrder::Arbitrary => { + let (_, result) = parallel_runner_compute::next_any::x(x); + result + } } } } diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 4cb75fc..4d3806e 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,7 +1,8 @@ -use crate::computational_variants::Par; use crate::computational_variants::fallible_result::computations::X; +use crate::computational_variants::{Par, ParXap}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; +use crate::runner::parallel_runner_compute; use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; @@ -96,8 +97,8 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - let x = X::new(orchestrator, params, iter, x1); - x.try_reduce(reduce).1 + let x = ParXap::new(orchestrator, params, iter, x1); + parallel_runner_compute::reduce::x(x, reduce).1 } // early exit @@ -109,10 +110,16 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - let x = X::new(orchestrator, params, iter, x1); + let x = ParXap::new(orchestrator, params, iter, x1); match params.iteration_order { - IterationOrder::Ordered => x.try_next().1, - IterationOrder::Arbitrary => x.try_next_any().1, + IterationOrder::Ordered => { + let (_, result) = parallel_runner_compute::next::x(x); + result.map(|x| x.map(|y| y.1)) + } + IterationOrder::Arbitrary => { + let (_, result) = parallel_runner_compute::next_any::x(x); + result + } } } } From 130ecf4a9ee421303fccd2a6f43cb8f9e63433ef Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 12:32:35 +0200 Subject: [PATCH 057/264] clean up --- src/collect_into/fixed_vec.rs | 44 +------ src/collect_into/par_collect_into.rs | 37 +----- src/collect_into/split_vec.rs | 44 +------ src/collect_into/vec.rs | 44 +------ .../computations/collect_into.rs | 111 ------------------ .../fallible_result/computations/mod.rs | 3 - .../fallible_result/xap_result.rs | 9 +- 7 files changed, 13 insertions(+), 279 deletions(-) delete mode 100644 src/computational_variants/fallible_result/computations/collect_into.rs diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index c251399..e8e7b53 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,12 +1,9 @@ use super::par_collect_into::ParCollectIntoCore; -use crate::computational_variants::fallible_result::ParXapResult; -use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; +use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; +use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; -use crate::par_iter_result::IntoResult; -use crate::runner::ParallelRunner; use 
orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; #[cfg(test)] @@ -61,43 +58,6 @@ where result.map(FixedVec::from) } - fn x_try_collect_into_3( - self, - c: ParResultCollectInto, - ) -> Result - where - R: Orchestrator, - I: ConcurrentIter, - Vo: TransformableValues, - Vo::Item: IntoResult, - X1: Fn(I::Item) -> Vo + Sync, - O: Send, - E: Send, - Self: Sized, - { - let vec = Vec::from(self); - let result = vec.x_try_collect_into_3(c); - result.map(FixedVec::from) - } - - // fn x_try_collect_into_2( - // self, - // x: ParXapResult, - // ) -> Result::Error> - // where - // R: Orchestrator, - // I: ConcurrentIter, - // Vo: TransformableValues>, - // X1: Fn(I::Item) -> Vo + Sync, - // Vo::Item: IntoResult + Send, - // E: Send, - // Self: Sized, - // { - // let vec = Vec::from(self); - // let result = vec.x_try_collect_into_2(x); - // result.map(FixedVec::from) - // } - // test #[cfg(test)] diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index ea2e566..563527e 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,11 +1,8 @@ -use crate::computational_variants::fallible_result::ParXapResult; -use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; +use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; +use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; -use crate::par_iter_result::IntoResult; -use crate::runner::ParallelRunner; use crate::using::UParCollectIntoCore; use orx_concurrent_iter::ConcurrentIter; use orx_iterable::Collection; @@ -40,36 +37,6 @@ pub trait ParCollectIntoCore: Collection { Vo: Values, Self: Sized; - // fn x_try_collect_into_2( - // self, - // x: ParXapResult, - // ) -> Result::Error> - // where - // R: Orchestrator, - // I: ConcurrentIter, - // Vo: TransformableValues>, - // X1: Fn(I::Item) -> Vo + Sync, - // Vo::Item: IntoResult + Send, - // E: Send, - // Self: Sized, - // { - // todo!() - // } - - fn x_try_collect_into_3( - self, - c: ParResultCollectInto, - ) -> Result - where - R: Orchestrator, - I: ConcurrentIter, - Vo: TransformableValues, - Vo::Item: IntoResult, - X1: Fn(I::Item) -> Vo + Sync, - O: Send, - E: Send, - Self: Sized; - // test #[cfg(test)] diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index fafedea..467c288 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,12 +1,10 @@ use super::par_collect_into::ParCollectIntoCore; -use crate::computational_variants::fallible_result::ParXapResult; -use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; +use crate::collect_into::utils::split_vec_reserve; +use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; +use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; -use crate::par_iter_result::IntoResult; -use crate::{collect_into::utils::split_vec_reserve, runner::ParallelRunner}; use orx_concurrent_iter::ConcurrentIter; #[cfg(test)] use orx_pinned_vec::PinnedVec; @@ -66,42 +64,6 @@ 
where result } - fn x_try_collect_into_3( - mut self, - c: ParResultCollectInto, - ) -> Result - where - R: Orchestrator, - I: ConcurrentIter, - Vo: TransformableValues, - Vo::Item: IntoResult, - X1: Fn(I::Item) -> Vo + Sync, - O: Send, - E: Send, - Self: Sized, - { - split_vec_reserve(&mut self, c.par_len()); - let (_num_spawned, result) = c.par_collect_into(self); - result - } - - // fn x_try_collect_into_2( - // mut self, - // x: ParXapResult, - // ) -> Result::Error> - // where - // R: Orchestrator, - // I: ConcurrentIter, - // Vo: TransformableValues>, - // X1: Fn(I::Item) -> Vo + Sync, - // Vo::Item: IntoResult + Send, - // E: Send, - // { - // split_vec_reserve(&mut self, x.par_len()); - // let (_num_spawned, result) = x.par_collect_into(self); - // result - // } - // test #[cfg(test)] diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 666ec38..245e424 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,13 +1,10 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::extend_vec_from_split; -use crate::computational_variants::fallible_result::ParXapResult; -use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; +use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible}; +use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; -use crate::par_iter_result::IntoResult; -use crate::runner::ParallelRunner; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; use orx_split_vec::SplitVec; @@ -75,43 +72,6 @@ where result.map(|split_vec| extend_vec_from_split(self, split_vec)) } - fn x_try_collect_into_3( - self, - c: ParResultCollectInto, - ) -> Result - where - R: Orchestrator, - I: ConcurrentIter, - Vo: TransformableValues, - Vo::Item: IntoResult, - X1: Fn(I::Item) -> Vo + Sync, - O: Send, - E: Send, - Self: Sized, - { - let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let result = split_vec.x_try_collect_into_3(c); - result.map(|split_vec| extend_vec_from_split(self, split_vec)) - } - - // fn x_try_collect_into_2( - // self, - // x: ParXapResult, - // ) -> Result::Error> - // where - // R: Orchestrator, - // I: ConcurrentIter, - // Vo: TransformableValues>, - // X1: Fn(I::Item) -> Vo + Sync, - // Vo::Item: IntoResult + Send, - // E: Send, - // Self: Sized, - // { - // let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - // let result = split_vec.x_try_collect_into_2(x); - // result.map(|split_vec| extend_vec_from_split(self, split_vec)) - // } - // test #[cfg(test)] diff --git a/src/computational_variants/fallible_result/computations/collect_into.rs b/src/computational_variants/fallible_result/computations/collect_into.rs deleted file mode 100644 index 55bfb7b..0000000 --- a/src/computational_variants/fallible_result/computations/collect_into.rs +++ /dev/null @@ -1,111 +0,0 @@ -use std::marker::PhantomData; - -use crate::{ - IterationOrder, Params, - computational_variants::ParXap, - generic_values::{ - TransformableValues, Values, - runner_results::{Fallibility, Stop}, - }, - orch::Orchestrator, - par_iter_result::IntoResult, - runner::parallel_runner_compute, -}; -use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; - -pub struct 
ParResultCollectInto -where - R: Orchestrator, - I: ConcurrentIter, - Vo: TransformableValues, - Vo::Item: IntoResult, - X1: Fn(I::Item) -> Vo + Sync, - T: Send, - E: Send, -{ - orchestrator: R, - params: Params, - iter: I, - xap1: X1, - p: PhantomData<(T, E)>, -} - -impl ParResultCollectInto -where - R: Orchestrator, - I: ConcurrentIter, - Vo: TransformableValues, - Vo::Item: IntoResult, - X1: Fn(I::Item) -> Vo + Sync, - T: Send, - E: Send, -{ - pub fn new(orchestrator: R, params: Params, iter: I, xap1: X1) -> Self { - Self { - orchestrator, - params, - iter, - xap1, - p: PhantomData, - } - } - - pub fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } - - fn destruct(self) -> (R, Params, I, X1) { - (self.orchestrator, self.params, self.iter, self.xap1) - } - - fn seq_try_collect_into
<P>
(self, mut pinned_vec: P) -> Result - where - P: IntoConcurrentPinnedVec, - { - let (_, _, iter, x1) = self.destruct(); - let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - - let iter = iter.into_seq_iter(); - for i in iter { - let vt = x1(i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if let Some(stop) = done.sequential_push_to_stop() { - match stop { - Stop::DueToWhile => return Ok(pinned_vec), - Stop::DueToError { error } => return Err(error), - } - } - } - - Ok(pinned_vec) - } - - pub fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, Result) - where - P: IntoConcurrentPinnedVec, - { - match (self.params.is_sequential(), self.params.iteration_order) { - (true, _) => (0, self.seq_try_collect_into(pinned_vec)), - - (false, IterationOrder::Arbitrary) => { - let (orchestrator, params, iter, x1) = self.destruct(); - let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = ParXap::new(orchestrator, params, iter, x1); - let (nt, result) = parallel_runner_compute::collect_arbitrary::x(x, pinned_vec); - (nt, result.into_result()) - } - - (false, IterationOrder::Ordered) => { - let (orchestrator, params, iter, x1) = self.destruct(); - let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = ParXap::new(orchestrator, params, iter, x1); - let (nt, result) = parallel_runner_compute::collect_ordered::x(x, pinned_vec); - (nt, result.into_result()) - } - } - } -} diff --git a/src/computational_variants/fallible_result/computations/mod.rs b/src/computational_variants/fallible_result/computations/mod.rs index 790362c..99bec0e 100644 --- a/src/computational_variants/fallible_result/computations/mod.rs +++ b/src/computational_variants/fallible_result/computations/mod.rs @@ -1,9 +1,6 @@ mod collect; -mod collect_into; mod next; mod reduce; mod x; pub use x::X; - -pub use collect_into::ParResultCollectInto; diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 81764d3..281bcdd 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -1,13 +1,12 @@ use crate::computational_variants::ParXap; -use crate::computational_variants::fallible_result::computations::{ParResultCollectInto, X}; -use crate::generic_values::runner_results::{Fallibility, Fallible, Infallible, Stop}; -use crate::generic_values::{TransformableValues, Values}; +use crate::computational_variants::fallible_result::computations::X; +use crate::generic_values::TransformableValues; +use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::runner::parallel_runner_compute; -use crate::{IterationOrder, ParCollectInto, ParIter, Params}; +use crate::{IterationOrder, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; use std::marker::PhantomData; pub struct ParXapResult From 147546f4223c3b2f6881fdbb94eebf34e7684c2f Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 12:33:01 +0200 Subject: [PATCH 058/264] clean up --- .../fallible_result/computations/mod.rs | 2 -- .../fallible_result/computations/next.rs | 34 ------------------- .../fallible_result/computations/reduce.rs | 28 --------------- 3 files changed, 64 deletions(-) delete mode 100644 src/computational_variants/fallible_result/computations/next.rs delete mode 100644 src/computational_variants/fallible_result/computations/reduce.rs diff --git a/src/computational_variants/fallible_result/computations/mod.rs b/src/computational_variants/fallible_result/computations/mod.rs index 99bec0e..75a939f 100644 --- a/src/computational_variants/fallible_result/computations/mod.rs +++ b/src/computational_variants/fallible_result/computations/mod.rs @@ -1,6 +1,4 @@ mod collect; -mod next; -mod reduce; mod x; pub use x::X; diff --git a/src/computational_variants/fallible_result/computations/next.rs 
b/src/computational_variants/fallible_result/computations/next.rs deleted file mode 100644 index 82e2ef8..0000000 --- a/src/computational_variants/fallible_result/computations/next.rs +++ /dev/null @@ -1,34 +0,0 @@ -use super::x::X; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Infallible}; -use crate::orch::Orchestrator; -use crate::runner::parallel_runner_compute::{next, next_any}; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use orx_concurrent_iter::ConcurrentIter; - -impl X -where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync, - Vo::Item: Send, -{ - pub fn try_next(self) -> (usize, ResultTryNext) { - todo!() - // let (len, p) = self.len_and_params(); - // let (num_threads, result) = next::x(R::early_return(p, len), self); - // let result = result.map(|x| x.map(|y| y.1)); - // (num_threads, result) - } - - pub fn try_next_any(self) -> (usize, ResultTryNext) { - todo!() - // let (len, p) = self.len_and_params(); - // let (num_threads, result) = next_any::x(R::early_return(p, len), self); - // (num_threads, result) - } -} - -type ResultTryNext = - Result::Item>, <::Fallibility as Fallibility>::Error>; diff --git a/src/computational_variants/fallible_result/computations/reduce.rs b/src/computational_variants/fallible_result/computations/reduce.rs deleted file mode 100644 index 6bdfa27..0000000 --- a/src/computational_variants/fallible_result/computations/reduce.rs +++ /dev/null @@ -1,28 +0,0 @@ -use super::x::X; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Infallible}; -use crate::orch::Orchestrator; -use crate::runner::parallel_runner_compute::reduce; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use orx_concurrent_iter::ConcurrentIter; - -impl X -where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(I::Item) -> Vo + Sync, -{ - pub fn try_reduce(self, reduce: Red) -> (usize, ResultTryReduce) - where - Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, - { - todo!() - // let (len, p) = self.len_and_params(); - // reduce::x(R::reduce(p, len), self, reduce) - } -} - -type ResultTryReduce = - Result::Item>, <::Fallibility as Fallibility>::Error>; From 3b81cc52d753b96e822676751e3648814c93ef19 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:07:25 +0200 Subject: [PATCH 059/264] clean --- src/collect_into/fixed_vec.rs | 7 +++---- src/collect_into/vec.rs | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index e8e7b53..786cb95 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -28,7 +28,7 @@ where O: Send, { let vec = Vec::from(self); - FixedVec::from(vec.m_collect_into::(m)) + FixedVec::from(vec.m_collect_into(m)) } fn x_collect_into(self, x: ParXap) -> Self @@ -39,7 +39,7 @@ where X1: Fn(I::Item) -> Vo + Sync, { let vec = Vec::from(self); - FixedVec::from(vec.x_collect_into::(x)) + FixedVec::from(vec.x_collect_into(x)) } fn x_try_collect_into( @@ -54,8 +54,7 @@ where Self: Sized, { let vec = Vec::from(self); - let result = vec.x_try_collect_into(x); - result.map(FixedVec::from) + vec.x_try_collect_into(x).map(FixedVec::from) } // test diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 245e424..04ccbf7 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -52,7 +52,7 @@ where X1: Fn(I::Item) -> Vo + Sync, { let split_vec = 
SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let split_vec = split_vec.x_collect_into::(x); + let split_vec = split_vec.x_collect_into(x); extend_vec_from_split(self, split_vec) } From e0c8f92342c39a468b6887851c95daaf398dd7b6 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:16:34 +0200 Subject: [PATCH 060/264] x_try_collect_into2 is defined to simplifie x_try_collect_into --- src/collect_into/fixed_vec.rs | 20 +++++++ src/collect_into/par_collect_into.rs | 18 ++++++ src/collect_into/split_vec.rs | 27 ++++++++- src/collect_into/vec.rs | 20 +++++++ .../fallible_result/computations/collect.rs | 58 +++++++++++++++++++ .../fallible_result/computations/mod.rs | 1 + 6 files changed, 143 insertions(+), 1 deletion(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index 786cb95..927a3bc 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,4 +1,5 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::Params; use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; @@ -57,6 +58,25 @@ where vec.x_try_collect_into(x).map(FixedVec::from) } + fn x_try_collect_into2( + self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + X1: Fn(I::Item) -> Vo + Sync, + Vo: Values, + Self: Sized, + { + let vec = Vec::from(self); + vec.x_try_collect_into2(orchestrator, params, iter, xap1) + .map(FixedVec::from) + } + // test #[cfg(test)] diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index 563527e..afacef5 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,3 +1,4 @@ +use crate::Params; use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; @@ -37,6 +38,23 @@ pub trait ParCollectIntoCore: Collection { Vo: Values, Self: Sized; + fn x_try_collect_into2( + self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + X1: Fn(I::Item) -> Vo + Sync, + Vo: Values, + Self: Sized, + { + todo!() + } + // test #[cfg(test)] diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index 467c288..d37393e 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,6 +1,7 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::Params; use crate::collect_into::utils::split_vec_reserve; -use crate::computational_variants::fallible_result::computations::X; +use crate::computational_variants::fallible_result::computations::{X, try_collect_into}; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; @@ -64,6 +65,30 @@ where result } + fn x_try_collect_into2( + mut self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + X1: Fn(I::Item) -> Vo + Sync, + Vo: Values, + Self: Sized, + { + let par_len = match (params.is_sequential(), iter.try_get_len()) { + (true, _) => None, // not required to concurrent reserve when seq + (false, x) => x, + }; + + split_vec_reserve(&mut self, 
par_len); + let (_num_spawned, result) = try_collect_into(orchestrator, params, iter, xap1, self); + result + } + // test #[cfg(test)] diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 04ccbf7..c3b1e00 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,4 +1,5 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::Params; use crate::collect_into::utils::extend_vec_from_split; use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; @@ -72,6 +73,25 @@ where result.map(|split_vec| extend_vec_from_split(self, split_vec)) } + fn x_try_collect_into2( + self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + X1: Fn(I::Item) -> Vo + Sync, + Vo: Values, + Self: Sized, + { + let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + let result = split_vec.x_try_collect_into2(orchestrator, params, iter, xap1); + result.map(|split_vec| extend_vec_from_split(self, split_vec)) + } + // test #[cfg(test)] diff --git a/src/computational_variants/fallible_result/computations/collect.rs b/src/computational_variants/fallible_result/computations/collect.rs index c2e728f..2ad675d 100644 --- a/src/computational_variants/fallible_result/computations/collect.rs +++ b/src/computational_variants/fallible_result/computations/collect.rs @@ -1,4 +1,5 @@ use super::x::X; +use crate::Params; use crate::computational_variants::ParXap; use crate::generic_values::runner_results::{Fallibility, Stop}; use crate::orch::Orchestrator; @@ -7,6 +8,63 @@ use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; +pub fn try_collect_into( + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + pinned_vec: P, +) -> (usize, Result::Error>) +where + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + match (params.is_sequential(), params.iteration_order) { + (true, _) => (0, try_sequential(iter, xap1, pinned_vec)), + (false, IterationOrder::Arbitrary) => { + let xap = ParXap::new(orchestrator, params, iter, xap1); + let (nt, result) = collect_arbitrary::x(xap, pinned_vec); + (nt, result.into_result()) + } + (false, IterationOrder::Ordered) => { + let xap = ParXap::new(orchestrator, params, iter, xap1); + let (nt, result) = collect_ordered::x(xap, pinned_vec); + (nt, result.into_result()) + } + } +} + +fn try_sequential( + iter: I, + xap1: X1, + mut pinned_vec: P, +) -> Result::Error> +where + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + let iter = iter.into_seq_iter(); + for i in iter { + let vt = xap1(i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if let Some(stop) = Vo::sequential_push_to_stop(done) { + match stop { + Stop::DueToWhile => return Ok(pinned_vec), + Stop::DueToError { error } => return Err(error), + } + } + } + + Ok(pinned_vec) +} + impl X where R: Orchestrator, diff --git a/src/computational_variants/fallible_result/computations/mod.rs b/src/computational_variants/fallible_result/computations/mod.rs index 75a939f..43eb5ac 100644 --- a/src/computational_variants/fallible_result/computations/mod.rs +++ b/src/computational_variants/fallible_result/computations/mod.rs @@ -1,4 +1,5 @@ mod collect; mod x; +pub use collect::try_collect_into; pub use x::X; From 
4c934cad6bab53ba9f98f2678370ca84fbc9045f Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:17:55 +0200 Subject: [PATCH 061/264] collect into implementations simplified --- src/collect_into/fixed_vec.rs | 20 ++----------------- src/collect_into/par_collect_into.rs | 18 ++--------------- src/collect_into/split_vec.rs | 18 +---------------- src/collect_into/vec.rs | 20 ++----------------- .../fallible_result/map_result.rs | 3 +-- .../fallible_result/par_result.rs | 3 +-- .../fallible_result/xap_result.rs | 3 +-- 7 files changed, 10 insertions(+), 75 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index 927a3bc..47f618e 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -43,22 +43,7 @@ where FixedVec::from(vec.x_collect_into(x)) } - fn x_try_collect_into( - self, - x: X, - ) -> Result::Error> - where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync, - Self: Sized, - { - let vec = Vec::from(self); - vec.x_try_collect_into(x).map(FixedVec::from) - } - - fn x_try_collect_into2( + fn x_try_collect_into( self, orchestrator: R, params: Params, @@ -70,10 +55,9 @@ where I: ConcurrentIter, X1: Fn(I::Item) -> Vo + Sync, Vo: Values, - Self: Sized, { let vec = Vec::from(self); - vec.x_try_collect_into2(orchestrator, params, iter, xap1) + vec.x_try_collect_into(orchestrator, params, iter, xap1) .map(FixedVec::from) } diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index afacef5..d1dd016 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -27,18 +27,7 @@ pub trait ParCollectIntoCore: Collection { Vo: TransformableValues, X1: Fn(I::Item) -> Vo + Sync; - fn x_try_collect_into( - self, - x: X, - ) -> Result::Error> - where - R: Orchestrator, - I: ConcurrentIter, - M1: Fn(I::Item) -> Vo + Sync, - Vo: Values, - Self: Sized; - - fn x_try_collect_into2( + fn x_try_collect_into( self, orchestrator: R, params: Params, @@ -50,10 +39,7 @@ pub trait ParCollectIntoCore: Collection { I: ConcurrentIter, X1: Fn(I::Item) -> Vo + Sync, Vo: Values, - Self: Sized, - { - todo!() - } + Self: Sized; // test diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index d37393e..1555d06 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -49,23 +49,7 @@ where pinned_vec } - fn x_try_collect_into( - mut self, - x: X, - ) -> Result::Error> - where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync, - Self: Sized, - { - split_vec_reserve(&mut self, x.par_len()); - let (_num_spawned, result) = x.try_collect_into(self); - result - } - - fn x_try_collect_into2( + fn x_try_collect_into( mut self, orchestrator: R, params: Params, diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index c3b1e00..abc0f49 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -57,23 +57,7 @@ where extend_vec_from_split(self, split_vec) } - fn x_try_collect_into( - self, - x: X, - ) -> Result::Error> - where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync, - Self: Sized, - { - let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let result = split_vec.x_try_collect_into(x); - result.map(|split_vec| extend_vec_from_split(self, split_vec)) - } - - fn x_try_collect_into2( + fn x_try_collect_into( self, orchestrator: R, params: Params, @@ -88,7 +72,7 @@ where Self: Sized, { let split_vec = 
SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let result = split_vec.x_try_collect_into2(orchestrator, params, iter, xap1); + let result = split_vec.x_try_collect_into(orchestrator, params, iter, xap1); result.map(|split_vec| extend_vec_from_split(self, split_vec)) } diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 632e581..ce2fbb2 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -86,8 +86,7 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - let x = X::new(orchestrator, params, iter, x1); - output.x_try_collect_into(x) + output.x_try_collect_into(orchestrator, params, iter, x1) } // reduce diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 4d3806e..497703e 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -83,8 +83,7 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - let x = X::new(orchestrator, params, iter, x1); - output.x_try_collect_into(x) + output.x_try_collect_into(orchestrator, params, iter, x1) } // reduce diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 281bcdd..c531988 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -97,8 +97,7 @@ where { let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = X::new(orchestrator, params, iter, x1); - output.x_try_collect_into(x) + output.x_try_collect_into(orchestrator, params, iter, x1) } // reduce From bf76a845780ac5010cb80d23b2c2addc63ad2f0e Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:20:14 +0200 Subject: [PATCH 062/264] clean up --- src/collect_into/fixed_vec.rs | 1 - src/collect_into/par_collect_into.rs | 1 - src/collect_into/split_vec.rs | 2 +- src/collect_into/vec.rs | 1 - .../fallible_result/computations/collect.rs | 57 --------------- .../fallible_result/computations/mod.rs | 2 - .../fallible_result/computations/x.rs | 69 ------------------- .../fallible_result/map_result.rs | 1 - .../fallible_result/par_result.rs | 1 - .../fallible_result/xap_result.rs | 1 - 10 files changed, 1 insertion(+), 135 deletions(-) delete mode 100644 src/computational_variants/fallible_result/computations/x.rs diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index 47f618e..9629eed 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,6 +1,5 @@ use super::par_collect_into::ParCollectIntoCore; use crate::Params; -use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index d1dd016..34c69b9 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,5 +1,4 @@ use crate::Params; -use crate::computational_variants::fallible_result::computations::X; use 
crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index 1555d06..0d0517d 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,7 +1,7 @@ use super::par_collect_into::ParCollectIntoCore; use crate::Params; use crate::collect_into::utils::split_vec_reserve; -use crate::computational_variants::fallible_result::computations::{X, try_collect_into}; +use crate::computational_variants::fallible_result::computations::try_collect_into; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index abc0f49..31227a7 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,7 +1,6 @@ use super::par_collect_into::ParCollectIntoCore; use crate::Params; use crate::collect_into::utils::extend_vec_from_split; -use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; diff --git a/src/computational_variants/fallible_result/computations/collect.rs b/src/computational_variants/fallible_result/computations/collect.rs index 2ad675d..c38d45b 100644 --- a/src/computational_variants/fallible_result/computations/collect.rs +++ b/src/computational_variants/fallible_result/computations/collect.rs @@ -1,4 +1,3 @@ -use super::x::X; use crate::Params; use crate::computational_variants::ParXap; use crate::generic_values::runner_results::{Fallibility, Stop}; @@ -64,59 +63,3 @@ where Ok(pinned_vec) } - -impl X -where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(I::Item) -> Vo + Sync, -{ - pub fn try_collect_into
<P>
( - self, - pinned_vec: P, - ) -> (usize, Result::Error>) - where - P: IntoConcurrentPinnedVec, - { - let (orchestrator, params, iter, xap1) = self.destruct(); - - match (params.is_sequential(), params.iteration_order) { - (true, _) => (0, Self::try_sequential(iter, xap1, pinned_vec)), - (false, IterationOrder::Arbitrary) => { - let xap = ParXap::new(orchestrator, params, iter, xap1); - let (nt, result) = collect_arbitrary::x(xap, pinned_vec); - (nt, result.into_result()) - } - (false, IterationOrder::Ordered) => { - let xap = ParXap::new(orchestrator, params, iter, xap1); - let (nt, result) = collect_ordered::x(xap, pinned_vec); - (nt, result.into_result()) - } - } - } - - fn try_sequential
<P>
( - iter: I, - xap1: X1, - mut pinned_vec: P, - ) -> Result::Error> - where - P: IntoConcurrentPinnedVec, - { - let iter = iter.into_seq_iter(); - for i in iter { - let vt = xap1(i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if let Some(stop) = Vo::sequential_push_to_stop(done) { - match stop { - Stop::DueToWhile => return Ok(pinned_vec), - Stop::DueToError { error } => return Err(error), - } - } - } - - Ok(pinned_vec) - } -} diff --git a/src/computational_variants/fallible_result/computations/mod.rs b/src/computational_variants/fallible_result/computations/mod.rs index 43eb5ac..1581c9f 100644 --- a/src/computational_variants/fallible_result/computations/mod.rs +++ b/src/computational_variants/fallible_result/computations/mod.rs @@ -1,5 +1,3 @@ mod collect; -mod x; pub use collect::try_collect_into; -pub use x::X; diff --git a/src/computational_variants/fallible_result/computations/x.rs b/src/computational_variants/fallible_result/computations/x.rs deleted file mode 100644 index f8a8b0c..0000000 --- a/src/computational_variants/fallible_result/computations/x.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crate::{ - ChunkSize, IterationOrder, NumThreads, Params, generic_values::Values, orch::Orchestrator, -}; -use orx_concurrent_iter::ConcurrentIter; - -pub struct X -where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - X1: Fn(I::Item) -> Vo, -{ - orchestrator: R, - params: Params, - iter: I, - xap1: X1, -} - -impl X -where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo, -{ - pub fn new(orchestrator: R, params: Params, iter: I, xap1: M1) -> Self { - Self { - orchestrator, - params, - iter, - xap1, - } - } - - pub fn destruct(self) -> (R, Params, I, M1) { - (self.orchestrator, self.params, self.iter, self.xap1) - } - - pub fn params(&self) -> Params { - self.params - } - - pub fn len_and_params(&self) -> (Option, Params) { - (self.iter.try_get_len(), self.params) - } - - pub fn num_threads(&mut self, num_threads: impl Into) { - self.params = self.params.with_num_threads(num_threads); - } - - pub fn chunk_size(&mut self, chunk_size: impl Into) { - self.params = self.params.with_chunk_size(chunk_size); - } - - pub fn iteration_order(&mut self, collect: IterationOrder) { - self.params = self.params.with_collect_ordering(collect); - } - - pub fn iter(&self) -> &I { - &self.iter - } - - pub fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } -} diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index ce2fbb2..0f50859 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,4 +1,3 @@ -use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{ParMap, ParXap}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 497703e..8395e93 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,4 +1,3 @@ -use crate::computational_variants::fallible_result::computations::X; use crate::computational_variants::{Par, ParXap}; use 
crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index c531988..b13e28e 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -1,5 +1,4 @@ use crate::computational_variants::ParXap; -use crate::computational_variants::fallible_result::computations::X; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; From 75e7ced1588abb5066d71f644ae37bb6bbe4d02d Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:29:24 +0200 Subject: [PATCH 063/264] simplify collect into code --- src/collect_into/split_vec.rs | 23 +++++++++++++---------- src/collect_into/utils.rs | 8 +++++++- src/collect_into/vec.rs | 4 ++-- src/computational_variants/map.rs | 7 ------- src/computational_variants/xap.rs | 7 ------- src/using/collect_into/split_vec.rs | 12 ++++++++++-- src/using/collect_into/vec.rs | 2 +- src/using/computations/u_map/m.rs | 9 +-------- src/using/computations/u_xap/x.rs | 9 +-------- 9 files changed, 35 insertions(+), 46 deletions(-) diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index 0d0517d..c862eb2 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,11 +1,11 @@ use super::par_collect_into::ParCollectIntoCore; -use crate::Params; use crate::collect_into::utils::split_vec_reserve; use crate::computational_variants::fallible_result::computations::try_collect_into; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; +use crate::{ParIter, Params}; use orx_concurrent_iter::ConcurrentIter; #[cfg(test)] use orx_pinned_vec::PinnedVec; @@ -21,7 +21,7 @@ where fn empty(iter_len: Option) -> Self { let mut vec = Self::pseudo_default(); - split_vec_reserve(&mut vec, iter_len); + split_vec_reserve(&mut vec, false, iter_len); vec } @@ -32,7 +32,11 @@ where M1: Fn(I::Item) -> O + Sync, O: Send, { - split_vec_reserve(&mut self, m.par_len()); + split_vec_reserve( + &mut self, + m.params().is_sequential(), + m.con_iter().try_get_len(), + ); let (_, pinned_vec) = m.par_collect_into(self); pinned_vec } @@ -44,7 +48,11 @@ where Vo: TransformableValues, X1: Fn(I::Item) -> Vo + Sync, { - split_vec_reserve(&mut self, x.par_len()); + split_vec_reserve( + &mut self, + x.params().is_sequential(), + x.con_iter().try_get_len(), + ); let (_num_spawned, pinned_vec) = x.par_collect_into(self); pinned_vec } @@ -63,12 +71,7 @@ where Vo: Values, Self: Sized, { - let par_len = match (params.is_sequential(), iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - }; - - split_vec_reserve(&mut self, par_len); + split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); let (_num_spawned, result) = try_collect_into(orchestrator, params, iter, xap1, self); result } diff --git a/src/collect_into/utils.rs b/src/collect_into/utils.rs index 356468c..6153e2a 100644 --- a/src/collect_into/utils.rs +++ b/src/collect_into/utils.rs @@ -20,8 +20,14 @@ where pub fn split_vec_reserve( split_vec: &mut SplitVec, - len_to_extend: Option, + is_sequential: bool, + iter_len: Option, ) { + let 
len_to_extend = match (is_sequential, iter_len) { + (true, _) => None, // not required to concurrent reserve when seq + (false, x) => x, + }; + match len_to_extend { None => { let capacity_bound = split_vec.capacity_bound(); diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 31227a7..4dce1aa 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,10 +1,10 @@ use super::par_collect_into::ParCollectIntoCore; -use crate::Params; use crate::collect_into::utils::extend_vec_from_split; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; +use crate::{ParIter, Params}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; use orx_split_vec::SplitVec; @@ -29,7 +29,7 @@ where M1: Fn(I::Item) -> O + Sync, O: Send, { - match m.par_len() { + match m.con_iter().try_get_len() { None => { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); let split_vec = split_vec.m_collect_into(m); diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index da3a32e..3057ea6 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -44,13 +44,6 @@ where (self.orchestrator, self.params, self.iter, self.map1) } - pub(crate) fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } - pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, P) where P: IntoConcurrentPinnedVec, diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 5f2ce9c..0e4adab 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -50,13 +50,6 @@ where (self.orchestrator, self.params, self.iter, self.xap1) } - pub(crate) fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } - pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, P) where P: IntoConcurrentPinnedVec, diff --git a/src/using/collect_into/split_vec.rs b/src/using/collect_into/split_vec.rs index 63bf619..23bc9f9 100644 --- a/src/using/collect_into/split_vec.rs +++ b/src/using/collect_into/split_vec.rs @@ -21,7 +21,11 @@ where I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync, { - split_vec_reserve(&mut self, m.par_len()); + split_vec_reserve( + &mut self, + m.params().is_sequential(), + m.iter().try_get_len(), + ); let (_num_spawned, pinned_vec) = m.collect_into::(self); pinned_vec } @@ -34,7 +38,11 @@ where Vo: Values, M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, { - split_vec_reserve(&mut self, x.par_len()); + split_vec_reserve( + &mut self, + x.params().is_sequential(), + x.iter().try_get_len(), + ); let (_num_spawned, pinned_vec) = x.collect_into::(self); pinned_vec } diff --git a/src/using/collect_into/vec.rs b/src/using/collect_into/vec.rs index 1f8babe..9b3385e 100644 --- a/src/using/collect_into/vec.rs +++ b/src/using/collect_into/vec.rs @@ -20,7 +20,7 @@ where I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync, { - match m.par_len() { + match m.iter().try_get_len() { None => { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); let split_vec = split_vec.u_m_collect_into::(m); diff --git a/src/using/computations/u_map/m.rs b/src/using/computations/u_map/m.rs index dc46da0..f52b10f 100644 --- a/src/using/computations/u_map/m.rs +++ b/src/using/computations/u_map/m.rs @@ -52,14 +52,7 @@ where self.params = self.params.with_collect_ordering(collect); } - pub fn iter(&self) -> &I { + pub(crate) fn iter(&self) -> &I { &self.iter } - - pub fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } } diff --git a/src/using/computations/u_xap/x.rs b/src/using/computations/u_xap/x.rs index ea5b912..dad9109 100644 --- a/src/using/computations/u_xap/x.rs +++ b/src/using/computations/u_xap/x.rs @@ -55,14 +55,7 @@ where self.params = self.params.with_collect_ordering(collect); } - pub fn iter(&self) -> &I { + pub(crate) fn iter(&self) -> &I { &self.iter } - - pub fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } } From 2c6b173d88c408cbaa75144a54600599708b0d8b Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:33:18 +0200 Subject: [PATCH 064/264] clean up --- src/computational_variants/tests/mod.rs | 1 + .../tests/xap}/collect.rs | 0 .../tests/xap}/find.rs | 0 .../tests/xap}/mod.rs | 0 .../tests/xap}/reduce.rs | 0 src/computations/mod.rs | 2 - src/computations/xap/collect.rs | 67 ------------------- src/computations/xap/mod.rs | 9 --- src/computations/xap/next.rs | 38 ----------- src/computations/xap/reduce.rs | 27 -------- src/computations/xap/x.rs | 59 ---------------- src/runner/parallel_runner_compute/reduce.rs | 6 +- src/using/collect_into/split_vec.rs | 2 +- src/using/computational_variants/u_xap.rs | 2 +- src/using/computations/u_xap/x.rs | 2 +- 15 files changed, 6 insertions(+), 209 deletions(-) rename src/{computations/xap/tests => computational_variants/tests/xap}/collect.rs (100%) rename src/{computations/xap/tests => computational_variants/tests/xap}/find.rs (100%) rename src/{computations/xap/tests => computational_variants/tests/xap}/mod.rs (100%) rename src/{computations/xap/tests => 
computational_variants/tests/xap}/reduce.rs (100%) delete mode 100644 src/computations/xap/collect.rs delete mode 100644 src/computations/xap/mod.rs delete mode 100644 src/computations/xap/next.rs delete mode 100644 src/computations/xap/reduce.rs delete mode 100644 src/computations/xap/x.rs diff --git a/src/computational_variants/tests/mod.rs b/src/computational_variants/tests/mod.rs index a27cd6a..54f1bfc 100644 --- a/src/computational_variants/tests/mod.rs +++ b/src/computational_variants/tests/mod.rs @@ -11,3 +11,4 @@ mod range; mod slice; mod sum; mod vectors; +mod xap; diff --git a/src/computations/xap/tests/collect.rs b/src/computational_variants/tests/xap/collect.rs similarity index 100% rename from src/computations/xap/tests/collect.rs rename to src/computational_variants/tests/xap/collect.rs diff --git a/src/computations/xap/tests/find.rs b/src/computational_variants/tests/xap/find.rs similarity index 100% rename from src/computations/xap/tests/find.rs rename to src/computational_variants/tests/xap/find.rs diff --git a/src/computations/xap/tests/mod.rs b/src/computational_variants/tests/xap/mod.rs similarity index 100% rename from src/computations/xap/tests/mod.rs rename to src/computational_variants/tests/xap/mod.rs diff --git a/src/computations/xap/tests/reduce.rs b/src/computational_variants/tests/xap/reduce.rs similarity index 100% rename from src/computations/xap/tests/reduce.rs rename to src/computational_variants/tests/xap/reduce.rs diff --git a/src/computations/mod.rs b/src/computations/mod.rs index e7005df..dd6785d 100644 --- a/src/computations/mod.rs +++ b/src/computations/mod.rs @@ -1,7 +1,5 @@ mod default_fns; mod heap_sort; -mod xap; pub(crate) use default_fns::*; pub(crate) use heap_sort::heap_sort_into; -pub(crate) use xap::X; diff --git a/src/computations/xap/collect.rs b/src/computations/xap/collect.rs deleted file mode 100644 index 53825d7..0000000 --- a/src/computations/xap/collect.rs +++ /dev/null @@ -1,67 +0,0 @@ -use super::x::X; -use crate::generic_values::runner_results::{ - Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, -}; -use crate::runner::parallel_runner_compute::{collect_arbitrary, collect_ordered}; -use crate::{ - IterationOrder, - generic_values::Values, - runner::{ParallelRunner, ParallelRunnerCompute}, -}; -use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; - -impl X -where - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - M1: Fn(I::Item) -> Vo + Sync, -{ - pub fn try_collect_into( - self, - pinned_vec: P, - ) -> (usize, Result::Error>) - where - R: ParallelRunner, - P: IntoConcurrentPinnedVec, - { - todo!() - // let (len, p) = self.len_and_params(); - // match (p.is_sequential(), p.iteration_order) { - // (true, _) => (0, self.try_sequential(pinned_vec)), - // (false, IterationOrder::Arbitrary) => { - // let (nt, result) = collect_arbitrary::x(R::collection(p, len), self, pinned_vec); - // (nt, result.into_result()) - // } - // (false, IterationOrder::Ordered) => { - // let (nt, result) = collect_ordered::x(R::collection(p, len), self, pinned_vec); - // (nt, result.into_result()) - // } - // } - } - - fn try_sequential
<P>
( - self, - mut pinned_vec: P, - ) -> Result::Error> - where - P: IntoConcurrentPinnedVec, - { - let (_, iter, xap1) = self.destruct(); - - let iter = iter.into_seq_iter(); - for i in iter { - let vt = xap1(i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if let Some(stop) = Vo::sequential_push_to_stop(done) { - match stop { - Stop::DueToWhile => return Ok(pinned_vec), - Stop::DueToError { error } => return Err(error), - } - } - } - - Ok(pinned_vec) - } -} diff --git a/src/computations/xap/mod.rs b/src/computations/xap/mod.rs deleted file mode 100644 index bf1d741..0000000 --- a/src/computations/xap/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -#[cfg(test)] -mod tests; - -mod collect; -mod next; -mod reduce; -mod x; - -pub use x::X; diff --git a/src/computations/xap/next.rs b/src/computations/xap/next.rs deleted file mode 100644 index 22cd2c7..0000000 --- a/src/computations/xap/next.rs +++ /dev/null @@ -1,38 +0,0 @@ -use super::x::X; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Infallible}; -use crate::runner::parallel_runner_compute::{next, next_any}; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use orx_concurrent_iter::ConcurrentIter; - -impl X -where - I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo + Sync, - Vo::Item: Send, -{ - pub fn try_next(self) -> (usize, ResultTryNext) - where - R: ParallelRunner, - { - todo!() - // let (len, p) = self.len_and_params(); - // let (num_threads, result) = next::x(R::early_return(p, len), self); - // let result = result.map(|x| x.map(|y| y.1)); - // (num_threads, result) - } - - pub fn try_next_any(self) -> (usize, ResultTryNext) - where - R: ParallelRunner, - { - todo!() - // let (len, p) = self.len_and_params(); - // let (num_threads, result) = next_any::x(R::early_return(p, len), self); - // (num_threads, result) - } -} - -type ResultTryNext = - Result::Item>, <::Fallibility as Fallibility>::Error>; diff --git a/src/computations/xap/reduce.rs b/src/computations/xap/reduce.rs deleted file mode 100644 index d9dc9c8..0000000 --- a/src/computations/xap/reduce.rs +++ /dev/null @@ -1,27 +0,0 @@ -use super::x::X; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Infallible}; -use crate::runner::parallel_runner_compute::reduce; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use orx_concurrent_iter::ConcurrentIter; - -impl X -where - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - M1: Fn(I::Item) -> Vo + Sync, -{ - pub fn try_reduce(self, reduce: Red) -> (usize, ResultTryReduce) - where - R: ParallelRunner, - Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, - { - todo!() - // let (len, p) = self.len_and_params(); - // reduce::x(R::reduce(p, len), self, reduce) - } -} - -type ResultTryReduce = - Result::Item>, <::Fallibility as Fallibility>::Error>; diff --git a/src/computations/xap/x.rs b/src/computations/xap/x.rs deleted file mode 100644 index 6227787..0000000 --- a/src/computations/xap/x.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::{ChunkSize, IterationOrder, NumThreads, Params, generic_values::Values}; -use orx_concurrent_iter::ConcurrentIter; - -pub struct X -where - I: ConcurrentIter, - Vo: Values, - X1: Fn(I::Item) -> Vo, -{ - params: Params, - iter: I, - xap1: X1, -} - -impl X -where - I: ConcurrentIter, - Vo: Values, - M1: Fn(I::Item) -> Vo, -{ - pub fn new(params: Params, iter: I, xap1: M1) -> Self { - Self { params, iter, xap1 } - } - - pub fn destruct(self) -> (Params, I, M1) { - (self.params, self.iter, 
self.xap1) - } - - pub fn params(&self) -> Params { - self.params - } - - pub fn len_and_params(&self) -> (Option, Params) { - (self.iter.try_get_len(), self.params) - } - - pub fn num_threads(&mut self, num_threads: impl Into) { - self.params = self.params.with_num_threads(num_threads); - } - - pub fn chunk_size(&mut self, chunk_size: impl Into) { - self.params = self.params.with_chunk_size(chunk_size); - } - - pub fn iteration_order(&mut self, collect: IterationOrder) { - self.params = self.params.with_collect_ordering(collect); - } - - pub fn iter(&self) -> &I { - &self.iter - } - - pub fn par_len(&self) -> Option { - match (self.params.is_sequential(), self.iter.try_get_len()) { - (true, _) => None, // not required to concurrent reserve when seq - (false, x) => x, - } - } -} diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index b494ac9..949fe28 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,11 +1,9 @@ use crate::ParallelRunner; use crate::computational_variants::{ParMap, ParXap}; -use crate::computations::X; -use crate::generic_values::TransformableValues; -use crate::generic_values::runner_results::{Fallibility, Infallible, Reduce}; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Fallibility, Reduce}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; -use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; // m diff --git a/src/using/collect_into/split_vec.rs b/src/using/collect_into/split_vec.rs index 23bc9f9..66cf6fd 100644 --- a/src/using/collect_into/split_vec.rs +++ b/src/using/collect_into/split_vec.rs @@ -41,7 +41,7 @@ where split_vec_reserve( &mut self, x.params().is_sequential(), - x.iter().try_get_len(), + x.con_iter().try_get_len(), ); let (_num_spawned, pinned_vec) = x.collect_into::(self); pinned_vec diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index 458ec7b..ba4bb4f 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -73,7 +73,7 @@ where type Item = Vo::Item; fn con_iter(&self) -> &impl ConcurrentIter { - self.ux.iter() + self.ux.con_iter() } fn params(&self) -> Params { diff --git a/src/using/computations/u_xap/x.rs b/src/using/computations/u_xap/x.rs index dad9109..184b6e1 100644 --- a/src/using/computations/u_xap/x.rs +++ b/src/using/computations/u_xap/x.rs @@ -55,7 +55,7 @@ where self.params = self.params.with_collect_ordering(collect); } - pub(crate) fn iter(&self) -> &I { + pub(crate) fn con_iter(&self) -> &I { &self.iter } } From 9e90f70a197570532b7a023a385a447faa2bc257 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:35:31 +0200 Subject: [PATCH 065/264] clean up and missing docs --- src/computational_variants/fallible_option.rs | 2 ++ src/computational_variants/fallible_result/map_result.rs | 2 ++ src/computational_variants/fallible_result/par_result.rs | 2 ++ src/computational_variants/fallible_result/xap_result.rs | 2 ++ src/computational_variants/mod.rs | 4 ++++ src/runner/parallel_runner_compute/collect_arbitrary.rs | 6 ++---- src/runner/parallel_runner_compute/collect_ordered.rs | 6 ++---- src/runner/parallel_runner_compute/next.rs | 3 +-- src/runner/parallel_runner_compute/next_any.rs | 3 +-- 9 files changed, 18 insertions(+), 12 deletions(-) diff --git 
a/src/computational_variants/fallible_option.rs b/src/computational_variants/fallible_option.rs index 3bd3670..429ef33 100644 --- a/src/computational_variants/fallible_option.rs +++ b/src/computational_variants/fallible_option.rs @@ -5,6 +5,8 @@ use crate::{ }; use std::marker::PhantomData; +/// A parallel iterator for which the computation either completely succeeds, +/// or fails and **early exits** with None. pub struct ParOption where R: Orchestrator, diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 0f50859..3e519db 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -6,6 +6,8 @@ use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; +/// A parallel iterator for which the computation either completely succeeds, +/// or fails and **early exits** with an error. pub struct ParMapResult where R: Orchestrator, diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 8395e93..7961143 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -6,6 +6,8 @@ use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; +/// A parallel iterator for which the computation either completely succeeds, +/// or fails and **early exits** with an error. pub struct ParResult where R: Orchestrator, diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index b13e28e..5686670 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -8,6 +8,8 @@ use crate::{IterationOrder, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; +/// A parallel iterator for which the computation either completely succeeds, +/// or fails and **early exits** with an error. pub struct ParXapResult where R: Orchestrator, diff --git a/src/computational_variants/mod.rs b/src/computational_variants/mod.rs index 4dc451c..89df0fa 100644 --- a/src/computational_variants/mod.rs +++ b/src/computational_variants/mod.rs @@ -1,7 +1,11 @@ #[cfg(test)] mod tests; +/// A parallel iterator for which the computation either completely succeeds, +/// or fails and **early exits** with None. pub mod fallible_option; +/// A parallel iterator for which the computation either completely succeeds, +/// or fails and **early exits** with an error. 
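// Illustrative sketch (not part of the patch): the contract documented by the
// doc comments added in this commit. A fallible parallel computation over
// `Result` items either succeeds as a whole or early-exits with the first
// error. The sequential stand-in below uses only hypothetical names.
fn try_reduce_seq<T, E>(
    items: impl Iterator<Item = Result<T, E>>,
    reduce: impl Fn(T, T) -> T,
) -> Result<Option<T>, E> {
    let mut acc: Option<T> = None;
    for item in items {
        let x = item?; // first Err short-circuits the whole computation
        acc = Some(match acc {
            Some(a) => reduce(a, x),
            None => x,
        });
    }
    Ok(acc) // all items were Ok; None only if the input was empty
}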
pub mod fallible_result; mod map; mod par; diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 294b7ba..5930b6f 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,10 +1,8 @@ #[cfg(test)] use crate::computational_variants::ParMap; use crate::computational_variants::ParXap; -use crate::generic_values::runner_results::{ - Infallible, ParallelCollectArbitrary, ThreadCollectArbitrary, -}; -use crate::generic_values::{TransformableValues, Values}; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{orch::Orchestrator, runner::ParallelRunner}; use orx_concurrent_bag::ConcurrentBag; diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 245e263..49ce3ba 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,8 +1,6 @@ use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::runner_results::{ - Fallibility, Infallible, ParallelCollect, ThreadCollect, -}; -use crate::generic_values::{TransformableValues, Values}; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; use crate::orch::Orchestrator; use crate::runner::parallel_runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 9f5b624..30f28d4 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,8 +1,7 @@ use crate::ParallelRunner; use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::TransformableValues; use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Infallible, NextSuccess, NextWithIdx}; +use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 6dc5e02..d8c4bc6 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,8 +1,7 @@ use crate::ParallelRunner; use crate::computational_variants::{ParMap, ParXap}; -use crate::generic_values::TransformableValues; use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::runner_results::Fallibility; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_iter::ConcurrentIter; From 3220618660194d9fe2e91cf17e8d120043a49164 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:46:54 +0200 Subject: [PATCH 066/264] flatten collect arbitrary --- .../computations/collect.rs | 71 +++++++++++++++++++ .../computations/mod.rs | 1 + .../fallible_result/computations/collect.rs | 3 +- src/computational_variants/mod.rs | 1 + src/computational_variants/xap.rs | 10 ++- .../collect_arbitrary.rs | 
12 ++-- 6 files changed, 89 insertions(+), 9 deletions(-) create mode 100644 src/computational_variants/computations/collect.rs create mode 100644 src/computational_variants/computations/mod.rs diff --git a/src/computational_variants/computations/collect.rs b/src/computational_variants/computations/collect.rs new file mode 100644 index 0000000..1ce5d29 --- /dev/null +++ b/src/computational_variants/computations/collect.rs @@ -0,0 +1,71 @@ +use crate::Params; +use crate::computational_variants::ParXap; +use crate::generic_values::runner_results::{ + Fallibility, ParallelCollect, ParallelCollectArbitrary, Stop, +}; +use crate::orch::Orchestrator; +use crate::runner::parallel_runner_compute::{self, collect_arbitrary, collect_ordered}; +use crate::{IterationOrder, generic_values::Values}; +use orx_concurrent_iter::ConcurrentIter; +use orx_fixed_vec::IntoConcurrentPinnedVec; + +// pub fn collect_into( +// orchestrator: R, +// params: Params, +// iter: I, +// xap1: X1, +// pinned_vec: P, +// ) -> (usize, P) +// where +// R: Orchestrator, +// I: ConcurrentIter, +// Vo: Values, +// Vo::Item: Send, +// X1: Fn(I::Item) -> Vo + Sync, +// P: IntoConcurrentPinnedVec, +// { +// match (params.is_sequential(), params.iteration_order) { +// (true, _) => (0, sequential(iter, xap1, pinned_vec)), +// (false, IterationOrder::Arbitrary) => { +// let (num_threads, result) = +// parallel_runner_compute::collect_arbitrary::x(self, pinned_vec); +// let pinned_vec = match result { +// ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, +// ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, +// }; +// (num_threads, pinned_vec) +// } +// (false, IterationOrder::Ordered) => { +// let (num_threads, result) = +// parallel_runner_compute::collect_ordered::x(self, pinned_vec); +// let pinned_vec = match result { +// ParallelCollect::AllCollected { pinned_vec } => pinned_vec, +// ParallelCollect::StoppedByWhileCondition { +// pinned_vec, +// stopped_idx: _, +// } => pinned_vec, +// }; +// (num_threads, pinned_vec) +// } +// } +// } + +fn sequential(iter: I, xap1: X1, mut pinned_vec: P) -> P +where + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + let iter = iter.into_seq_iter(); + for i in iter { + let vt = xap1(i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if Vo::sequential_push_to_stop(done).is_some() { + break; + } + } + + pinned_vec +} diff --git a/src/computational_variants/computations/mod.rs b/src/computational_variants/computations/mod.rs new file mode 100644 index 0000000..10b2a49 --- /dev/null +++ b/src/computational_variants/computations/mod.rs @@ -0,0 +1 @@ +mod collect; diff --git a/src/computational_variants/fallible_result/computations/collect.rs b/src/computational_variants/fallible_result/computations/collect.rs index c38d45b..962ce72 100644 --- a/src/computational_variants/fallible_result/computations/collect.rs +++ b/src/computational_variants/fallible_result/computations/collect.rs @@ -25,8 +25,7 @@ where match (params.is_sequential(), params.iteration_order) { (true, _) => (0, try_sequential(iter, xap1, pinned_vec)), (false, IterationOrder::Arbitrary) => { - let xap = ParXap::new(orchestrator, params, iter, xap1); - let (nt, result) = collect_arbitrary::x(xap, pinned_vec); + let (nt, result) = collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); (nt, result.into_result()) } (false, IterationOrder::Ordered) => { diff --git a/src/computational_variants/mod.rs 
b/src/computational_variants/mod.rs index 89df0fa..2593058 100644 --- a/src/computational_variants/mod.rs +++ b/src/computational_variants/mod.rs @@ -1,6 +1,7 @@ #[cfg(test)] mod tests; +mod computations; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with None. pub mod fallible_option; diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 0e4adab..101fdb5 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -59,8 +59,14 @@ where match (self.params.is_sequential(), self.params.iteration_order) { (true, _) => (0, self.seq_collect_into(pinned_vec)), (false, IterationOrder::Arbitrary) => { - let (num_threads, result) = - parallel_runner_compute::collect_arbitrary::x(self, pinned_vec); + let (orchestrator, params, iter, x1) = self.destruct(); + let (num_threads, result) = parallel_runner_compute::collect_arbitrary::x( + orchestrator, + params, + iter, + x1, + pinned_vec, + ); let pinned_vec = match result { ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 5930b6f..d049de6 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,6 +1,6 @@ +use crate::Params; #[cfg(test)] use crate::computational_variants::ParMap; -use crate::computational_variants::ParXap; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; @@ -59,8 +59,11 @@ where // x -pub fn x( - x: ParXap, +pub fn x( + orchestrator: C, + params: Params, + iter: I, + xap1: X1, pinned_vec: P, ) -> (usize, ParallelCollectArbitrary) where @@ -68,13 +71,12 @@ where I: ConcurrentIter, Vo: Values, Vo::Item: Send, - M1: Fn(I::Item) -> Vo + Sync, + X1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); - let (orchestrator, params, iter, xap1) = x.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let mut bag: ConcurrentBag = pinned_vec.into(); From 6b6c4406faa82cb8f4358002153e017d48c6b5c5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:48:14 +0200 Subject: [PATCH 067/264] flatten collect arbitrary m --- src/computational_variants/map.rs | 9 ++++++++- .../parallel_runner_compute/collect_arbitrary.rs | 11 +++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 3057ea6..ebef31c 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -53,7 +53,14 @@ where (true, _) => (0, self.seq_collect_into(pinned_vec)), #[cfg(test)] (false, IterationOrder::Arbitrary) => { - parallel_runner_compute::collect_arbitrary::m(self, pinned_vec) + let (orchestrator, params, iter, m1) = self.destruct(); + parallel_runner_compute::collect_arbitrary::m( + orchestrator, + params, + iter, + m1, + pinned_vec, + ) } (false, _) => parallel_runner_compute::collect_ordered::m(self, pinned_vec), } diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs 
b/src/runner/parallel_runner_compute/collect_arbitrary.rs index d049de6..17ca503 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,6 +1,4 @@ use crate::Params; -#[cfg(test)] -use crate::computational_variants::ParMap; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; @@ -12,7 +10,13 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m #[cfg(test)] -pub fn m(m: ParMap, pinned_vec: P) -> (usize, P) +pub fn m( + orchestrator: C, + params: Params, + iter: I, + map1: M1, + pinned_vec: P, +) -> (usize, P) where C: Orchestrator, I: ConcurrentIter, @@ -24,7 +28,6 @@ where let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); - let (orchestrator, params, iter, map1) = m.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let mut bag: ConcurrentBag = pinned_vec.into(); From 3cd84f193cfe652d464579083ff249624fdf193f Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 14:50:01 +0200 Subject: [PATCH 068/264] flatten collect ordered --- .../fallible_result/computations/collect.rs | 4 +--- src/computational_variants/map.rs | 11 +++++++++- src/computational_variants/xap.rs | 10 ++++++++-- .../collect_ordered.rs | 20 ++++++++++++++----- 4 files changed, 34 insertions(+), 11 deletions(-) diff --git a/src/computational_variants/fallible_result/computations/collect.rs b/src/computational_variants/fallible_result/computations/collect.rs index 962ce72..8414230 100644 --- a/src/computational_variants/fallible_result/computations/collect.rs +++ b/src/computational_variants/fallible_result/computations/collect.rs @@ -1,5 +1,4 @@ use crate::Params; -use crate::computational_variants::ParXap; use crate::generic_values::runner_results::{Fallibility, Stop}; use crate::orch::Orchestrator; use crate::runner::parallel_runner_compute::{collect_arbitrary, collect_ordered}; @@ -29,8 +28,7 @@ where (nt, result.into_result()) } (false, IterationOrder::Ordered) => { - let xap = ParXap::new(orchestrator, params, iter, xap1); - let (nt, result) = collect_ordered::x(xap, pinned_vec); + let (nt, result) = collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); (nt, result.into_result()) } } diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index ebef31c..e3eeb3f 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -62,7 +62,16 @@ where pinned_vec, ) } - (false, _) => parallel_runner_compute::collect_ordered::m(self, pinned_vec), + (false, _) => { + let (orchestrator, params, iter, m1) = self.destruct(); + parallel_runner_compute::collect_ordered::m( + orchestrator, + params, + iter, + m1, + pinned_vec, + ) + } } } diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 101fdb5..a848dcd 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -74,8 +74,14 @@ where (num_threads, pinned_vec) } (false, IterationOrder::Ordered) => { - let (num_threads, result) = - parallel_runner_compute::collect_ordered::x(self, pinned_vec); + let (orchestrator, params, iter, x1) = self.destruct(); + let (num_threads, result) = parallel_runner_compute::collect_ordered::x( + orchestrator, + params, + iter, + x1, + pinned_vec, + ); let pinned_vec = match result { ParallelCollect::AllCollected 
{ pinned_vec } => pinned_vec, ParallelCollect::StoppedByWhileCondition { diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 49ce3ba..87e3b30 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,4 +1,4 @@ -use crate::computational_variants::{ParMap, ParXap}; +use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; use crate::orch::Orchestrator; @@ -10,7 +10,13 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m -pub fn m(m: ParMap, pinned_vec: P) -> (usize, P) +pub fn m( + orchestrator: C, + params: Params, + iter: I, + map1: M1, + pinned_vec: P, +) -> (usize, P) where C: Orchestrator, I: ConcurrentIter, @@ -19,7 +25,6 @@ where P: IntoConcurrentPinnedVec, { let offset = pinned_vec.len(); - let (orchestrator, params, iter, map1) = m.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let o_bag: ConcurrentOrderedBag = pinned_vec.into(); @@ -51,7 +56,13 @@ where // x -pub fn x(x: ParXap, pinned_vec: P) -> (usize, ParallelCollect) +pub fn x( + orchestrator: C, + params: Params, + iter: I, + xap1: X1, + pinned_vec: P, +) -> (usize, ParallelCollect) where C: Orchestrator, I: ConcurrentIter, @@ -61,7 +72,6 @@ where X1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { - let (orchestrator, params, iter, xap1) = x.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); // compute From 8989a184e1f74fdefa6e0c469b757c70d9e0b8ab Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:09:05 +0200 Subject: [PATCH 069/264] further flatten computations --- .../fallible_result/map_result.rs | 11 +++++----- .../fallible_result/par_result.rs | 11 +++++----- .../fallible_result/xap_result.rs | 9 ++++---- src/computational_variants/map.rs | 14 +++++++++---- src/computational_variants/par.rs | 14 +++++++++---- src/computational_variants/tests/map/find.rs | 11 +++++----- .../tests/map/reduce.rs | 21 +++++++++++++------ src/computational_variants/xap.rs | 13 ++++++++---- src/runner/parallel_runner_compute/next.rs | 14 +++++++------ .../parallel_runner_compute/next_any.rs | 14 +++++++------ src/runner/parallel_runner_compute/reduce.rs | 21 +++++++++++++------ 11 files changed, 94 insertions(+), 59 deletions(-) diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 3e519db..587bc3f 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,4 +1,4 @@ -use crate::computational_variants::{ParMap, ParXap}; +use crate::computational_variants::ParMap; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::runner::parallel_runner_compute; @@ -100,8 +100,7 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - let x = ParXap::new(orchestrator, params, iter, x1); - parallel_runner_compute::reduce::x(x, reduce).1 + parallel_runner_compute::reduce::x(orchestrator, params, iter, x1, reduce).1 } // early exit @@ -113,14 +112,14 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - let x = ParXap::new(orchestrator, params, iter, x1); 
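// Illustrative sketch (not part of the patch): the flattening pattern this
// commit applies throughout. The compute functions no longer receive the
// iterator wrapper itself; the caller destructs it once and forwards the
// pieces. Simplified, hypothetical types:
#[derive(Clone, Copy)]
struct Params; // stands in for the crate's parallelization parameters

struct Computation<R, I, F> {
    orchestrator: R,
    params: Params,
    iter: I,
    f: F,
}

impl<R, I, F> Computation<R, I, F> {
    // hand the parts to a free function instead of passing `self`
    fn destruct(self) -> (R, Params, I, F) {
        (self.orchestrator, self.params, self.iter, self.f)
    }
}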
match params.iteration_order { IterationOrder::Ordered => { - let (_, result) = parallel_runner_compute::next::x(x); + let (_, result) = parallel_runner_compute::next::x(orchestrator, params, iter, x1); result.map(|x| x.map(|y| y.1)) } IterationOrder::Arbitrary => { - let (_, result) = parallel_runner_compute::next_any::x(x); + let (_, result) = + parallel_runner_compute::next_any::x(orchestrator, params, iter, x1); result } } diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 7961143..e1a5349 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,4 +1,4 @@ -use crate::computational_variants::{Par, ParXap}; +use crate::computational_variants::Par; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::runner::parallel_runner_compute; @@ -97,8 +97,7 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - let x = ParXap::new(orchestrator, params, iter, x1); - parallel_runner_compute::reduce::x(x, reduce).1 + parallel_runner_compute::reduce::x(orchestrator, params, iter, x1, reduce).1 } // early exit @@ -110,14 +109,14 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - let x = ParXap::new(orchestrator, params, iter, x1); match params.iteration_order { IterationOrder::Ordered => { - let (_, result) = parallel_runner_compute::next::x(x); + let (_, result) = parallel_runner_compute::next::x(orchestrator, params, iter, x1); result.map(|x| x.map(|y| y.1)) } IterationOrder::Arbitrary => { - let (_, result) = parallel_runner_compute::next_any::x(x); + let (_, result) = + parallel_runner_compute::next_any::x(orchestrator, params, iter, x1); result } } diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 5686670..da7b3bb 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -111,8 +111,7 @@ where { let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = ParXap::new(orchestrator, params, iter, x1); - parallel_runner_compute::reduce::x(x, reduce).1 + parallel_runner_compute::reduce::x(orchestrator, params, iter, x1, reduce).1 } // early exit @@ -124,14 +123,14 @@ where { let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - let x = ParXap::new(orchestrator, params, iter, x1); match params.iteration_order { IterationOrder::Ordered => { - let (_, result) = parallel_runner_compute::next::x(x); + let (_, result) = parallel_runner_compute::next::x(orchestrator, params, iter, x1); result.map(|x| x.map(|y| y.1)) } IterationOrder::Arbitrary => { - let (_, result) = parallel_runner_compute::next_any::x(x); + let (_, result) = + parallel_runner_compute::next_any::x(orchestrator, params, iter, x1); result } } diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index e3eeb3f..e3e8b7b 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -248,7 +248,8 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - parallel_runner_compute::reduce::m(self, reduce).1 + let (orchestrator, 
params, iter, m1) = self.destruct(); + parallel_runner_compute::reduce::m(orchestrator, params, iter, m1, reduce).1 } // early exit @@ -257,9 +258,14 @@ where where Self::Item: Send, { - match self.params.iteration_order { - IterationOrder::Ordered => parallel_runner_compute::next::m(self).1, - IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self).1, + let (orchestrator, params, iter, m1) = self.destruct(); + match params.iteration_order { + IterationOrder::Ordered => { + parallel_runner_compute::next::m(orchestrator, params, iter, m1).1 + } + IterationOrder::Arbitrary => { + parallel_runner_compute::next_any::m(orchestrator, params, iter, m1).1 + } } } } diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index a2e32a5..3166257 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -194,15 +194,21 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - parallel_runner_compute::reduce::m(self.into_map(), reduce).1 + let (orchestrator, params, iter) = self.destruct(); + parallel_runner_compute::reduce::m(orchestrator, params, iter, map_self, reduce).1 } // early exit fn first(self) -> Option { - match self.params().iteration_order { - IterationOrder::Ordered => parallel_runner_compute::next::m(self.into_map()).1, - IterationOrder::Arbitrary => parallel_runner_compute::next_any::m(self.into_map()).1, + let (orchestrator, params, iter) = self.destruct(); + match params.iteration_order { + IterationOrder::Ordered => { + parallel_runner_compute::next::m(orchestrator, params, iter, map_self).1 + } + IterationOrder::Arbitrary => { + parallel_runner_compute::next_any::m(orchestrator, params, iter, map_self).1 + } } } } diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs index a727b4e..138f9e0 100644 --- a/src/computational_variants/tests/map/find.rs +++ b/src/computational_variants/tests/map/find.rs @@ -1,6 +1,5 @@ use crate::{ - Params, computational_variants::ParMap, computations::map_self, orch::DefaultOrchestrator, - runner::parallel_runner_compute, + Params, computations::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; @@ -22,9 +21,9 @@ fn m_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map_self); - let output = parallel_runner_compute::next::m(m).1; + let output = + parallel_runner_compute::next::m(DefaultOrchestrator::default(), params, iter, map_self).1; assert_eq!(expected, output); } @@ -41,8 +40,8 @@ fn m_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); - let output = parallel_runner_compute::next::m(m).1; + let output = + parallel_runner_compute::next::m(DefaultOrchestrator::default(), params, iter, map).1; assert_eq!(expected, output); } diff --git a/src/computational_variants/tests/map/reduce.rs b/src/computational_variants/tests/map/reduce.rs index 266f3ca..6c71faa 100644 --- a/src/computational_variants/tests/map/reduce.rs +++ b/src/computational_variants/tests/map/reduce.rs @@ -1,6 +1,5 @@ use crate::{ - Params, computational_variants::ParMap, computations::map_self, orch::DefaultOrchestrator, - 
runner::parallel_runner_compute, + Params, computations::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; @@ -26,8 +25,13 @@ fn m_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map_self); - let (_, output) = parallel_runner_compute::reduce::m(m, reduce); + let (_, output) = parallel_runner_compute::reduce::m( + DefaultOrchestrator::default(), + params, + iter, + map_self, + reduce, + ); assert_eq!(expected, output); } @@ -49,8 +53,13 @@ fn m_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); - let (_, output) = parallel_runner_compute::reduce::m(m, reduce); + let (_, output) = parallel_runner_compute::reduce::m( + DefaultOrchestrator::default(), + params, + iter, + map, + reduce, + ); assert_eq!(expected, output); } diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index a848dcd..f1b0dfc 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -287,7 +287,9 @@ where Self::Item: Send, Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { - let (_, Ok(acc)) = parallel_runner_compute::reduce::x(self, reduce); + let (orchestrator, params, iter, x1) = self.destruct(); + let (_, Ok(acc)) = + parallel_runner_compute::reduce::x(orchestrator, params, iter, x1, reduce); acc } @@ -297,13 +299,16 @@ where where Self::Item: Send, { - match self.params.iteration_order { + let (orchestrator, params, iter, x1) = self.destruct(); + match params.iteration_order { IterationOrder::Ordered => { - let (_num_threads, Ok(result)) = parallel_runner_compute::next::x(self); + let (_num_threads, Ok(result)) = + parallel_runner_compute::next::x(orchestrator, params, iter, x1); result.map(|x| x.1) } IterationOrder::Arbitrary => { - let (_num_threads, Ok(result)) = parallel_runner_compute::next_any::x(self); + let (_num_threads, Ok(result)) = + parallel_runner_compute::next_any::x(orchestrator, params, iter, x1); result } } diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 30f28d4..4a8dcf4 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,19 +1,17 @@ -use crate::ParallelRunner; -use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; +use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(m: ParMap) -> (usize, Option) +pub fn m(orchestrator: C, params: Params, iter: I, map1: M1) -> (usize, Option) where C: Orchestrator, I: ConcurrentIter, O: Send, M1: Fn(I::Item) -> O + Sync, { - let (orchestrator, params, iter, map1) = m.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); @@ -54,7 +52,12 @@ type ResultNext = Result< <::Fallibility as Fallibility>::Error, >; -pub fn x(x: ParXap) -> (usize, ResultNext) +pub fn x( + orchestrator: C, + params: Params, + iter: I, + xap1: X1, +) -> (usize, 
ResultNext) where C: Orchestrator, I: ConcurrentIter, @@ -62,7 +65,6 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let (orchestrator, params, iter, xap1) = x.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index d8c4bc6..d5ae422 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,19 +1,17 @@ -use crate::ParallelRunner; -use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; +use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(m: ParMap) -> (usize, Option) +pub fn m(orchestrator: C, params: Params, iter: I, map1: M1) -> (usize, Option) where C: Orchestrator, I: ConcurrentIter, O: Send, M1: Fn(I::Item) -> O + Sync, { - let (orchestrator, params, iter, map1) = m.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); @@ -47,7 +45,12 @@ where type ResultNextAny = Result::Item>, <::Fallibility as Fallibility>::Error>; -pub fn x(x: ParXap) -> (usize, ResultNextAny) +pub fn x( + orchestrator: C, + params: Params, + iter: I, + xap1: X1, +) -> (usize, ResultNextAny) where C: Orchestrator, I: ConcurrentIter, @@ -55,7 +58,6 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let (orchestrator, params, iter, xap1) = x.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 949fe28..d89ce26 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,14 +1,19 @@ -use crate::ParallelRunner; -use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Reduce}; use crate::orch::Orchestrator; use crate::runner::{ComputationKind, thread_runner_compute as thread}; +use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; // m -pub fn m(m: ParMap, reduce: Red) -> (usize, Option) +pub fn m( + orchestrator: C, + params: Params, + iter: I, + map1: M1, + reduce: Red, +) -> (usize, Option) where C: Orchestrator, I: ConcurrentIter, @@ -16,7 +21,6 @@ where Red: Fn(O, O) -> O + Sync, O: Send, { - let (orchestrator, params, iter, map1) = m.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); @@ -58,7 +62,13 @@ where type ResultReduce = Result::Item>, <::Fallibility as Fallibility>::Error>; -pub fn x(x: ParXap, reduce: Red) -> (usize, ResultReduce) +pub fn x( + orchestrator: C, + params: Params, + iter: I, + xap1: X1, + reduce: Red, +) -> (usize, ResultReduce) where C: Orchestrator, I: ConcurrentIter, @@ -67,7 +77,6 @@ where X1: Fn(I::Item) -> Vo + Sync, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { - let (orchestrator, params, iter, xap1) = x.destruct(); let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = 
runner.new_shared_state(); From 86e111990a1d887eb9c5221d073f91a9b61f0084 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:10:30 +0200 Subject: [PATCH 070/264] module reorganization --- src/collect_into/split_vec.rs | 2 +- .../computations/collect.rs | 55 ++++++++++++++++ .../computations/mod.rs | 2 + .../fallible_result/computations/collect.rs | 62 ------------------- .../fallible_result/computations/mod.rs | 3 - .../fallible_result/mod.rs | 1 - src/computational_variants/mod.rs | 2 +- 7 files changed, 59 insertions(+), 68 deletions(-) delete mode 100644 src/computational_variants/fallible_result/computations/collect.rs delete mode 100644 src/computational_variants/fallible_result/computations/mod.rs diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index c862eb2..d95c660 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,6 +1,6 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::split_vec_reserve; -use crate::computational_variants::fallible_result::computations::try_collect_into; +use crate::computational_variants::computations::try_collect_into; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; diff --git a/src/computational_variants/computations/collect.rs b/src/computational_variants/computations/collect.rs index 1ce5d29..5c97183 100644 --- a/src/computational_variants/computations/collect.rs +++ b/src/computational_variants/computations/collect.rs @@ -69,3 +69,58 @@ where pinned_vec } + +pub fn try_collect_into( + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + pinned_vec: P, +) -> (usize, Result::Error>) +where + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + match (params.is_sequential(), params.iteration_order) { + (true, _) => (0, try_sequential(iter, xap1, pinned_vec)), + (false, IterationOrder::Arbitrary) => { + let (nt, result) = collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); + (nt, result.into_result()) + } + (false, IterationOrder::Ordered) => { + let (nt, result) = collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); + (nt, result.into_result()) + } + } +} + +fn try_sequential( + iter: I, + xap1: X1, + mut pinned_vec: P, +) -> Result::Error> +where + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + let iter = iter.into_seq_iter(); + for i in iter { + let vt = xap1(i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if let Some(stop) = Vo::sequential_push_to_stop(done) { + match stop { + Stop::DueToWhile => return Ok(pinned_vec), + Stop::DueToError { error } => return Err(error), + } + } + } + + Ok(pinned_vec) +} diff --git a/src/computational_variants/computations/mod.rs b/src/computational_variants/computations/mod.rs index 10b2a49..1581c9f 100644 --- a/src/computational_variants/computations/mod.rs +++ b/src/computational_variants/computations/mod.rs @@ -1 +1,3 @@ mod collect; + +pub use collect::try_collect_into; diff --git a/src/computational_variants/fallible_result/computations/collect.rs b/src/computational_variants/fallible_result/computations/collect.rs deleted file mode 100644 index 8414230..0000000 --- a/src/computational_variants/fallible_result/computations/collect.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::Params; 
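// Illustrative sketch (not part of the patch): the early-exit semantics of the
// sequential fallback being relocated in this commit. Pushing stops either
// because a while-condition failed (partial output is still Ok) or because an
// error occurred (Err). Hypothetical, simplified types:
enum Mapped<T, E> {
    Item(T),
    StopWhile,    // corresponds to Stop::DueToWhile
    StopError(E), // corresponds to Stop::DueToError { error }
}

fn try_collect_seq<T, E>(
    values: impl Iterator<Item = Mapped<T, E>>,
    mut out: Vec<T>,
) -> Result<Vec<T>, E> {
    for v in values {
        match v {
            Mapped::Item(t) => out.push(t),
            Mapped::StopWhile => return Ok(out),   // early but successful exit
            Mapped::StopError(e) => return Err(e), // early exit with the error
        }
    }
    Ok(out)
}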
-use crate::generic_values::runner_results::{Fallibility, Stop}; -use crate::orch::Orchestrator; -use crate::runner::parallel_runner_compute::{collect_arbitrary, collect_ordered}; -use crate::{IterationOrder, generic_values::Values}; -use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; - -pub fn try_collect_into( - orchestrator: R, - params: Params, - iter: I, - xap1: X1, - pinned_vec: P, -) -> (usize, Result::Error>) -where - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(I::Item) -> Vo + Sync, - P: IntoConcurrentPinnedVec, -{ - match (params.is_sequential(), params.iteration_order) { - (true, _) => (0, try_sequential(iter, xap1, pinned_vec)), - (false, IterationOrder::Arbitrary) => { - let (nt, result) = collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); - (nt, result.into_result()) - } - (false, IterationOrder::Ordered) => { - let (nt, result) = collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); - (nt, result.into_result()) - } - } -} - -fn try_sequential( - iter: I, - xap1: X1, - mut pinned_vec: P, -) -> Result::Error> -where - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(I::Item) -> Vo + Sync, - P: IntoConcurrentPinnedVec, -{ - let iter = iter.into_seq_iter(); - for i in iter { - let vt = xap1(i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if let Some(stop) = Vo::sequential_push_to_stop(done) { - match stop { - Stop::DueToWhile => return Ok(pinned_vec), - Stop::DueToError { error } => return Err(error), - } - } - } - - Ok(pinned_vec) -} diff --git a/src/computational_variants/fallible_result/computations/mod.rs b/src/computational_variants/fallible_result/computations/mod.rs deleted file mode 100644 index 1581c9f..0000000 --- a/src/computational_variants/fallible_result/computations/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod collect; - -pub use collect::try_collect_into; diff --git a/src/computational_variants/fallible_result/mod.rs b/src/computational_variants/fallible_result/mod.rs index d2c9286..6197ee5 100644 --- a/src/computational_variants/fallible_result/mod.rs +++ b/src/computational_variants/fallible_result/mod.rs @@ -1,4 +1,3 @@ -pub(crate) mod computations; mod map_result; mod par_result; mod xap_result; diff --git a/src/computational_variants/mod.rs b/src/computational_variants/mod.rs index 2593058..8367340 100644 --- a/src/computational_variants/mod.rs +++ b/src/computational_variants/mod.rs @@ -1,7 +1,7 @@ #[cfg(test)] mod tests; -mod computations; +pub(crate) mod computations; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with None. 
pub mod fallible_option; From 6f7ab8c41a250d429edd8e0eba215ef2c09374b5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:12:47 +0200 Subject: [PATCH 071/264] minor --- .../computations/collect.rs | 111 +++++++++++------- .../computations/mod.rs | 2 +- 2 files changed, 67 insertions(+), 46 deletions(-) diff --git a/src/computational_variants/computations/collect.rs b/src/computational_variants/computations/collect.rs index 5c97183..8c87e01 100644 --- a/src/computational_variants/computations/collect.rs +++ b/src/computational_variants/computations/collect.rs @@ -1,54 +1,63 @@ use crate::Params; -use crate::computational_variants::ParXap; use crate::generic_values::runner_results::{ - Fallibility, ParallelCollect, ParallelCollectArbitrary, Stop, + Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, }; use crate::orch::Orchestrator; -use crate::runner::parallel_runner_compute::{self, collect_arbitrary, collect_ordered}; +use crate::runner::parallel_runner_compute; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; -// pub fn collect_into( -// orchestrator: R, -// params: Params, -// iter: I, -// xap1: X1, -// pinned_vec: P, -// ) -> (usize, P) -// where -// R: Orchestrator, -// I: ConcurrentIter, -// Vo: Values, -// Vo::Item: Send, -// X1: Fn(I::Item) -> Vo + Sync, -// P: IntoConcurrentPinnedVec, -// { -// match (params.is_sequential(), params.iteration_order) { -// (true, _) => (0, sequential(iter, xap1, pinned_vec)), -// (false, IterationOrder::Arbitrary) => { -// let (num_threads, result) = -// parallel_runner_compute::collect_arbitrary::x(self, pinned_vec); -// let pinned_vec = match result { -// ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, -// ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, -// }; -// (num_threads, pinned_vec) -// } -// (false, IterationOrder::Ordered) => { -// let (num_threads, result) = -// parallel_runner_compute::collect_ordered::x(self, pinned_vec); -// let pinned_vec = match result { -// ParallelCollect::AllCollected { pinned_vec } => pinned_vec, -// ParallelCollect::StoppedByWhileCondition { -// pinned_vec, -// stopped_idx: _, -// } => pinned_vec, -// }; -// (num_threads, pinned_vec) -// } -// } -// } +pub fn collect_into( + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + pinned_vec: P, +) -> (usize, P) +where + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + match (params.is_sequential(), params.iteration_order) { + (true, _) => (0, sequential(iter, xap1, pinned_vec)), + (false, IterationOrder::Arbitrary) => { + let (num_threads, result) = parallel_runner_compute::collect_arbitrary::x( + orchestrator, + params, + iter, + xap1, + pinned_vec, + ); + let pinned_vec = match result { + ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, + ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, + }; + (num_threads, pinned_vec) + } + (false, IterationOrder::Ordered) => { + let (num_threads, result) = parallel_runner_compute::collect_ordered::x( + orchestrator, + params, + iter, + xap1, + pinned_vec, + ); + let pinned_vec = match result { + ParallelCollect::AllCollected { pinned_vec } => pinned_vec, + ParallelCollect::StoppedByWhileCondition { + pinned_vec, + stopped_idx: _, + } => pinned_vec, + }; + (num_threads, pinned_vec) + } + } +} fn sequential(iter: 
I, xap1: X1, mut pinned_vec: P) -> P where @@ -88,11 +97,23 @@ where match (params.is_sequential(), params.iteration_order) { (true, _) => (0, try_sequential(iter, xap1, pinned_vec)), (false, IterationOrder::Arbitrary) => { - let (nt, result) = collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); + let (nt, result) = parallel_runner_compute::collect_arbitrary::x( + orchestrator, + params, + iter, + xap1, + pinned_vec, + ); (nt, result.into_result()) } (false, IterationOrder::Ordered) => { - let (nt, result) = collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); + let (nt, result) = parallel_runner_compute::collect_ordered::x( + orchestrator, + params, + iter, + xap1, + pinned_vec, + ); (nt, result.into_result()) } } diff --git a/src/computational_variants/computations/mod.rs b/src/computational_variants/computations/mod.rs index 1581c9f..e978123 100644 --- a/src/computational_variants/computations/mod.rs +++ b/src/computational_variants/computations/mod.rs @@ -1,3 +1,3 @@ mod collect; -pub use collect::try_collect_into; +pub use collect::{collect_into, try_collect_into}; From 410840ea7db207aca5b0d9b306d7535373c46516 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:15:40 +0200 Subject: [PATCH 072/264] flatten x collection for infallible --- src/collect_into/fixed_vec.rs | 12 +++-- src/collect_into/par_collect_into.rs | 10 +++- src/collect_into/split_vec.rs | 20 ++++---- src/collect_into/vec.rs | 10 +++- src/computational_variants/xap.rs | 70 ++-------------------------- 5 files changed, 39 insertions(+), 83 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index 9629eed..bda5f2e 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,6 +1,6 @@ use super::par_collect_into::ParCollectIntoCore; use crate::Params; -use crate::computational_variants::{ParMap, ParXap}; +use crate::computational_variants::ParMap; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; @@ -31,7 +31,13 @@ where FixedVec::from(vec.m_collect_into(m)) } - fn x_collect_into(self, x: ParXap) -> Self + fn x_collect_into( + self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Self where R: Orchestrator, I: ConcurrentIter, @@ -39,7 +45,7 @@ where X1: Fn(I::Item) -> Vo + Sync, { let vec = Vec::from(self); - FixedVec::from(vec.x_collect_into(x)) + FixedVec::from(vec.x_collect_into(orchestrator, params, iter, xap1)) } fn x_try_collect_into( diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index 34c69b9..7d88bac 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,5 +1,5 @@ use crate::Params; -use crate::computational_variants::{ParMap, ParXap}; +use crate::computational_variants::ParMap; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; @@ -19,7 +19,13 @@ pub trait ParCollectIntoCore: Collection { I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync; - fn x_collect_into(self, x: ParXap) -> Self + fn x_collect_into( + self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Self where R: Orchestrator, I: ConcurrentIter, diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index d95c660..427ece7 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ 
-1,7 +1,7 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::split_vec_reserve; -use crate::computational_variants::computations::try_collect_into; -use crate::computational_variants::{ParMap, ParXap}; +use crate::computational_variants::ParMap; +use crate::computational_variants::computations::{collect_into, try_collect_into}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; @@ -41,19 +41,21 @@ where pinned_vec } - fn x_collect_into(mut self, x: ParXap) -> Self + fn x_collect_into( + mut self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Self where R: Orchestrator, I: ConcurrentIter, Vo: TransformableValues, X1: Fn(I::Item) -> Vo + Sync, { - split_vec_reserve( - &mut self, - x.params().is_sequential(), - x.con_iter().try_get_len(), - ); - let (_num_spawned, pinned_vec) = x.par_collect_into(self); + split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); + let (_num_spawned, pinned_vec) = collect_into(orchestrator, params, iter, xap1, self); pinned_vec } diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 4dce1aa..dfb9764 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -44,7 +44,13 @@ where } } - fn x_collect_into(self, x: ParXap) -> Self + fn x_collect_into( + self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Self where R: Orchestrator, I: ConcurrentIter, @@ -52,7 +58,7 @@ where X1: Fn(I::Item) -> Vo + Sync, { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let split_vec = split_vec.x_collect_into(x); + let split_vec = split_vec.x_collect_into(orchestrator, params, iter, xap1); extend_vec_from_split(self, split_vec) } diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index f1b0dfc..a6f9e94 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -1,8 +1,6 @@ use crate::ParIterResult; use crate::computational_variants::fallible_result::ParXapResult; -use crate::generic_values::runner_results::{ - Infallible, ParallelCollect, ParallelCollectArbitrary, -}; +use crate::generic_values::runner_results::Infallible; use crate::generic_values::{TransformableValues, Values}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; @@ -12,7 +10,6 @@ use crate::{ using::{UsingClone, UsingFun, computational_variants::UParXap}, }; use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; /// A parallel iterator that xaps inputs. /// @@ -49,68 +46,6 @@ where pub(crate) fn destruct(self) -> (R, Params, I, X1) { (self.orchestrator, self.params, self.iter, self.xap1) } - - pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, P) - where - P: IntoConcurrentPinnedVec, - Vo: TransformableValues, - Vo::Item: Send, - { - match (self.params.is_sequential(), self.params.iteration_order) { - (true, _) => (0, self.seq_collect_into(pinned_vec)), - (false, IterationOrder::Arbitrary) => { - let (orchestrator, params, iter, x1) = self.destruct(); - let (num_threads, result) = parallel_runner_compute::collect_arbitrary::x( - orchestrator, - params, - iter, - x1, - pinned_vec, - ); - let pinned_vec = match result { - ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, - ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, - }; - (num_threads, pinned_vec) - } - (false, IterationOrder::Ordered) => { - let (orchestrator, params, iter, x1) = self.destruct(); - let (num_threads, result) = parallel_runner_compute::collect_ordered::x( - orchestrator, - params, - iter, - x1, - pinned_vec, - ); - let pinned_vec = match result { - ParallelCollect::AllCollected { pinned_vec } => pinned_vec, - ParallelCollect::StoppedByWhileCondition { - pinned_vec, - stopped_idx: _, - } => pinned_vec, - }; - (num_threads, pinned_vec) - } - } - } - - pub(crate) fn seq_collect_into
<P>
(self, mut pinned_vec: P) -> P - where - P: IntoConcurrentPinnedVec, - { - let (_, _, iter, xap1) = self.destruct(); - - let iter = iter.into_seq_iter(); - for i in iter { - let vt = xap1(i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if Vo::sequential_push_to_stop(done).is_some() { - break; - } - } - - pinned_vec - } } unsafe impl Send for ParXap @@ -277,7 +212,8 @@ where where C: ParCollectInto, { - output.x_collect_into(self) + let (orchestrator, params, iter, x1) = self.destruct(); + output.x_collect_into(orchestrator, params, iter, x1) } // reduce From cf8fa4ccbcf208ff25c1278aeb43495936c5832a Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:23:44 +0200 Subject: [PATCH 073/264] flatten collect into computations --- src/collect_into/fixed_vec.rs | 5 +- src/collect_into/par_collect_into.rs | 3 +- src/collect_into/split_vec.rs | 27 +++++---- src/collect_into/vec.rs | 15 +++-- .../computations/collect.rs | 60 +++++++++++++++++-- .../computations/mod.rs | 2 +- src/computational_variants/map.rs | 3 +- src/computational_variants/par.rs | 3 +- 8 files changed, 88 insertions(+), 30 deletions(-) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index bda5f2e..a0e0e6e 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -1,6 +1,5 @@ use super::par_collect_into::ParCollectIntoCore; use crate::Params; -use crate::computational_variants::ParMap; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; @@ -20,7 +19,7 @@ where vec.into() } - fn m_collect_into(self, m: ParMap) -> Self + fn m_collect_into(self, orchestrator: R, params: Params, iter: I, map1: M1) -> Self where R: Orchestrator, I: ConcurrentIter, @@ -28,7 +27,7 @@ where O: Send, { let vec = Vec::from(self); - FixedVec::from(vec.m_collect_into(m)) + FixedVec::from(vec.m_collect_into(orchestrator, params, iter, map1)) } fn x_collect_into( diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index 7d88bac..0e7e8ab 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,5 +1,4 @@ use crate::Params; -use crate::computational_variants::ParMap; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; @@ -13,7 +12,7 @@ pub trait ParCollectIntoCore: Collection { fn empty(iter_len: Option) -> Self; - fn m_collect_into(self, m: ParMap) -> Self + fn m_collect_into(self, orchestrator: R, params: Params, iter: I, map1: M1) -> Self where R: Orchestrator, I: ConcurrentIter, diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index 427ece7..1a4d052 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,11 +1,12 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::Params; use crate::collect_into::utils::split_vec_reserve; -use crate::computational_variants::ParMap; -use crate::computational_variants::computations::{collect_into, try_collect_into}; +use crate::computational_variants::computations::{ + map_collect_into, xap_collect_into, xap_try_collect_into, +}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; -use crate::{ParIter, Params}; use orx_concurrent_iter::ConcurrentIter; #[cfg(test)] use orx_pinned_vec::PinnedVec; @@ 
-25,19 +26,21 @@ where vec } - fn m_collect_into(mut self, m: ParMap) -> Self + fn m_collect_into( + mut self, + orchestrator: R, + params: Params, + iter: I, + map1: M1, + ) -> Self where R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, O: Send, { - split_vec_reserve( - &mut self, - m.params().is_sequential(), - m.con_iter().try_get_len(), - ); - let (_, pinned_vec) = m.par_collect_into(self); + split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); + let (_, pinned_vec) = map_collect_into(orchestrator, params, iter, map1, self); pinned_vec } @@ -55,7 +58,7 @@ where X1: Fn(I::Item) -> Vo + Sync, { split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); - let (_num_spawned, pinned_vec) = collect_into(orchestrator, params, iter, xap1, self); + let (_num_spawned, pinned_vec) = xap_collect_into(orchestrator, params, iter, xap1, self); pinned_vec } @@ -74,7 +77,7 @@ where Self: Sized, { split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); - let (_num_spawned, result) = try_collect_into(orchestrator, params, iter, xap1, self); + let (_num_spawned, result) = xap_try_collect_into(orchestrator, params, iter, xap1, self); result } diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index dfb9764..d4c1dc4 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,5 +1,6 @@ use super::par_collect_into::ParCollectIntoCore; use crate::collect_into::utils::extend_vec_from_split; +use crate::computational_variants::computations::map_collect_into; use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; @@ -22,23 +23,29 @@ where } } - fn m_collect_into(mut self, m: ParMap) -> Self + fn m_collect_into( + mut self, + orchestrator: R, + params: Params, + iter: I, + map1: M1, + ) -> Self where R: Orchestrator, I: ConcurrentIter, M1: Fn(I::Item) -> O + Sync, O: Send, { - match m.con_iter().try_get_len() { + match iter.try_get_len() { None => { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let split_vec = split_vec.m_collect_into(m); + let split_vec = split_vec.m_collect_into(orchestrator, params, iter, map1); extend_vec_from_split(self, split_vec) } Some(len) => { self.reserve(len); let fixed_vec = FixedVec::from(self); - let (_, fixed_vec) = m.par_collect_into(fixed_vec); + let (_, fixed_vec) = map_collect_into(orchestrator, params, iter, map1, fixed_vec); Vec::from(fixed_vec) } } diff --git a/src/computational_variants/computations/collect.rs b/src/computational_variants/computations/collect.rs index 8c87e01..2cad1f5 100644 --- a/src/computational_variants/computations/collect.rs +++ b/src/computational_variants/computations/collect.rs @@ -8,7 +8,55 @@ use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; -pub fn collect_into( +pub fn map_collect_into( + orchestrator: R, + params: Params, + iter: I, + map1: M1, + pinned_vec: P, +) -> (usize, P) +where + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(I::Item) -> O + Sync, + O: Send, + P: IntoConcurrentPinnedVec, +{ + match (params.is_sequential(), params.iteration_order) { + (true, _) => (0, map_collect_into_seq(iter, map1, pinned_vec)), + #[cfg(test)] + (false, IterationOrder::Arbitrary) => parallel_runner_compute::collect_arbitrary::m( + orchestrator, + params, + iter, + map1, + pinned_vec, + ), + (false, _) => 
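/* This match is the single dispatch point for map-collect: a sequential
   fallback, a test-only arbitrary-order parallel arm, and an ordered parallel
   arm. A minimal usage sketch under assumed defaults; the input range, the
   closure and the output vector are illustrative, not from the patch:

   use orx_pinned_vec::PinnedVec;
   use orx_split_vec::SplitVec;

   let (_num_threads, out) = map_collect_into(
       DefaultOrchestrator::default(), // selects the runner type
       Params::default(),              // threads, chunk size, iteration order
       (0..1024).into_con_iter(),      // concurrent input iterator
       |x| x * 2,                      // map1
       SplitVec::with_doubling_growth_and_max_concurrent_capacity(),
   );
   assert_eq!(out.len(), 1024);
*/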
parallel_runner_compute::collect_ordered::m( + orchestrator, + params, + iter, + map1, + pinned_vec, + ), + } +} + +fn map_collect_into_seq(iter: I, map1: M1, mut pinned_vec: P) -> P +where + I: ConcurrentIter, + M1: Fn(I::Item) -> O + Sync, + O: Send, + P: IntoConcurrentPinnedVec, +{ + let iter = iter.into_seq_iter(); + for i in iter { + pinned_vec.push(map1(i)); + } + pinned_vec +} + +pub fn xap_collect_into( orchestrator: R, params: Params, iter: I, @@ -24,7 +72,7 @@ where P: IntoConcurrentPinnedVec, { match (params.is_sequential(), params.iteration_order) { - (true, _) => (0, sequential(iter, xap1, pinned_vec)), + (true, _) => (0, xap_collect_into_seq(iter, xap1, pinned_vec)), (false, IterationOrder::Arbitrary) => { let (num_threads, result) = parallel_runner_compute::collect_arbitrary::x( orchestrator, @@ -59,7 +107,7 @@ where } } -fn sequential(iter: I, xap1: X1, mut pinned_vec: P) -> P +fn xap_collect_into_seq(iter: I, xap1: X1, mut pinned_vec: P) -> P where I: ConcurrentIter, Vo: Values, @@ -79,7 +127,7 @@ where pinned_vec } -pub fn try_collect_into( +pub fn xap_try_collect_into( orchestrator: R, params: Params, iter: I, @@ -95,7 +143,7 @@ where P: IntoConcurrentPinnedVec, { match (params.is_sequential(), params.iteration_order) { - (true, _) => (0, try_sequential(iter, xap1, pinned_vec)), + (true, _) => (0, xap_try_collect_into_seq(iter, xap1, pinned_vec)), (false, IterationOrder::Arbitrary) => { let (nt, result) = parallel_runner_compute::collect_arbitrary::x( orchestrator, @@ -119,7 +167,7 @@ where } } -fn try_sequential( +fn xap_try_collect_into_seq( iter: I, xap1: X1, mut pinned_vec: P, diff --git a/src/computational_variants/computations/mod.rs b/src/computational_variants/computations/mod.rs index e978123..7f9b9dd 100644 --- a/src/computational_variants/computations/mod.rs +++ b/src/computational_variants/computations/mod.rs @@ -1,3 +1,3 @@ mod collect; -pub use collect::{collect_into, try_collect_into}; +pub use collect::{map_collect_into, xap_collect_into, xap_try_collect_into}; diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index e3e8b7b..4931c2d 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -238,7 +238,8 @@ where where C: ParCollectInto, { - output.m_collect_into(self) + let (orchestrator, params, iter, m1) = self.destruct(); + output.m_collect_into(orchestrator, params, iter, m1) } // reduce diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 3166257..7a28169 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -184,7 +184,8 @@ where where C: ParCollectInto, { - output.m_collect_into(self.into_map()) + let (orchestrator, params, iter) = self.destruct(); + output.m_collect_into(orchestrator, params, iter, map_self) } // reduce From 611fb9617b9d2d2761941618b0ff6a51939c61e8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:25:39 +0200 Subject: [PATCH 074/264] clean up --- src/collect_into/vec.rs | 3 +- src/computational_variants/map.rs | 45 ------------------- src/computational_variants/par.rs | 5 --- .../tests/map/collect.rs | 10 +++-- 4 files changed, 7 insertions(+), 56 deletions(-) diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index d4c1dc4..d02708a 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,11 +1,10 @@ use super::par_collect_into::ParCollectIntoCore; +use crate::Params; use crate::collect_into::utils::extend_vec_from_split; use 
crate::computational_variants::computations::map_collect_into; -use crate::computational_variants::{ParMap, ParXap}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; -use crate::{ParIter, Params}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; use orx_split_vec::SplitVec; diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 4931c2d..2a5e2b5 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -43,51 +43,6 @@ where pub(crate) fn destruct(self) -> (R, Params, I, M1) { (self.orchestrator, self.params, self.iter, self.map1) } - - pub(crate) fn par_collect_into
<P>
(self, pinned_vec: P) -> (usize, P) - where - P: IntoConcurrentPinnedVec, - O: Send, - { - match (self.params.is_sequential(), self.params.iteration_order) { - (true, _) => (0, self.seq_collect_into(pinned_vec)), - #[cfg(test)] - (false, IterationOrder::Arbitrary) => { - let (orchestrator, params, iter, m1) = self.destruct(); - parallel_runner_compute::collect_arbitrary::m( - orchestrator, - params, - iter, - m1, - pinned_vec, - ) - } - (false, _) => { - let (orchestrator, params, iter, m1) = self.destruct(); - parallel_runner_compute::collect_ordered::m( - orchestrator, - params, - iter, - m1, - pinned_vec, - ) - } - } - } - - fn seq_collect_into
<P>
(self, mut pinned_vec: P) -> P - where - P: IntoConcurrentPinnedVec, - { - let (_, _, iter, map1) = self.destruct(); - - let iter = iter.into_seq_iter(); - for i in iter { - pinned_vec.push(map1(i)); - } - - pinned_vec - } } unsafe impl Send for ParMap diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 7a28169..5267011 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -43,11 +43,6 @@ where pub(crate) fn destruct(self) -> (R, Params, I) { (self.orchestrator, self.params, self.iter) } - - fn into_map(self) -> ParMap I::Item, R> { - let (orchestrator, params, iter) = self.destruct(); - ParMap::new(orchestrator, params, iter, map_self) - } } unsafe impl Send for Par diff --git a/src/computational_variants/tests/map/collect.rs b/src/computational_variants/tests/map/collect.rs index c9e6715..798ff36 100644 --- a/src/computational_variants/tests/map/collect.rs +++ b/src/computational_variants/tests/map/collect.rs @@ -1,4 +1,7 @@ -use crate::{IterationOrder, Params, computational_variants::ParMap, orch::DefaultOrchestrator}; +use crate::{ + IterationOrder, Params, computational_variants::computations::map_collect_into, + orch::DefaultOrchestrator, +}; use orx_concurrent_iter::IntoConcurrentIter; use orx_pinned_vec::PinnedVec; use orx_split_vec::SplitVec; @@ -33,9 +36,8 @@ fn m_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let m = ParMap::new(DefaultOrchestrator::default(), params, iter, map); - - let (_, mut output) = m.par_collect_into(output); + let (_, mut output) = + map_collect_into(DefaultOrchestrator::default(), params, iter, map, output); if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { expected.sort(); From e1d7529af3f976449facaed956d589b242f25d5e Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:30:29 +0200 Subject: [PATCH 075/264] alias parallel_runner_compute as prc --- .../computations/collect.rs | 56 +++++-------------- .../fallible_result/map_result.rs | 9 ++- .../fallible_result/par_result.rs | 9 ++- .../fallible_result/xap_result.rs | 9 ++- src/computational_variants/map.rs | 13 ++--- src/computational_variants/par.rs | 12 ++-- src/computational_variants/xap.rs | 11 ++-- 7 files changed, 37 insertions(+), 82 deletions(-) diff --git a/src/computational_variants/computations/collect.rs b/src/computational_variants/computations/collect.rs index 2cad1f5..ef1caa8 100644 --- a/src/computational_variants/computations/collect.rs +++ b/src/computational_variants/computations/collect.rs @@ -3,7 +3,7 @@ use crate::generic_values::runner_results::{ Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, }; use crate::orch::Orchestrator; -use crate::runner::parallel_runner_compute; +use crate::runner::parallel_runner_compute as prc; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; @@ -25,20 +25,10 @@ where match (params.is_sequential(), params.iteration_order) { (true, _) => (0, map_collect_into_seq(iter, map1, pinned_vec)), #[cfg(test)] - (false, IterationOrder::Arbitrary) => parallel_runner_compute::collect_arbitrary::m( - orchestrator, - params, - iter, - map1, - pinned_vec, - ), - (false, _) => parallel_runner_compute::collect_ordered::m( - orchestrator, - params, - iter, - map1, - pinned_vec, - ), + (false, IterationOrder::Arbitrary) => { + 
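/* Arbitrary-order collection commits results in completion order, so the
   parallel output is a permutation of the sequential one; that is why the
   reworked m_map_collect test above sorts before asserting equality. A
   condensed sketch of the same check, with illustrative sizes and closure:

   let params = Params::new(4, 64, IterationOrder::Arbitrary);
   let (_nt, out) = map_collect_into(
       DefaultOrchestrator::default(),
       params,
       (0..1024).into_con_iter(),
       |x| x * 2,
       SplitVec::with_doubling_growth_and_max_concurrent_capacity(),
   );
   let mut collected: Vec<_> = out.into_iter().collect();
   collected.sort();
   let expected: Vec<_> = (0..1024).map(|x| x * 2).collect();
   assert_eq!(collected, expected);
*/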
prc::collect_arbitrary::m(orchestrator, params, iter, map1, pinned_vec) + } + (false, _) => prc::collect_ordered::m(orchestrator, params, iter, map1, pinned_vec), } } @@ -74,13 +64,8 @@ where match (params.is_sequential(), params.iteration_order) { (true, _) => (0, xap_collect_into_seq(iter, xap1, pinned_vec)), (false, IterationOrder::Arbitrary) => { - let (num_threads, result) = parallel_runner_compute::collect_arbitrary::x( - orchestrator, - params, - iter, - xap1, - pinned_vec, - ); + let (num_threads, result) = + prc::collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); let pinned_vec = match result { ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, @@ -88,13 +73,8 @@ where (num_threads, pinned_vec) } (false, IterationOrder::Ordered) => { - let (num_threads, result) = parallel_runner_compute::collect_ordered::x( - orchestrator, - params, - iter, - xap1, - pinned_vec, - ); + let (num_threads, result) = + prc::collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); let pinned_vec = match result { ParallelCollect::AllCollected { pinned_vec } => pinned_vec, ParallelCollect::StoppedByWhileCondition { @@ -145,23 +125,13 @@ where match (params.is_sequential(), params.iteration_order) { (true, _) => (0, xap_try_collect_into_seq(iter, xap1, pinned_vec)), (false, IterationOrder::Arbitrary) => { - let (nt, result) = parallel_runner_compute::collect_arbitrary::x( - orchestrator, - params, - iter, - xap1, - pinned_vec, - ); + let (nt, result) = + prc::collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); (nt, result.into_result()) } (false, IterationOrder::Ordered) => { - let (nt, result) = parallel_runner_compute::collect_ordered::x( - orchestrator, - params, - iter, - xap1, - pinned_vec, - ); + let (nt, result) = + prc::collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); (nt, result.into_result()) } } diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 587bc3f..3bbb440 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,7 +1,7 @@ use crate::computational_variants::ParMap; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_runner_compute; +use crate::runner::parallel_runner_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; @@ -100,7 +100,7 @@ where { let (orchestrator, params, iter, m1) = self.par.destruct(); let x1 = |i: I::Item| m1(i).into_result(); - parallel_runner_compute::reduce::x(orchestrator, params, iter, x1, reduce).1 + prc::reduce::x(orchestrator, params, iter, x1, reduce).1 } // early exit @@ -114,12 +114,11 @@ where let x1 = |i: I::Item| m1(i).into_result(); match params.iteration_order { IterationOrder::Ordered => { - let (_, result) = parallel_runner_compute::next::x(orchestrator, params, iter, x1); + let (_, result) = prc::next::x(orchestrator, params, iter, x1); result.map(|x| x.map(|y| y.1)) } IterationOrder::Arbitrary => { - let (_, result) = - parallel_runner_compute::next_any::x(orchestrator, params, iter, x1); + let (_, result) = prc::next_any::x(orchestrator, params, iter, x1); result } } diff --git a/src/computational_variants/fallible_result/par_result.rs 
b/src/computational_variants/fallible_result/par_result.rs index e1a5349..0305af7 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,7 +1,7 @@ use crate::computational_variants::Par; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_runner_compute; +use crate::runner::parallel_runner_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; @@ -97,7 +97,7 @@ where { let (orchestrator, params, iter) = self.par.destruct(); let x1 = |i: I::Item| i.into_result(); - parallel_runner_compute::reduce::x(orchestrator, params, iter, x1, reduce).1 + prc::reduce::x(orchestrator, params, iter, x1, reduce).1 } // early exit @@ -111,12 +111,11 @@ where let x1 = |i: I::Item| i.into_result(); match params.iteration_order { IterationOrder::Ordered => { - let (_, result) = parallel_runner_compute::next::x(orchestrator, params, iter, x1); + let (_, result) = prc::next::x(orchestrator, params, iter, x1); result.map(|x| x.map(|y| y.1)) } IterationOrder::Arbitrary => { - let (_, result) = - parallel_runner_compute::next_any::x(orchestrator, params, iter, x1); + let (_, result) = prc::next_any::x(orchestrator, params, iter, x1); result } } diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index da7b3bb..919fd7f 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -3,7 +3,7 @@ use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_runner_compute; +use crate::runner::parallel_runner_compute as prc; use crate::{IterationOrder, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; @@ -111,7 +111,7 @@ where { let (orchestrator, params, iter, x1) = self.destruct(); let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); - parallel_runner_compute::reduce::x(orchestrator, params, iter, x1, reduce).1 + prc::reduce::x(orchestrator, params, iter, x1, reduce).1 } // early exit @@ -125,12 +125,11 @@ where let x1 = |i: I::Item| x1(i).map_while_ok(|x| x.into_result()); match params.iteration_order { IterationOrder::Ordered => { - let (_, result) = parallel_runner_compute::next::x(orchestrator, params, iter, x1); + let (_, result) = prc::next::x(orchestrator, params, iter, x1); result.map(|x| x.map(|y| y.1)) } IterationOrder::Arbitrary => { - let (_, result) = - parallel_runner_compute::next_any::x(orchestrator, params, iter, x1); + let (_, result) = prc::next_any::x(orchestrator, params, iter, x1); result } } diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 2a5e2b5..b70a2f9 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -4,13 +4,12 @@ use crate::computational_variants::fallible_result::ParMapResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_runner_compute; +use crate::runner::parallel_runner_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, 
ParCollectInto, ParIter, ParIterUsing, Params, using::{UsingClone, UsingFun, computational_variants::UParMap}, }; use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; /// A parallel iterator that maps inputs. pub struct ParMap @@ -205,7 +204,7 @@ where Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { let (orchestrator, params, iter, m1) = self.destruct(); - parallel_runner_compute::reduce::m(orchestrator, params, iter, m1, reduce).1 + prc::reduce::m(orchestrator, params, iter, m1, reduce).1 } // early exit @@ -216,12 +215,8 @@ where { let (orchestrator, params, iter, m1) = self.destruct(); match params.iteration_order { - IterationOrder::Ordered => { - parallel_runner_compute::next::m(orchestrator, params, iter, m1).1 - } - IterationOrder::Arbitrary => { - parallel_runner_compute::next_any::m(orchestrator, params, iter, m1).1 - } + IterationOrder::Ordered => prc::next::m(orchestrator, params, iter, m1).1, + IterationOrder::Arbitrary => prc::next_any::m(orchestrator, params, iter, m1).1, } } } diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 5267011..a07f8d3 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -3,7 +3,7 @@ use crate::computational_variants::fallible_result::ParResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_runner_compute; +use crate::runner::parallel_runner_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, computations::map_self, @@ -191,7 +191,7 @@ where Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { let (orchestrator, params, iter) = self.destruct(); - parallel_runner_compute::reduce::m(orchestrator, params, iter, map_self, reduce).1 + prc::reduce::m(orchestrator, params, iter, map_self, reduce).1 } // early exit @@ -199,12 +199,8 @@ where fn first(self) -> Option { let (orchestrator, params, iter) = self.destruct(); match params.iteration_order { - IterationOrder::Ordered => { - parallel_runner_compute::next::m(orchestrator, params, iter, map_self).1 - } - IterationOrder::Arbitrary => { - parallel_runner_compute::next_any::m(orchestrator, params, iter, map_self).1 - } + IterationOrder::Ordered => prc::next::m(orchestrator, params, iter, map_self).1, + IterationOrder::Arbitrary => prc::next_any::m(orchestrator, params, iter, map_self).1, } } } diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index a6f9e94..1d97e9c 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -4,7 +4,7 @@ use crate::generic_values::runner_results::Infallible; use crate::generic_values::{TransformableValues, Values}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_runner_compute; +use crate::runner::parallel_runner_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, using::{UsingClone, UsingFun, computational_variants::UParXap}, @@ -224,8 +224,7 @@ where Reduce: Fn(Self::Item, Self::Item) -> Self::Item + Sync, { let (orchestrator, params, iter, x1) = self.destruct(); - let (_, Ok(acc)) = - parallel_runner_compute::reduce::x(orchestrator, params, iter, x1, reduce); + let (_, Ok(acc)) = prc::reduce::x(orchestrator, params, iter, x1, reduce); acc } @@ -238,13 +237,11 @@ 
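/* In the infallible ParXap paths above, the computation's error type is the
   crate's uninhabited Infallible, so the `let (_, Ok(acc)) = ...` bindings can
   never observe an Err. A standalone model of the same idea built on
   std::convert::Infallible; it assumes a recent toolchain on which patterns
   over uninhabited variants may be omitted:

   use std::convert::Infallible;

   fn reduce_model(values: &[u64]) -> Result<Option<u64>, Infallible> {
       Ok(values.iter().copied().reduce(u64::max))
   }

   let Ok(acc) = reduce_model(&[3, 9, 4]); // Err(Infallible) is uninhabited
   assert_eq!(acc, Some(9));
*/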
where let (orchestrator, params, iter, x1) = self.destruct(); match params.iteration_order { IterationOrder::Ordered => { - let (_num_threads, Ok(result)) = - parallel_runner_compute::next::x(orchestrator, params, iter, x1); + let (_num_threads, Ok(result)) = prc::next::x(orchestrator, params, iter, x1); result.map(|x| x.1) } IterationOrder::Arbitrary => { - let (_num_threads, Ok(result)) = - parallel_runner_compute::next_any::x(orchestrator, params, iter, x1); + let (_num_threads, Ok(result)) = prc::next_any::x(orchestrator, params, iter, x1); result } } From f074e2ca535f6bdc25b6461848f53544ed99387e Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:35:18 +0200 Subject: [PATCH 076/264] init scope and handle --- src/orch/implementations/std_orchestrator.rs | 38 ++++++++++++++++++++ src/orch/mod.rs | 3 ++ src/orch/orchestrator.rs | 9 +++++ src/orch/par_handle.rs | 7 ++++ src/orch/par_scope.rs | 16 +++++++++ 5 files changed, 73 insertions(+) create mode 100644 src/orch/par_handle.rs create mode 100644 src/orch/par_scope.rs diff --git a/src/orch/implementations/std_orchestrator.rs b/src/orch/implementations/std_orchestrator.rs index 9851236..a8f9d2b 100644 --- a/src/orch/implementations/std_orchestrator.rs +++ b/src/orch/implementations/std_orchestrator.rs @@ -1,6 +1,32 @@ +use super::super::par_handle::{JoinResult, ParHandle}; +use super::super::par_scope::ParScope; use crate::{ParallelRunner, orch::Orchestrator}; use std::marker::PhantomData; +pub struct StdHandle<'scope, T>(std::thread::ScopedJoinHandle<'scope, T>); + +impl<'scope, T> ParHandle<'scope, T> for StdHandle<'scope, T> { + fn join(self) -> JoinResult { + self.0.join() + } +} + +impl<'env, 'scope> ParScope<'env, 'scope> for std::thread::Scope<'scope, 'env> { + type Handle + = StdHandle<'scope, T> + where + Self: 'scope, + T: 'scope; + + fn spawn(&'scope self, f: F) -> Self::Handle + where + F: FnOnce() -> T + Send + 'scope, + T: Send + 'scope, + { + StdHandle(self.spawn(f)) + } +} + pub struct StdOrchestrator where R: ParallelRunner, @@ -22,4 +48,16 @@ where R: ParallelRunner, { type Runner = R; + + type Scope<'env, 'scope> + = std::thread::Scope<'scope, 'env> + where + 'env: 'scope; + + fn scope<'env, F, T>(f: F) -> T + where + F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, + { + std::thread::scope(|s| f(s)) + } } diff --git a/src/orch/mod.rs b/src/orch/mod.rs index 181e557..422f1bc 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -1,6 +1,9 @@ mod implementations; mod orchestrator; +mod par_handle; +mod par_scope; pub use crate::orch::implementations::StdOrchestrator; pub use orchestrator::Orchestrator; + pub type DefaultOrchestrator = StdOrchestrator; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index d54741a..fc00512 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,8 +1,13 @@ +use super::par_scope::ParScope; use crate::{ParallelRunner, Params, runner::ComputationKind}; pub trait Orchestrator { type Runner: ParallelRunner; + type Scope<'env, 'scope>: ParScope<'env, 'scope> + where + 'env: 'scope; + fn new_runner( &self, kind: ComputationKind, @@ -11,4 +16,8 @@ pub trait Orchestrator { ) -> Self::Runner { ::new(kind, params, initial_input_len) } + + fn scope<'env, F, T>(f: F) -> T + where + F: for<'scope> FnOnce(&'scope Self::Scope<'env, 'scope>) -> T; } diff --git a/src/orch/par_handle.rs b/src/orch/par_handle.rs new file mode 100644 index 0000000..cba65cc --- /dev/null +++ b/src/orch/par_handle.rs @@ -0,0 +1,7 @@ +use std::any::Any; + +pub 
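/* The two traits introduced here abstract std's scoped spawning; with the
   generic parameters written out, the intended shapes are as follows, a
   sketch assuming join semantics like std::thread's scoped handles:

   pub type JoinResult<T> = Result<T, Box<dyn Any + Send>>;

   pub trait ParHandle<'scope, T> {
       fn join(self) -> JoinResult<T>;
   }

   pub trait ParScope<'env, 'scope>
   where
       'env: 'scope,
   {
       // one handle type per spawned result type
       type Handle<T>: ParHandle<'scope, T>
       where
           Self: 'scope,
           T: 'scope;

       // mirrors std::thread::Scope::spawn
       fn spawn<F, T>(&'scope self, f: F) -> Self::Handle<T>
       where
           F: FnOnce() -> T + Send + 'scope,
           T: Send + 'scope;
   }

   std::thread::Scope and ScopedJoinHandle satisfy these shapes directly,
   which is what the StdHandle wrapper relies on.
*/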
type JoinResult = Result>; + +pub trait ParHandle<'scope, T> { + fn join(self) -> JoinResult; +} diff --git a/src/orch/par_scope.rs b/src/orch/par_scope.rs new file mode 100644 index 0000000..2941955 --- /dev/null +++ b/src/orch/par_scope.rs @@ -0,0 +1,16 @@ +use super::par_handle::ParHandle; + +pub trait ParScope<'env, 'scope> +where + 'env: 'scope, +{ + type Handle: ParHandle<'scope, T> + where + Self: 'scope, + T: 'scope; + + fn spawn(&'scope self, f: F) -> Self::Handle + where + F: FnOnce() -> T + Send + 'scope, + T: Send + 'scope; +} From 88940a1ca09b7e62f2ed8afa294e64339f7d9cd1 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:44:51 +0200 Subject: [PATCH 077/264] orchestrator is used to spawn threads --- src/orch/implementations/std_orchestrator.rs | 4 ++++ src/orch/mod.rs | 2 ++ src/orch/par_handle.rs | 2 ++ src/runner/parallel_runner_compute/collect_arbitrary.rs | 7 ++++--- src/runner/parallel_runner_compute/collect_ordered.rs | 7 ++++--- src/runner/parallel_runner_compute/next.rs | 6 +++--- src/runner/parallel_runner_compute/next_any.rs | 6 +++--- src/runner/parallel_runner_compute/reduce.rs | 6 +++--- 8 files changed, 25 insertions(+), 15 deletions(-) diff --git a/src/orch/implementations/std_orchestrator.rs b/src/orch/implementations/std_orchestrator.rs index a8f9d2b..f6b2239 100644 --- a/src/orch/implementations/std_orchestrator.rs +++ b/src/orch/implementations/std_orchestrator.rs @@ -9,6 +9,10 @@ impl<'scope, T> ParHandle<'scope, T> for StdHandle<'scope, T> { fn join(self) -> JoinResult { self.0.join() } + + fn is_finished(&self) -> bool { + self.0.is_finished() + } } impl<'env, 'scope> ParScope<'env, 'scope> for std::thread::Scope<'scope, 'env> { diff --git a/src/orch/mod.rs b/src/orch/mod.rs index 422f1bc..bed188b 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -5,5 +5,7 @@ mod par_scope; pub use crate::orch::implementations::StdOrchestrator; pub use orchestrator::Orchestrator; +pub use par_handle::ParHandle; +pub use par_scope::ParScope; pub type DefaultOrchestrator = StdOrchestrator; diff --git a/src/orch/par_handle.rs b/src/orch/par_handle.rs index cba65cc..8f38f7c 100644 --- a/src/orch/par_handle.rs +++ b/src/orch/par_handle.rs @@ -4,4 +4,6 @@ pub type JoinResult = Result>; pub trait ParHandle<'scope, T> { fn join(self) -> JoinResult; + + fn is_finished(&self) -> bool; } diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 17ca503..b86c699 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,8 +1,9 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; +use crate::orch::{Orchestrator, ParHandle, ParScope}; +use crate::runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; -use crate::{orch::Orchestrator, runner::ParallelRunner}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; @@ -42,7 +43,7 @@ where let shared_state = &state; let mut num_spawned = 0; - std::thread::scope(|s| { + C::scope(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned += 1; s.spawn(|| { @@ -94,7 +95,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result: ThreadCollectArbitrary = std::thread::scope(|s| { + let result: ThreadCollectArbitrary = C::scope(|s| { let 
mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 87e3b30..21c95c3 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,7 +1,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; -use crate::orch::Orchestrator; +use crate::orch::{Orchestrator, ParHandle, ParScope}; use crate::runner::parallel_runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_iter::ConcurrentIter; @@ -34,7 +34,8 @@ where let shared_state = &state; let mut num_spawned = 0; - std::thread::scope(|s| { + + C::scope(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned += 1; s.spawn(|| { @@ -80,7 +81,7 @@ where let mut num_spawned = 0; let result: Result>, ::Error> = - std::thread::scope(|s| { + C::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 4a8dcf4..ed8c16f 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,6 +1,6 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::Orchestrator; +use crate::orch::{Orchestrator, ParHandle, ParScope}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -18,7 +18,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let results = std::thread::scope(|s| { + let results = C::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -71,7 +71,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result: Result>, _> = std::thread::scope(|s| { + let result: Result>, _> = C::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index d5ae422..16174b7 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,6 +1,6 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::Orchestrator; +use crate::orch::{Orchestrator, ParHandle, ParScope}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -18,7 +18,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result = std::thread::scope(|s| { + let result = C::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -64,7 +64,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result = std::thread::scope(|s| { + let result = C::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index d89ce26..e7963da 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ 
b/src/runner/parallel_runner_compute/reduce.rs @@ -1,6 +1,6 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Reduce}; -use crate::orch::Orchestrator; +use crate::orch::{Orchestrator, ParHandle, ParScope}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -27,7 +27,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let results = std::thread::scope(|s| { + let results = C::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -83,7 +83,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result: Result, _> = std::thread::scope(|s| { + let result: Result, _> = C::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { From c7145c9b2ad7e64b3b012e03e6b69a0e82401801 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 16:48:25 +0200 Subject: [PATCH 078/264] minor --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e2e5bd5..91c6140 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,7 @@ rayon = "1.10.0" test-case = "3.3.1" [[bench]] -name = "find" +name = "reduce_map_filter" harness = false [features] From 66a992b598e6e9a3a7de5662e6893c7ecf58c309 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 17:47:08 +0200 Subject: [PATCH 079/264] redesign thread-pool and orchestrator traits --- .../default_std_orchestrator.rs | 17 +++++++ src/orch/implementations/mod.rs | 5 +- src/orch/implementations/std_thread_pool.rs | 49 +++++++++++++++++++ src/orch/mod.rs | 10 ++-- src/orch/orchestrator.rs | 12 ++--- src/orch/thread_pool/implementations/mod.rs | 3 ++ .../std_default_thread_pool.rs} | 37 +++----------- src/orch/thread_pool/mod.rs | 8 +++ src/orch/{ => thread_pool}/par_handle.rs | 0 src/orch/{ => thread_pool}/par_scope.rs | 2 +- src/orch/thread_pool/par_thread_pool.rs | 11 +++++ .../collect_arbitrary.rs | 10 ++-- .../collect_ordered.rs | 10 ++-- src/runner/parallel_runner_compute/next.rs | 10 ++-- .../parallel_runner_compute/next_any.rs | 10 ++-- src/runner/parallel_runner_compute/reduce.rs | 10 ++-- 16 files changed, 132 insertions(+), 72 deletions(-) create mode 100644 src/orch/implementations/default_std_orchestrator.rs create mode 100644 src/orch/implementations/std_thread_pool.rs create mode 100644 src/orch/thread_pool/implementations/mod.rs rename src/orch/{implementations/std_orchestrator.rs => thread_pool/implementations/std_default_thread_pool.rs} (53%) create mode 100644 src/orch/thread_pool/mod.rs rename src/orch/{ => thread_pool}/par_handle.rs (100%) rename src/orch/{ => thread_pool}/par_scope.rs (89%) create mode 100644 src/orch/thread_pool/par_thread_pool.rs diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs new file mode 100644 index 0000000..1221f14 --- /dev/null +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -0,0 +1,17 @@ +use crate::{ + DefaultRunner, + orch::{Orchestrator, thread_pool::implementations::StdDefaultThreadPool}, +}; + +#[derive(Default)] +pub struct DefaultStdOrchestrator(StdDefaultThreadPool); + +impl Orchestrator for DefaultStdOrchestrator { + type Runner = DefaultRunner; + + type ThreadPool = StdDefaultThreadPool; + + fn thread_pool(&self) -> &Self::ThreadPool { + &self.0 + } +} diff --git a/src/orch/implementations/mod.rs 
b/src/orch/implementations/mod.rs index 8e679a7..c657d69 100644 --- a/src/orch/implementations/mod.rs +++ b/src/orch/implementations/mod.rs @@ -1,3 +1,4 @@ -mod std_orchestrator; +mod default_std_orchestrator; +mod std_thread_pool; -pub use std_orchestrator::StdOrchestrator; +pub use default_std_orchestrator::DefaultStdOrchestrator; diff --git a/src/orch/implementations/std_thread_pool.rs b/src/orch/implementations/std_thread_pool.rs new file mode 100644 index 0000000..afb317b --- /dev/null +++ b/src/orch/implementations/std_thread_pool.rs @@ -0,0 +1,49 @@ +// use super::super::{ +// par_handle::{JoinResult, ParHandle}, +// par_scope::ParScope, +// par_thread_pool::ParThreadPool, +// }; + +// pub struct StdHandle<'scope, T>(std::thread::ScopedJoinHandle<'scope, T>); + +// impl<'scope, T> ParHandle<'scope, T> for StdHandle<'scope, T> { +// fn join(self) -> JoinResult { +// self.0.join() +// } + +// fn is_finished(&self) -> bool { +// self.0.is_finished() +// } +// } + +// impl<'env, 'scope> ParScope<'env, 'scope> for std::thread::Scope<'scope, 'env> { +// type Handle +// = StdHandle<'scope, T> +// where +// Self: 'scope, +// T: 'scope; + +// fn spawn(&'scope self, f: F) -> Self::Handle +// where +// F: FnOnce() -> T + Send + 'scope, +// T: Send + 'scope, +// { +// StdHandle(self.spawn(f)) +// } +// } + +// pub struct StdThreadPool; + +// impl ParThreadPool for StdThreadPool { +// type Scope<'env, 'scope> +// = std::thread::Scope<'scope, 'env> +// where +// 'env: 'scope; + +// fn scope<'env, F, T>(f: F) -> T +// where +// F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, +// { +// std::thread::scope(f) +// } +// } diff --git a/src/orch/mod.rs b/src/orch/mod.rs index bed188b..90963dc 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -1,11 +1,9 @@ mod implementations; mod orchestrator; -mod par_handle; -mod par_scope; +mod thread_pool; -pub use crate::orch::implementations::StdOrchestrator; +pub use crate::orch::implementations::DefaultStdOrchestrator; pub use orchestrator::Orchestrator; -pub use par_handle::ParHandle; -pub use par_scope::ParScope; +pub use thread_pool::{ParHandle, ParScope, ParThreadPool}; -pub type DefaultOrchestrator = StdOrchestrator; +pub type DefaultOrchestrator = DefaultStdOrchestrator; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index fc00512..7af4233 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,15 +1,11 @@ -use super::par_scope::ParScope; -use crate::{ParallelRunner, Params, runner::ComputationKind}; +use crate::{ParallelRunner, Params, orch::thread_pool::ParThreadPool, runner::ComputationKind}; pub trait Orchestrator { type Runner: ParallelRunner; - type Scope<'env, 'scope>: ParScope<'env, 'scope> - where - 'env: 'scope; + type ThreadPool: ParThreadPool; fn new_runner( - &self, kind: ComputationKind, params: Params, initial_input_len: Option, @@ -17,7 +13,5 @@ pub trait Orchestrator { ::new(kind, params, initial_input_len) } - fn scope<'env, F, T>(f: F) -> T - where - F: for<'scope> FnOnce(&'scope Self::Scope<'env, 'scope>) -> T; + fn thread_pool(&self) -> &Self::ThreadPool; } diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs new file mode 100644 index 0000000..e06ed95 --- /dev/null +++ b/src/orch/thread_pool/implementations/mod.rs @@ -0,0 +1,3 @@ +mod std_default_thread_pool; + +pub use std_default_thread_pool::StdDefaultThreadPool; diff --git a/src/orch/implementations/std_orchestrator.rs 
b/src/orch/thread_pool/implementations/std_default_thread_pool.rs similarity index 53% rename from src/orch/implementations/std_orchestrator.rs rename to src/orch/thread_pool/implementations/std_default_thread_pool.rs index f6b2239..afcf9bb 100644 --- a/src/orch/implementations/std_orchestrator.rs +++ b/src/orch/thread_pool/implementations/std_default_thread_pool.rs @@ -1,7 +1,4 @@ -use super::super::par_handle::{JoinResult, ParHandle}; -use super::super::par_scope::ParScope; -use crate::{ParallelRunner, orch::Orchestrator}; -use std::marker::PhantomData; +use crate::orch::{ParHandle, ParScope, ParThreadPool, thread_pool::par_handle::JoinResult}; pub struct StdHandle<'scope, T>(std::thread::ScopedJoinHandle<'scope, T>); @@ -15,7 +12,7 @@ impl<'scope, T> ParHandle<'scope, T> for StdHandle<'scope, T> { } } -impl<'env, 'scope> ParScope<'env, 'scope> for std::thread::Scope<'scope, 'env> { +impl<'scope, 'env> ParScope<'scope, 'env> for std::thread::Scope<'scope, 'env> { type Handle = StdHandle<'scope, T> where @@ -31,37 +28,19 @@ impl<'env, 'scope> ParScope<'env, 'scope> for std::thread::Scope<'scope, 'env> { } } -pub struct StdOrchestrator -where - R: ParallelRunner, -{ - r: PhantomData, -} - -impl Default for StdOrchestrator -where - R: ParallelRunner, -{ - fn default() -> Self { - Self { r: PhantomData } - } -} - -impl Orchestrator for StdOrchestrator -where - R: ParallelRunner, -{ - type Runner = R; +#[derive(Default)] +pub struct StdDefaultThreadPool; - type Scope<'env, 'scope> +impl ParThreadPool for StdDefaultThreadPool { + type Scope<'scope, 'env> = std::thread::Scope<'scope, 'env> where 'env: 'scope; - fn scope<'env, F, T>(f: F) -> T + fn scope<'env, F, T>(&'env self, f: F) -> T where F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, { - std::thread::scope(|s| f(s)) + std::thread::scope(f) } } diff --git a/src/orch/thread_pool/mod.rs b/src/orch/thread_pool/mod.rs new file mode 100644 index 0000000..57e4e61 --- /dev/null +++ b/src/orch/thread_pool/mod.rs @@ -0,0 +1,8 @@ +pub mod implementations; +mod par_handle; +mod par_scope; +mod par_thread_pool; + +pub use par_handle::ParHandle; +pub use par_scope::ParScope; +pub use par_thread_pool::ParThreadPool; diff --git a/src/orch/par_handle.rs b/src/orch/thread_pool/par_handle.rs similarity index 100% rename from src/orch/par_handle.rs rename to src/orch/thread_pool/par_handle.rs diff --git a/src/orch/par_scope.rs b/src/orch/thread_pool/par_scope.rs similarity index 89% rename from src/orch/par_scope.rs rename to src/orch/thread_pool/par_scope.rs index 2941955..b9918c5 100644 --- a/src/orch/par_scope.rs +++ b/src/orch/thread_pool/par_scope.rs @@ -1,6 +1,6 @@ use super::par_handle::ParHandle; -pub trait ParScope<'env, 'scope> +pub trait ParScope<'scope, 'env> where 'env: 'scope, { diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs new file mode 100644 index 0000000..28cc399 --- /dev/null +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -0,0 +1,11 @@ +use super::par_scope::ParScope; + +pub trait ParThreadPool { + type Scope<'scope, 'env>: ParScope<'scope, 'env> + where + 'env: 'scope; + + fn scope<'env, F, T>(&'env self, f: F) -> T + where + F: for<'scope> FnOnce(&'scope Self::Scope<'scope, 'env>) -> T; +} diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index b86c699..f01e7b6 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs 
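/* From this patch on, the compute functions no longer reach std::thread::scope
   directly: the runner is built through the statically dispatched
   C::new_runner, and threads are spawned through the orchestrator's pool, as
   the hunks below show. A condensed sketch of the resulting shape, with
   bounds and thread bodies elided; illustrative only:

   fn compute<C: Orchestrator, I: ConcurrentIter>(orchestrator: C, params: Params, iter: I) {
       let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len());
       let state = runner.new_shared_state();
       let shared_state = &state;
       let mut num_spawned = 0;
       orchestrator.thread_pool().scope(|s| {
           // keep spawning while the runner asks for more threads
           while runner.do_spawn_new(num_spawned, shared_state, &iter) {
               num_spawned += 1;
               s.spawn(|| {
                   // thread body: pull chunks from `iter`, fold, return a result
               });
           }
       }); // returns once all spawned handles have completed
   }
*/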
@@ -1,7 +1,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; -use crate::orch::{Orchestrator, ParHandle, ParScope}; +use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_bag::ConcurrentBag; @@ -29,7 +29,7 @@ where let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let mut bag: ConcurrentBag = pinned_vec.into(); match iter.try_get_len() { @@ -43,7 +43,7 @@ where let shared_state = &state; let mut num_spawned = 0; - C::scope(|s| { + orchestrator.thread_pool().scope(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned += 1; s.spawn(|| { @@ -81,7 +81,7 @@ where let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let mut bag: ConcurrentBag = pinned_vec.into(); match iter.try_get_len() { @@ -95,7 +95,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result: ThreadCollectArbitrary = C::scope(|s| { + let result: ThreadCollectArbitrary = orchestrator.thread_pool().scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 21c95c3..5e60cc9 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,7 +1,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; -use crate::orch::{Orchestrator, ParHandle, ParScope}; +use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::parallel_runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_iter::ConcurrentIter; @@ -25,7 +25,7 @@ where P: IntoConcurrentPinnedVec, { let offset = pinned_vec.len(); - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let o_bag: ConcurrentOrderedBag = pinned_vec.into(); @@ -35,7 +35,7 @@ where let mut num_spawned = 0; - C::scope(|s| { + orchestrator.thread_pool().scope(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned += 1; s.spawn(|| { @@ -73,7 +73,7 @@ where X1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); // compute let state = runner.new_shared_state(); @@ -81,7 +81,7 @@ where let mut num_spawned = 0; let result: Result>, ::Error> = - C::scope(|s| { + orchestrator.thread_pool().scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs 
index ed8c16f..0bef436 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,6 +1,6 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::{Orchestrator, ParHandle, ParScope}; +use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -12,13 +12,13 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; let mut num_spawned = 0; - let results = C::scope(|s| { + let results = orchestrator.thread_pool().scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -65,13 +65,13 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; let mut num_spawned = 0; - let result: Result>, _> = C::scope(|s| { + let result: Result>, _> = orchestrator.thread_pool().scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 16174b7..8ba5472 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,6 +1,6 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{Orchestrator, ParHandle, ParScope}; +use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -12,13 +12,13 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; let mut num_spawned = 0; - let result = C::scope(|s| { + let result = orchestrator.thread_pool().scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -58,13 +58,13 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; let mut num_spawned = 0; - let result = C::scope(|s| { + let result = orchestrator.thread_pool().scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index e7963da..4538a6f 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,6 +1,6 @@ use crate::generic_values::Values; use 
crate::generic_values::runner_results::{Fallibility, Reduce}; -use crate::orch::{Orchestrator, ParHandle, ParScope}; +use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -21,13 +21,13 @@ where Red: Fn(O, O) -> O + Sync, O: Send, { - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; let mut num_spawned = 0; - let results = C::scope(|s| { + let results = orchestrator.thread_pool().scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -77,13 +77,13 @@ where X1: Fn(I::Item) -> Vo + Sync, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { - let runner = orchestrator.new_runner(ComputationKind::Collect, params, iter.try_get_len()); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); let shared_state = &state; let mut num_spawned = 0; - let result: Result, _> = C::scope(|s| { + let result: Result, _> = orchestrator.thread_pool().scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { From 9591fce4a3904e5a50e3bf99b97850a23c9442f3 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 11 Sep 2025 17:53:06 +0200 Subject: [PATCH 080/264] reorganize default os pool --- .../default_std_orchestrator.rs | 6 +-- src/orch/thread_pool/implementations/mod.rs | 5 +- .../std_default_thread_pool.rs | 46 ------------------- .../implementations/std_os_thread_pool.rs | 18 ++++++++ .../implementations/std_scoped_threads.rs | 27 +++++++++++ 5 files changed, 51 insertions(+), 51 deletions(-) delete mode 100644 src/orch/thread_pool/implementations/std_default_thread_pool.rs create mode 100644 src/orch/thread_pool/implementations/std_os_thread_pool.rs create mode 100644 src/orch/thread_pool/implementations/std_scoped_threads.rs diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index 1221f14..a996ab2 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -1,15 +1,15 @@ use crate::{ DefaultRunner, - orch::{Orchestrator, thread_pool::implementations::StdDefaultThreadPool}, + orch::{Orchestrator, thread_pool::implementations::StdOsThreadPool}, }; #[derive(Default)] -pub struct DefaultStdOrchestrator(StdDefaultThreadPool); +pub struct DefaultStdOrchestrator(StdOsThreadPool); impl Orchestrator for DefaultStdOrchestrator { type Runner = DefaultRunner; - type ThreadPool = StdDefaultThreadPool; + type ThreadPool = StdOsThreadPool; fn thread_pool(&self) -> &Self::ThreadPool { &self.0 diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index e06ed95..4785a33 100644 --- a/src/orch/thread_pool/implementations/mod.rs +++ b/src/orch/thread_pool/implementations/mod.rs @@ -1,3 +1,4 @@ -mod std_default_thread_pool; +mod std_os_thread_pool; +mod std_scoped_threads; -pub use std_default_thread_pool::StdDefaultThreadPool; +pub use std_os_thread_pool::StdOsThreadPool; diff --git a/src/orch/thread_pool/implementations/std_default_thread_pool.rs b/src/orch/thread_pool/implementations/std_default_thread_pool.rs deleted file mode 100644 index 
afcf9bb..0000000 --- a/src/orch/thread_pool/implementations/std_default_thread_pool.rs +++ /dev/null @@ -1,46 +0,0 @@ -use crate::orch::{ParHandle, ParScope, ParThreadPool, thread_pool::par_handle::JoinResult}; - -pub struct StdHandle<'scope, T>(std::thread::ScopedJoinHandle<'scope, T>); - -impl<'scope, T> ParHandle<'scope, T> for StdHandle<'scope, T> { - fn join(self) -> JoinResult { - self.0.join() - } - - fn is_finished(&self) -> bool { - self.0.is_finished() - } -} - -impl<'scope, 'env> ParScope<'scope, 'env> for std::thread::Scope<'scope, 'env> { - type Handle - = StdHandle<'scope, T> - where - Self: 'scope, - T: 'scope; - - fn spawn(&'scope self, f: F) -> Self::Handle - where - F: FnOnce() -> T + Send + 'scope, - T: Send + 'scope, - { - StdHandle(self.spawn(f)) - } -} - -#[derive(Default)] -pub struct StdDefaultThreadPool; - -impl ParThreadPool for StdDefaultThreadPool { - type Scope<'scope, 'env> - = std::thread::Scope<'scope, 'env> - where - 'env: 'scope; - - fn scope<'env, F, T>(&'env self, f: F) -> T - where - F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, - { - std::thread::scope(f) - } -} diff --git a/src/orch/thread_pool/implementations/std_os_thread_pool.rs b/src/orch/thread_pool/implementations/std_os_thread_pool.rs new file mode 100644 index 0000000..0f79fc5 --- /dev/null +++ b/src/orch/thread_pool/implementations/std_os_thread_pool.rs @@ -0,0 +1,18 @@ +use crate::orch::ParThreadPool; + +#[derive(Default)] +pub struct StdOsThreadPool; + +impl ParThreadPool for StdOsThreadPool { + type Scope<'scope, 'env> + = std::thread::Scope<'scope, 'env> + where + 'env: 'scope; + + fn scope<'env, F, T>(&'env self, f: F) -> T + where + F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, + { + std::thread::scope(f) + } +} diff --git a/src/orch/thread_pool/implementations/std_scoped_threads.rs b/src/orch/thread_pool/implementations/std_scoped_threads.rs new file mode 100644 index 0000000..fb9ee20 --- /dev/null +++ b/src/orch/thread_pool/implementations/std_scoped_threads.rs @@ -0,0 +1,27 @@ +use crate::orch::{ParHandle, ParScope, thread_pool::par_handle::JoinResult}; + +impl<'scope, T> ParHandle<'scope, T> for std::thread::ScopedJoinHandle<'scope, T> { + fn join(self) -> JoinResult { + std::thread::ScopedJoinHandle::join(self) + } + + fn is_finished(&self) -> bool { + std::thread::ScopedJoinHandle::is_finished(self) + } +} + +impl<'scope, 'env> ParScope<'scope, 'env> for std::thread::Scope<'scope, 'env> { + type Handle + = std::thread::ScopedJoinHandle<'scope, T> + where + Self: 'scope, + T: 'scope; + + fn spawn(&'scope self, f: F) -> Self::Handle + where + F: FnOnce() -> T + Send + 'scope, + T: Send + 'scope, + { + self.spawn(f) + } +} From 0533c94b87c91ca9b9c124663c55ca70e3364ca3 Mon Sep 17 00:00:00 2001 From: orxfun Date: Fri, 12 Sep 2025 11:15:43 +0200 Subject: [PATCH 081/264] pool wip --- Cargo.toml | 5 +- .../implementations/impl_scoped_threadpool.rs | 82 +++++++++++++++++++ .../implementations/impl_threadpool.rs | 62 ++++++++++++++ src/orch/thread_pool/implementations/mod.rs | 6 ++ 4 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 src/orch/thread_pool/implementations/impl_scoped_threadpool.rs create mode 100644 src/orch/thread_pool/implementations/impl_threadpool.rs diff --git a/Cargo.toml b/Cargo.toml index 91c6140..31da669 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,6 +22,9 @@ orx-iterable = { version = "1.3.0", default-features = false } orx-priority-queue = { version = "1.7.0", default-features = false } 
orx-concurrent-iter = { version = "3.1.0", default-features = false } rayon = { version = "1.10.0", optional = true } +# optional thread pool dependencies +threadpool = { version = "1.8.1", optional = true } +scoped_threadpool = { version = "0.1.9", optional = true } [dev-dependencies] chrono = "0.4.39" @@ -39,5 +42,5 @@ name = "reduce_map_filter" harness = false [features] -default = [] +default = ["scoped_threadpool"] generic_iterator = ["rayon"] diff --git a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs new file mode 100644 index 0000000..0768979 --- /dev/null +++ b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs @@ -0,0 +1,82 @@ +use crate::orch::{ParHandle, ParScope, ParThreadPool, thread_pool::par_handle::JoinResult}; +use orx_concurrent_bag::ConcurrentBag; +use scoped_threadpool::{Pool, Scope}; + +pub struct ThreadPoolHandle<'scope, T> { + idx: usize, + result: Option, + bag: &'scope ConcurrentBag, +} + +impl<'scope, T> ParHandle<'scope, T> for ThreadPoolHandle<'scope, T> { + fn join(self) -> JoinResult { + todo!() + } + + fn is_finished(&self) -> bool { + todo!() + } +} + +pub struct ScopedThreadPoolScope<'scope, 'env> +where + 'env: 'scope, +{ + inner_scope: Scope<'env, 'scope>, + bag: ConcurrentBag, +} + +impl<'scope, 'env> ParScope<'scope, 'env> for ScopedThreadPoolScope<'scope, 'env> +where + 'env: 'scope, +{ + type Handle + = ThreadPoolHandle<'scope, T> + where + Self: 'scope, + T: 'scope; + + fn spawn(&'scope self, f: F) -> Self::Handle + where + F: FnOnce() -> T + Send + 'scope, + T: Send + 'scope, + { + todo!() + } +} + +// impl ParThreadPool for Pool { +// type Scope<'scope, 'env> +// = ScopedThreadPoolScope<'scope, 'env> +// where +// 'env: 'scope; + +// fn scope<'env, F, T>(&'env self, f: F) -> T +// where +// F: for<'scope> FnOnce(&'scope ScopedThreadPoolScope<'scope, 'env>) -> T, +// { +// self.scoped(|s| {}); +// todo!() +// } +// } + +fn main() { + // Create a threadpool holding 4 threads + let mut pool = Pool::new(4); + + let mut vec = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + // Use the threads as scoped threads that can + // reference anything outside this closure + pool.scoped(|scoped| { + // Create references to each element in the vector ... + for e in &mut vec { + // ... 
and add 1 to it in a separate thread + scoped.execute(move || { + *e += 1; + }); + } + }); + + assert_eq!(vec, vec![1, 2, 3, 4, 5, 6, 7, 8]); +} diff --git a/src/orch/thread_pool/implementations/impl_threadpool.rs b/src/orch/thread_pool/implementations/impl_threadpool.rs new file mode 100644 index 0000000..a4ab0bb --- /dev/null +++ b/src/orch/thread_pool/implementations/impl_threadpool.rs @@ -0,0 +1,62 @@ +use crate::orch::{ParHandle, ParScope, ParThreadPool, thread_pool::par_handle::JoinResult}; +use orx_concurrent_bag::ConcurrentBag; +use std::marker::PhantomData; +use threadpool::ThreadPool; + +pub struct ThreadPoolHandle<'scope, T> { + idx: usize, + result: Option, + bag: &'scope ConcurrentBag, +} + +impl<'scope, T> ParHandle<'scope, T> for ThreadPoolHandle<'scope, T> { + fn join(self) -> JoinResult { + todo!() + } + + fn is_finished(&self) -> bool { + todo!() + } +} + +pub struct ThreadPoolScope<'scope, 'env> +where + 'env: 'scope, +{ + pool: &'env ThreadPool, + bag: ConcurrentBag, + p: PhantomData<&'scope ()>, +} + +impl<'scope, 'env> ParScope<'scope, 'env> for ThreadPoolScope<'scope, 'env> +where + 'env: 'scope, +{ + type Handle + = ThreadPoolHandle<'scope, T> + where + Self: 'scope, + T: 'scope; + + fn spawn(&'scope self, f: F) -> Self::Handle + where + F: FnOnce() -> T + Send + 'scope, + T: Send + 'scope, + { + todo!() + } +} + +impl ParThreadPool for ThreadPool { + type Scope<'scope, 'env> + = ThreadPoolScope<'scope, 'env> + where + 'env: 'scope; + + fn scope<'env, F, T>(&'env self, f: F) -> T + where + F: for<'scope> FnOnce(&'scope ThreadPoolScope<'scope, 'env>) -> T, + { + todo!() + } +} diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index 4785a33..6130842 100644 --- a/src/orch/thread_pool/implementations/mod.rs +++ b/src/orch/thread_pool/implementations/mod.rs @@ -1,4 +1,10 @@ mod std_os_thread_pool; mod std_scoped_threads; +#[cfg(feature = "threadpool")] +mod impl_threadpool; + +#[cfg(feature = "scoped_threadpool")] +mod impl_scoped_threadpool; + pub use std_os_thread_pool::StdOsThreadPool; From 35e63d260f683811f913ab41423b009b62db7af2 Mon Sep 17 00:00:00 2001 From: orxfun Date: Fri, 12 Sep 2025 11:17:52 +0200 Subject: [PATCH 082/264] thread pool gets a mut reference --- src/orch/implementations/default_std_orchestrator.rs | 4 ++-- src/orch/orchestrator.rs | 2 +- src/orch/thread_pool/implementations/std_os_thread_pool.rs | 2 +- src/orch/thread_pool/par_thread_pool.rs | 2 +- src/runner/parallel_runner_compute/collect_arbitrary.rs | 4 ++-- src/runner/parallel_runner_compute/collect_ordered.rs | 4 ++-- src/runner/parallel_runner_compute/next.rs | 4 ++-- src/runner/parallel_runner_compute/next_any.rs | 4 ++-- src/runner/parallel_runner_compute/reduce.rs | 4 ++-- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index a996ab2..ecbf7e9 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -11,7 +11,7 @@ impl Orchestrator for DefaultStdOrchestrator { type ThreadPool = StdOsThreadPool; - fn thread_pool(&self) -> &Self::ThreadPool { - &self.0 + fn thread_pool(&mut self) -> &mut Self::ThreadPool { + &mut self.0 } } diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 7af4233..44e5066 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -13,5 +13,5 @@ pub trait Orchestrator { ::new(kind, params, 
initial_input_len) } - fn thread_pool(&self) -> &Self::ThreadPool; + fn thread_pool(&mut self) -> &mut Self::ThreadPool; } diff --git a/src/orch/thread_pool/implementations/std_os_thread_pool.rs b/src/orch/thread_pool/implementations/std_os_thread_pool.rs index 0f79fc5..a93f67b 100644 --- a/src/orch/thread_pool/implementations/std_os_thread_pool.rs +++ b/src/orch/thread_pool/implementations/std_os_thread_pool.rs @@ -9,7 +9,7 @@ impl ParThreadPool for StdOsThreadPool { where 'env: 'scope; - fn scope<'env, F, T>(&'env self, f: F) -> T + fn scope<'env, F, T>(&'env mut self, f: F) -> T where F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, { diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index 28cc399..bded15f 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -5,7 +5,7 @@ pub trait ParThreadPool { where 'env: 'scope; - fn scope<'env, F, T>(&'env self, f: F) -> T + fn scope<'env, F, T>(&'env mut self, f: F) -> T where F: for<'scope> FnOnce(&'scope Self::Scope<'scope, 'env>) -> T; } diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index f01e7b6..e1c723e 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -12,7 +12,7 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; #[cfg(test)] pub fn m( - orchestrator: C, + mut orchestrator: C, params: Params, iter: I, map1: M1, @@ -64,7 +64,7 @@ where // x pub fn x( - orchestrator: C, + mut orchestrator: C, params: Params, iter: I, xap1: X1, diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 5e60cc9..ceced76 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -11,7 +11,7 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m pub fn m( - orchestrator: C, + mut orchestrator: C, params: Params, iter: I, map1: M1, @@ -58,7 +58,7 @@ where // x pub fn x( - orchestrator: C, + mut orchestrator: C, params: Params, iter: I, xap1: X1, diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 0bef436..6b38dd7 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -5,7 +5,7 @@ use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(orchestrator: C, params: Params, iter: I, map1: M1) -> (usize, Option) +pub fn m(mut orchestrator: C, params: Params, iter: I, map1: M1) -> (usize, Option) where C: Orchestrator, I: ConcurrentIter, @@ -53,7 +53,7 @@ type ResultNext = Result< >; pub fn x( - orchestrator: C, + mut orchestrator: C, params: Params, iter: I, xap1: X1, diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 8ba5472..7d1b6f2 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -5,7 +5,7 @@ use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(orchestrator: C, params: Params, iter: I, map1: M1) -> (usize, Option) +pub fn m(mut orchestrator: C, params: Params, iter: I, map1: M1) -> (usize, Option) where C: Orchestrator, I: 
ConcurrentIter, @@ -46,7 +46,7 @@ type ResultNextAny = Result::Item>, <::Fallibility as Fallibility>::Error>; pub fn x( - orchestrator: C, + mut orchestrator: C, params: Params, iter: I, xap1: X1, diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 4538a6f..fffa44a 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -8,7 +8,7 @@ use orx_concurrent_iter::ConcurrentIter; // m pub fn m( - orchestrator: C, + mut orchestrator: C, params: Params, iter: I, map1: M1, @@ -63,7 +63,7 @@ type ResultReduce = Result::Item>, <::Fallibility as Fallibility>::Error>; pub fn x( - orchestrator: C, + mut orchestrator: C, params: Params, iter: I, xap1: X1, From b57f765edc8b3a02f812b74837e174241fee8591 Mon Sep 17 00:00:00 2001 From: orxfun Date: Fri, 12 Sep 2025 12:01:20 +0200 Subject: [PATCH 083/264] wip --- Cargo.toml | 2 +- .../implementations/impl_rayon_threadpool.rs | 52 +++++++++++++++++++ .../implementations/impl_scoped_threadpool.rs | 44 ++++++++-------- .../implementations/impl_threadpool.rs | 2 +- src/orch/thread_pool/implementations/mod.rs | 3 ++ 5 files changed, 80 insertions(+), 23 deletions(-) create mode 100644 src/orch/thread_pool/implementations/impl_rayon_threadpool.rs diff --git a/Cargo.toml b/Cargo.toml index 31da669..9da1175 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,5 +42,5 @@ name = "reduce_map_filter" harness = false [features] -default = ["scoped_threadpool"] +default = ["threadpool", "scoped_threadpool", "rayon"] generic_iterator = ["rayon"] diff --git a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs new file mode 100644 index 0000000..bfb4cce --- /dev/null +++ b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs @@ -0,0 +1,52 @@ +use crate::orch::{ParHandle, ParScope, ParThreadPool, thread_pool::par_handle::JoinResult}; +use orx_concurrent_bag::ConcurrentBag; +use rayon::{Scope, ThreadPool}; + +pub struct ThreadPoolHandle<'scope, T> { + idx: usize, + result: Option, + bag: &'scope ConcurrentBag, +} + +impl<'scope, T> ParHandle<'scope, T> for ThreadPoolHandle<'scope, T> { + fn join(self) -> JoinResult { + todo!() + } + + fn is_finished(&self) -> bool { + todo!() + } +} + +impl<'scope, 'env> ParScope<'scope, 'env> for Scope<'scope> +where + 'env: 'scope, +{ + type Handle + = ThreadPoolHandle<'scope, T> + where + Self: 'scope, + T: 'scope; + + fn spawn(&'scope self, f: F) -> Self::Handle + where + F: FnOnce() -> T + Send + 'scope, + T: Send + 'scope, + { + todo!() + } +} + +// impl ParThreadPool for ThreadPool { +// type Scope<'scope, 'env> +// = Scope<'scope> +// where +// 'env: 'scope; + +// fn scope<'env, F, T>(&'env mut self, f: F) -> T +// where +// F: for<'scope> FnOnce(&'scope Scope<'scope>) -> T, +// { +// todo!() +// } +// } diff --git a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs index 0768979..919af5d 100644 --- a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs @@ -18,15 +18,7 @@ impl<'scope, T> ParHandle<'scope, T> for ThreadPoolHandle<'scope, T> { } } -pub struct ScopedThreadPoolScope<'scope, 'env> -where - 'env: 'scope, -{ - inner_scope: Scope<'env, 'scope>, - bag: ConcurrentBag, -} - -impl<'scope, 'env> ParScope<'scope, 'env> for ScopedThreadPoolScope<'scope, 'env> +impl<'scope, 'env> 
ParScope<'scope, 'env> for Scope<'env, 'scope> where 'env: 'scope, { @@ -45,21 +37,31 @@ where } } -// impl ParThreadPool for Pool { -// type Scope<'scope, 'env> -// = ScopedThreadPoolScope<'scope, 'env> -// where -// 'env: 'scope; +impl ParThreadPool for Pool { + type Scope<'scope, 'env> + = Scope<'env, 'scope> + where + 'env: 'scope; -// fn scope<'env, F, T>(&'env self, f: F) -> T -// where -// F: for<'scope> FnOnce(&'scope ScopedThreadPoolScope<'scope, 'env>) -> T, -// { -// self.scoped(|s| {}); -// todo!() -// } + fn scope<'env, F, T>(&'env mut self, f: F) -> T + where + F: for<'scope> FnOnce(&'scope Scope<'env, 'scope>) -> T, + { + // self.scoped(f); + todo!() + } +} + +// fn turn<'scope, 'env: 'scope, T>( +// f: impl FnOnce(&'scope Scope<'env, 'scope>) -> T, +// ) -> impl FnOnce(&Scope<'env, 'scope>) -> T { +// f // } +// pub fn scoped<'pool, 'scope, F, R>(&'pool mut self, f: F) -> R +// where +// F: FnOnce(&Scope<'pool, 'scope>) -> R, + fn main() { // Create a threadpool holding 4 threads let mut pool = Pool::new(4); diff --git a/src/orch/thread_pool/implementations/impl_threadpool.rs b/src/orch/thread_pool/implementations/impl_threadpool.rs index a4ab0bb..28812c3 100644 --- a/src/orch/thread_pool/implementations/impl_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_threadpool.rs @@ -53,7 +53,7 @@ impl ParThreadPool for ThreadPool { where 'env: 'scope; - fn scope<'env, F, T>(&'env self, f: F) -> T + fn scope<'env, F, T>(&'env mut self, f: F) -> T where F: for<'scope> FnOnce(&'scope ThreadPoolScope<'scope, 'env>) -> T, { diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index 6130842..ef9954c 100644 --- a/src/orch/thread_pool/implementations/mod.rs +++ b/src/orch/thread_pool/implementations/mod.rs @@ -7,4 +7,7 @@ mod impl_threadpool; #[cfg(feature = "scoped_threadpool")] mod impl_scoped_threadpool; +#[cfg(feature = "rayon")] +mod impl_rayon_threadpool; + pub use std_os_thread_pool::StdOsThreadPool; From 03f5ab5d3af8515ef4264fc36ab72497de23f013 Mon Sep 17 00:00:00 2001 From: orxfun Date: Fri, 12 Sep 2025 16:10:04 +0200 Subject: [PATCH 084/264] immut --- src/orch/thread_pool/implementations/impl_scoped_threadpool.rs | 2 +- src/orch/thread_pool/implementations/impl_threadpool.rs | 2 +- src/orch/thread_pool/implementations/std_os_thread_pool.rs | 2 +- src/orch/thread_pool/par_thread_pool.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs index 919af5d..8500bcb 100644 --- a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs @@ -43,7 +43,7 @@ impl ParThreadPool for Pool { where 'env: 'scope; - fn scope<'env, F, T>(&'env mut self, f: F) -> T + fn scope<'env, F, T>(&'env self, f: F) -> T where F: for<'scope> FnOnce(&'scope Scope<'env, 'scope>) -> T, { diff --git a/src/orch/thread_pool/implementations/impl_threadpool.rs b/src/orch/thread_pool/implementations/impl_threadpool.rs index 28812c3..a4ab0bb 100644 --- a/src/orch/thread_pool/implementations/impl_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_threadpool.rs @@ -53,7 +53,7 @@ impl ParThreadPool for ThreadPool { where 'env: 'scope; - fn scope<'env, F, T>(&'env mut self, f: F) -> T + fn scope<'env, F, T>(&'env self, f: F) -> T where F: for<'scope> FnOnce(&'scope ThreadPoolScope<'scope, 'env>) -> T, { diff --git 
a/src/orch/thread_pool/implementations/std_os_thread_pool.rs b/src/orch/thread_pool/implementations/std_os_thread_pool.rs index a93f67b..0f79fc5 100644 --- a/src/orch/thread_pool/implementations/std_os_thread_pool.rs +++ b/src/orch/thread_pool/implementations/std_os_thread_pool.rs @@ -9,7 +9,7 @@ impl ParThreadPool for StdOsThreadPool { where 'env: 'scope; - fn scope<'env, F, T>(&'env mut self, f: F) -> T + fn scope<'env, F, T>(&'env self, f: F) -> T where F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, { diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index bded15f..28cc399 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -5,7 +5,7 @@ pub trait ParThreadPool { where 'env: 'scope; - fn scope<'env, F, T>(&'env mut self, f: F) -> T + fn scope<'env, F, T>(&'env self, f: F) -> T where F: for<'scope> FnOnce(&'scope Self::Scope<'scope, 'env>) -> T; } From 2308366e7a6cf8105747dc24d3e951f10675a6cd Mon Sep 17 00:00:00 2001 From: orxfun Date: Sat, 13 Sep 2025 22:36:26 +0200 Subject: [PATCH 085/264] wip --- .../default_std_orchestrator.rs | 6 +- .../implementations/impl_scoped_threadpool.rs | 4 +- .../implementations/impl_threadpool.rs | 26 +++---- src/orch/thread_pool/implementations/mod.rs | 16 ++-- .../implementations/std_default_pool.rs | 41 ++++++++++ .../implementations/std_os_thread_pool.rs | 18 ----- src/orch/thread_pool/par_thread_pool.rs | 27 ++++++- .../collect_arbitrary.rs | 75 ++++++++++--------- .../collect_ordered.rs | 4 +- src/runner/parallel_runner_compute/next.rs | 4 +- .../parallel_runner_compute/next_any.rs | 4 +- src/runner/parallel_runner_compute/reduce.rs | 4 +- 12 files changed, 137 insertions(+), 92 deletions(-) create mode 100644 src/orch/thread_pool/implementations/std_default_pool.rs delete mode 100644 src/orch/thread_pool/implementations/std_os_thread_pool.rs diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index ecbf7e9..7e117e7 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -1,15 +1,15 @@ use crate::{ DefaultRunner, - orch::{Orchestrator, thread_pool::implementations::StdOsThreadPool}, + orch::{Orchestrator, thread_pool::implementations::StdDefaultPool}, }; #[derive(Default)] -pub struct DefaultStdOrchestrator(StdOsThreadPool); +pub struct DefaultStdOrchestrator(StdDefaultPool); impl Orchestrator for DefaultStdOrchestrator { type Runner = DefaultRunner; - type ThreadPool = StdOsThreadPool; + type ThreadPool = StdDefaultPool; fn thread_pool(&mut self) -> &mut Self::ThreadPool { &mut self.0 diff --git a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs index 8500bcb..965f103 100644 --- a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs @@ -38,12 +38,12 @@ where } impl ParThreadPool for Pool { - type Scope<'scope, 'env> + type ScopeZzz<'scope, 'env> = Scope<'env, 'scope> where 'env: 'scope; - fn scope<'env, F, T>(&'env self, f: F) -> T + fn scope_zzz<'env, F, T>(&'env self, f: F) -> T where F: for<'scope> FnOnce(&'scope Scope<'env, 'scope>) -> T, { diff --git a/src/orch/thread_pool/implementations/impl_threadpool.rs b/src/orch/thread_pool/implementations/impl_threadpool.rs index a4ab0bb..76f8b01 100644 --- 
a/src/orch/thread_pool/implementations/impl_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_threadpool.rs @@ -47,16 +47,16 @@ where } } -impl ParThreadPool for ThreadPool { - type Scope<'scope, 'env> - = ThreadPoolScope<'scope, 'env> - where - 'env: 'scope; - - fn scope<'env, F, T>(&'env self, f: F) -> T - where - F: for<'scope> FnOnce(&'scope ThreadPoolScope<'scope, 'env>) -> T, - { - todo!() - } -} +// impl ParThreadPool for ThreadPool { +// type ScopeZzz<'scope, 'env> +// = ThreadPoolScope<'scope, 'env> +// where +// 'env: 'scope; + +// fn scope_zzz<'env, F, T>(&'env self, f: F) -> T +// where +// F: for<'scope> FnOnce(&'scope ThreadPoolScope<'scope, 'env>) -> T, +// { +// todo!() +// } +// } diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index ef9954c..2653be5 100644 --- a/src/orch/thread_pool/implementations/mod.rs +++ b/src/orch/thread_pool/implementations/mod.rs @@ -1,13 +1,13 @@ -mod std_os_thread_pool; +mod std_default_pool; mod std_scoped_threads; -#[cfg(feature = "threadpool")] -mod impl_threadpool; +// #[cfg(feature = "threadpool")] +// mod impl_threadpool; -#[cfg(feature = "scoped_threadpool")] -mod impl_scoped_threadpool; +// #[cfg(feature = "scoped_threadpool")] +// mod impl_scoped_threadpool; -#[cfg(feature = "rayon")] -mod impl_rayon_threadpool; +// #[cfg(feature = "rayon")] +// mod impl_rayon_threadpool; -pub use std_os_thread_pool::StdOsThreadPool; +pub use std_default_pool::StdDefaultPool; diff --git a/src/orch/thread_pool/implementations/std_default_pool.rs b/src/orch/thread_pool/implementations/std_default_pool.rs new file mode 100644 index 0000000..b6d8d2c --- /dev/null +++ b/src/orch/thread_pool/implementations/std_default_pool.rs @@ -0,0 +1,41 @@ +use crate::orch::ParThreadPool; + +#[derive(Default)] +pub struct StdDefaultPool; + +impl ParThreadPool for StdDefaultPool { + type ScopeZzz<'scope, 'env> + = std::thread::Scope<'scope, 'env> + where + 'env: 'scope; + + fn scope_zzz<'env, F, T>(&'env self, f: F) -> T + where + F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, + { + std::thread::scope(f) + } + + type ScopeRef<'s, 'env, 'scope> + = &'s std::thread::Scope<'s, 'env> + where + 'scope: 's, + 'env: 'scope + 's; + + fn scope<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s std::thread::Scope<'s, 'env>) + Send, + { + std::thread::scope(|s| f(&s)) + } + + fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + s.spawn(|| work()); + } +} diff --git a/src/orch/thread_pool/implementations/std_os_thread_pool.rs b/src/orch/thread_pool/implementations/std_os_thread_pool.rs deleted file mode 100644 index 0f79fc5..0000000 --- a/src/orch/thread_pool/implementations/std_os_thread_pool.rs +++ /dev/null @@ -1,18 +0,0 @@ -use crate::orch::ParThreadPool; - -#[derive(Default)] -pub struct StdOsThreadPool; - -impl ParThreadPool for StdOsThreadPool { - type Scope<'scope, 'env> - = std::thread::Scope<'scope, 'env> - where - 'env: 'scope; - - fn scope<'env, F, T>(&'env self, f: F) -> T - where - F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, - { - std::thread::scope(f) - } -} diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index 28cc399..fba7c7b 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -1,11 +1,32 @@ use 
super::par_scope::ParScope; pub trait ParThreadPool { - type Scope<'scope, 'env>: ParScope<'scope, 'env> + type ScopeZzz<'scope, 'env>: ParScope<'scope, 'env> where 'env: 'scope; - fn scope<'env, F, T>(&'env self, f: F) -> T + fn scope_zzz<'env, F, T>(&'env self, f: F) -> T where - F: for<'scope> FnOnce(&'scope Self::Scope<'scope, 'env>) -> T; + F: for<'scope> FnOnce(&'scope Self::ScopeZzz<'scope, 'env>) -> T; + + type ScopeRef<'s, 'env, 'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + } + + fn scope<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(Self::ScopeRef<'s, 'env, 'scope>) + Send, + { + todo!() + } } diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index e1c723e..473e6f3 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -43,7 +43,7 @@ where let shared_state = &state; let mut num_spawned = 0; - orchestrator.thread_pool().scope(|s| { + orchestrator.thread_pool().scope_zzz(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned += 1; s.spawn(|| { @@ -95,50 +95,51 @@ where let shared_state = &state; let mut num_spawned = 0; - let result: ThreadCollectArbitrary = orchestrator.thread_pool().scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; - handles.push(s.spawn(|| { - thread::collect_arbitrary::x( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &xap1, - &bag, - ) - })); - } - - let mut early_exit_result = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } + let result: ThreadCollectArbitrary = + orchestrator.thread_pool().scope_zzz(|s| { + let mut handles = vec![]; + + while runner.do_spawn_new(num_spawned, shared_state, &iter) { + num_spawned += 1; + handles.push(s.spawn(|| { + thread::collect_arbitrary::x( + runner.new_thread_runner(shared_state), + &iter, + shared_state, + &xap1, + &bag, + ) + })); } - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match &result { - ThreadCollectArbitrary::AllCollected => {} - ThreadCollectArbitrary::StoppedByError { error: _ } => { - early_exit_result = Some(result); + let mut early_exit_result = None; + while !handles.is_empty() { + let mut finished_idx = None; + for (h, handle) in handles.iter().enumerate() { + if handle.is_finished() { + finished_idx = Some(h); break; } - ThreadCollectArbitrary::StoppedByWhileCondition => { - early_exit_result = Some(result); + } + + if let Some(h) = finished_idx { + let handle = handles.remove(h); + let result = handle.join().expect("failed to join the thread"); + match &result { + ThreadCollectArbitrary::AllCollected => {} + ThreadCollectArbitrary::StoppedByError { error: _ } => { + early_exit_result = Some(result); + break; + } + ThreadCollectArbitrary::StoppedByWhileCondition => { + early_exit_result = Some(result); + } } } } - } - early_exit_result.unwrap_or(ThreadCollectArbitrary::AllCollected) - }); + early_exit_result.unwrap_or(ThreadCollectArbitrary::AllCollected) + }); ( num_spawned, diff --git 
a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index ceced76..49fb693 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -35,7 +35,7 @@ where let mut num_spawned = 0; - orchestrator.thread_pool().scope(|s| { + orchestrator.thread_pool().scope_zzz(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned += 1; s.spawn(|| { @@ -81,7 +81,7 @@ where let mut num_spawned = 0; let result: Result>, ::Error> = - orchestrator.thread_pool().scope(|s| { + orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 6b38dd7..e7df3c7 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -18,7 +18,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let results = orchestrator.thread_pool().scope(|s| { + let results = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -71,7 +71,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result: Result>, _> = orchestrator.thread_pool().scope(|s| { + let result: Result>, _> = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 7d1b6f2..709514e 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -18,7 +18,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result = orchestrator.thread_pool().scope(|s| { + let result = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -64,7 +64,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result = orchestrator.thread_pool().scope(|s| { + let result = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index fffa44a..0dafccd 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -27,7 +27,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let results = orchestrator.thread_pool().scope(|s| { + let results = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -83,7 +83,7 @@ where let shared_state = &state; let mut num_spawned = 0; - let result: Result, _> = orchestrator.thread_pool().scope(|s| { + let result: Result, _> = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { From f15eead7c074ff52b8eac13650dbb5a6daa94f8c Mon Sep 17 00:00:00 2001 From: orxfun Date: Sat, 13 Sep 2025 23:13:50 +0200 Subject: [PATCH 086/264] NumSpawned new type --- .../computations/collect.rs | 26 ++++++++++++++----- src/orch/mod.rs | 2 ++ src/orch/num_spawned.rs | 24 +++++++++++++++++ .../implementations/std_default_pool.rs | 2 +- src/orch/thread_pool/par_thread_pool.rs | 20 
+++++++++++++- .../fixed_chunk_runner/parallel_runner.rs | 4 ++- src/runner/parallel_runner.rs | 4 +-- .../collect_arbitrary.rs | 13 +++++----- .../collect_ordered.rs | 14 +++++----- src/runner/parallel_runner_compute/next.rs | 19 +++++++++----- .../parallel_runner_compute/next_any.rs | 19 +++++++++----- src/runner/parallel_runner_compute/reduce.rs | 14 +++++----- src/using/computations/u_map/collect.rs | 5 ++-- src/using/computations/u_map/next.rs | 5 ++-- src/using/computations/u_map/reduce.rs | 3 ++- src/using/computations/u_xap/collect.rs | 5 ++-- src/using/computations/u_xap/next.rs | 5 ++-- src/using/computations/u_xap/reduce.rs | 3 ++- .../u_collect_arbitrary.rs | 17 ++++++------ .../u_collect_ordered.rs | 17 ++++++------ .../runner/parallel_runner_compute/u_next.rs | 17 ++++++------ .../parallel_runner_compute/u_next_any.rs | 17 ++++++------ .../parallel_runner_compute/u_reduce.rs | 21 +++++++++------ 23 files changed, 180 insertions(+), 96 deletions(-) create mode 100644 src/orch/num_spawned.rs diff --git a/src/computational_variants/computations/collect.rs b/src/computational_variants/computations/collect.rs index ef1caa8..fd1330f 100644 --- a/src/computational_variants/computations/collect.rs +++ b/src/computational_variants/computations/collect.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::runner_results::{ Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, }; -use crate::orch::Orchestrator; +use crate::orch::{NumSpawned, Orchestrator}; use crate::runner::parallel_runner_compute as prc; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; @@ -14,7 +14,7 @@ pub fn map_collect_into( iter: I, map1: M1, pinned_vec: P, -) -> (usize, P) +) -> (NumSpawned, P) where R: Orchestrator, I: ConcurrentIter, @@ -23,7 +23,10 @@ where P: IntoConcurrentPinnedVec, { match (params.is_sequential(), params.iteration_order) { - (true, _) => (0, map_collect_into_seq(iter, map1, pinned_vec)), + (true, _) => ( + NumSpawned::zero(), + map_collect_into_seq(iter, map1, pinned_vec), + ), #[cfg(test)] (false, IterationOrder::Arbitrary) => { prc::collect_arbitrary::m(orchestrator, params, iter, map1, pinned_vec) @@ -52,7 +55,7 @@ pub fn xap_collect_into( iter: I, xap1: X1, pinned_vec: P, -) -> (usize, P) +) -> (NumSpawned, P) where R: Orchestrator, I: ConcurrentIter, @@ -62,7 +65,10 @@ where P: IntoConcurrentPinnedVec, { match (params.is_sequential(), params.iteration_order) { - (true, _) => (0, xap_collect_into_seq(iter, xap1, pinned_vec)), + (true, _) => ( + NumSpawned::zero(), + xap_collect_into_seq(iter, xap1, pinned_vec), + ), (false, IterationOrder::Arbitrary) => { let (num_threads, result) = prc::collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); @@ -113,7 +119,10 @@ pub fn xap_try_collect_into( iter: I, xap1: X1, pinned_vec: P, -) -> (usize, Result::Error>) +) -> ( + NumSpawned, + Result::Error>, +) where R: Orchestrator, I: ConcurrentIter, @@ -123,7 +132,10 @@ where P: IntoConcurrentPinnedVec, { match (params.is_sequential(), params.iteration_order) { - (true, _) => (0, xap_try_collect_into_seq(iter, xap1, pinned_vec)), + (true, _) => ( + NumSpawned::zero(), + xap_try_collect_into_seq(iter, xap1, pinned_vec), + ), (false, IterationOrder::Arbitrary) => { let (nt, result) = prc::collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); diff --git a/src/orch/mod.rs b/src/orch/mod.rs index 90963dc..91050ed 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -1,8 +1,10 @@ mod implementations; 
+mod num_spawned; mod orchestrator; mod thread_pool; pub use crate::orch::implementations::DefaultStdOrchestrator; +pub use num_spawned::NumSpawned; pub use orchestrator::Orchestrator; pub use thread_pool::{ParHandle, ParScope, ParThreadPool}; diff --git a/src/orch/num_spawned.rs b/src/orch/num_spawned.rs new file mode 100644 index 0000000..e009196 --- /dev/null +++ b/src/orch/num_spawned.rs @@ -0,0 +1,24 @@ +#[derive(Clone, Copy)] +pub struct NumSpawned(usize); + +impl NumSpawned { + pub fn zero() -> Self { + Self(0) + } + + pub fn increment(&mut self) { + self.0 += 1; + } + + pub fn into_inner(self) -> usize { + self.0 + } +} + +impl core::ops::Rem for NumSpawned { + type Output = usize; + + fn rem(self, rhs: Self) -> Self::Output { + self.0 % rhs.0 + } +} diff --git a/src/orch/thread_pool/implementations/std_default_pool.rs b/src/orch/thread_pool/implementations/std_default_pool.rs index b6d8d2c..e885ed2 100644 --- a/src/orch/thread_pool/implementations/std_default_pool.rs +++ b/src/orch/thread_pool/implementations/std_default_pool.rs @@ -30,7 +30,7 @@ impl ParThreadPool for StdDefaultPool { std::thread::scope(|s| f(&s)) } - fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where 'scope: 's, 'env: 'scope + 's, diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index fba7c7b..562a5a0 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -1,4 +1,5 @@ use super::par_scope::ParScope; +use crate::orch::num_spawned::NumSpawned; pub trait ParThreadPool { type ScopeZzz<'scope, 'env>: ParScope<'scope, 'env> @@ -14,7 +15,7 @@ pub trait ParThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where 'scope: 's, 'env: 'scope + 's, @@ -29,4 +30,21 @@ pub trait ParThreadPool { { todo!() } + + // derived + + fn run(&mut self, do_spawn: S, work: W) -> NumSpawned + where + S: Fn(NumSpawned) -> bool + Sync, + W: Fn() + Sync, + { + let mut nt = NumSpawned::zero(); + self.scope(|s| { + while do_spawn(nt) { + nt.increment(); + Self::run_in_scope(&s, &work); + } + }); + nt + } } diff --git a/src/runner/fixed_chunk_runner/parallel_runner.rs b/src/runner/fixed_chunk_runner/parallel_runner.rs index 4d2c510..7cb5879 100644 --- a/src/runner/fixed_chunk_runner/parallel_runner.rs +++ b/src/runner/fixed_chunk_runner/parallel_runner.rs @@ -3,6 +3,7 @@ use super::{ thread_runner::FixedChunkThreadRunner, }; use crate::{ + orch::NumSpawned, parameters::Params, runner::{computation_kind::ComputationKind, parallel_runner::ParallelRunner}, }; @@ -92,10 +93,11 @@ impl ParallelRunner for FixedChunkRunner { fn new_shared_state(&self) -> Self::SharedState {} - fn do_spawn_new(&self, num_spawned: usize, _: &Self::SharedState, iter: &I) -> bool + fn do_spawn_new(&self, num_spawned: NumSpawned, _: &Self::SharedState, iter: &I) -> bool where I: ConcurrentIter, { + let num_spawned = num_spawned.into_inner(); if num_spawned % LAG_PERIODICITY == 0 { match self.next_chunk(num_spawned, iter.try_get_len()) { Some(c) => self.current_chunk_size.store(c, Ordering::Relaxed), diff --git a/src/runner/parallel_runner.rs b/src/runner/parallel_runner.rs index 175b597..982462d 100644 --- a/src/runner/parallel_runner.rs +++ b/src/runner/parallel_runner.rs 
@@ -1,5 +1,5 @@ use super::{computation_kind::ComputationKind, thread_runner::ThreadRunner}; -use crate::parameters::Params; +use crate::{orch::NumSpawned, parameters::Params}; use orx_concurrent_iter::ConcurrentIter; /// A parallel runner which is responsible for taking a computation defined as a composition @@ -25,7 +25,7 @@ pub trait ParallelRunner: Sized + Sync + 'static { /// * `shared_state` is the current parallel execution state. fn do_spawn_new( &self, - num_spawned: usize, + num_spawned: NumSpawned, shared_state: &Self::SharedState, iter: &I, ) -> bool diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 473e6f3..4355c8a 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,6 +1,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; +use crate::orch::NumSpawned; use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; @@ -17,7 +18,7 @@ pub fn m( iter: I, map1: M1, pinned_vec: P, -) -> (usize, P) +) -> (NumSpawned, P) where C: Orchestrator, I: ConcurrentIter, @@ -42,10 +43,10 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); orchestrator.thread_pool().scope_zzz(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); s.spawn(|| { thread::collect_arbitrary::m( runner.new_thread_runner(shared_state), @@ -69,7 +70,7 @@ pub fn x( iter: I, xap1: X1, pinned_vec: P, -) -> (usize, ParallelCollectArbitrary) +) -> (NumSpawned, ParallelCollectArbitrary) where C: Orchestrator, I: ConcurrentIter, @@ -94,13 +95,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result: ThreadCollectArbitrary = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); handles.push(s.spawn(|| { thread::collect_arbitrary::x( runner.new_thread_runner(shared_state), diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 49fb693..0e91a4c 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,7 +1,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; -use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; +use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::parallel_runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_iter::ConcurrentIter; @@ -16,7 +16,7 @@ pub fn m( iter: I, map1: M1, pinned_vec: P, -) -> (usize, P) +) -> (NumSpawned, P) where C: Orchestrator, I: ConcurrentIter, @@ -33,11 +33,11 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); orchestrator.thread_pool().scope_zzz(|s| { while 
runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); s.spawn(|| { thread::collect_ordered::m( runner.new_thread_runner(shared_state), @@ -63,7 +63,7 @@ pub fn x( iter: I, xap1: X1, pinned_vec: P, -) -> (usize, ParallelCollect) +) -> (NumSpawned, ParallelCollect) where C: Orchestrator, I: ConcurrentIter, @@ -79,13 +79,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result: Result>, ::Error> = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); handles.push(s.spawn(|| { thread::collect_ordered::x( runner.new_thread_runner(shared_state), diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index e7df3c7..dd780f7 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,11 +1,16 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; +use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(mut orchestrator: C, params: Params, iter: I, map1: M1) -> (usize, Option) +pub fn m( + mut orchestrator: C, + params: Params, + iter: I, + map1: M1, +) -> (NumSpawned, Option) where C: Orchestrator, I: ConcurrentIter, @@ -17,12 +22,12 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let results = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); handles.push(s.spawn(|| { thread::next::m( runner.new_thread_runner(shared_state), @@ -57,7 +62,7 @@ pub fn x( params: Params, iter: I, xap1: X1, -) -> (usize, ResultNext) +) -> (NumSpawned, ResultNext) where C: Orchestrator, I: ConcurrentIter, @@ -70,12 +75,12 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result: Result>, _> = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); handles.push(s.spawn(|| { thread::next::x( runner.new_thread_runner(shared_state), diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 709514e..9775c18 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,11 +1,16 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; +use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; -pub fn m(mut orchestrator: C, params: Params, iter: I, map1: M1) -> (usize, Option) +pub fn m( + mut orchestrator: C, + params: 
Params, + iter: I, + map1: M1, +) -> (NumSpawned, Option) where C: Orchestrator, I: ConcurrentIter, @@ -17,12 +22,12 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); handles.push(s.spawn(|| { thread::next_any::m( runner.new_thread_runner(shared_state), @@ -50,7 +55,7 @@ pub fn x( params: Params, iter: I, xap1: X1, -) -> (usize, ResultNextAny) +) -> (NumSpawned, ResultNextAny) where C: Orchestrator, I: ConcurrentIter, @@ -63,12 +68,12 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); handles.push(s.spawn(|| { thread::next_any::x( runner.new_thread_runner(shared_state), diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 0dafccd..130ebc0 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,6 +1,6 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Reduce}; -use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; +use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -13,7 +13,7 @@ pub fn m( iter: I, map1: M1, reduce: Red, -) -> (usize, Option) +) -> (NumSpawned, Option) where C: Orchestrator, I: ConcurrentIter, @@ -26,12 +26,12 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let results = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); handles.push(s.spawn(|| { thread::reduce::m( runner.new_thread_runner(shared_state), @@ -68,7 +68,7 @@ pub fn x( iter: I, xap1: X1, reduce: Red, -) -> (usize, ResultReduce) +) -> (NumSpawned, ResultReduce) where C: Orchestrator, I: ConcurrentIter, @@ -82,12 +82,12 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result: Result, _> = orchestrator.thread_pool().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned += 1; + num_spawned.increment(); handles.push(s.spawn(|| { thread::reduce::x( runner.new_thread_runner(shared_state), diff --git a/src/using/computations/u_map/collect.rs b/src/using/computations/u_map/collect.rs index 83e0701..ec95b16 100644 --- a/src/using/computations/u_map/collect.rs +++ b/src/using/computations/u_map/collect.rs @@ -1,6 +1,7 @@ use super::m::UM; #[cfg(test)] use crate::IterationOrder; +use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use crate::using::Using; #[cfg(test)] @@ -16,7 +17,7 @@ where O: Send, M1: Fn(&mut U::Item, I::Item) -> O + Sync, { - pub fn collect_into(self, pinned_vec: P) -> 
(usize, P) + pub fn collect_into(self, pinned_vec: P) -> (NumSpawned, P) where R: ParallelRunner, P: IntoConcurrentPinnedVec, @@ -24,7 +25,7 @@ where let len = self.iter().try_get_len(); let p = self.params(); match (p.is_sequential(), p.iteration_order) { - (true, _) => (0, self.sequential(pinned_vec)), + (true, _) => (NumSpawned::zero(), self.sequential(pinned_vec)), #[cfg(test)] (false, IterationOrder::Arbitrary) => { u_collect_arbitrary::u_m(R::collection(p, len), self, pinned_vec) diff --git a/src/using/computations/u_map/next.rs b/src/using/computations/u_map/next.rs index 0fdaf08..b88927c 100644 --- a/src/using/computations/u_map/next.rs +++ b/src/using/computations/u_map/next.rs @@ -1,4 +1,5 @@ use super::m::UM; +use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use crate::using::Using; use crate::using::runner::parallel_runner_compute::{u_next, u_next_any}; @@ -11,7 +12,7 @@ where M1: Fn(&mut U::Item, I::Item) -> O + Sync, O: Send, { - pub fn next(self) -> (usize, Option) + pub fn next(self) -> (NumSpawned, Option) where R: ParallelRunner, { @@ -19,7 +20,7 @@ where u_next::u_m(R::early_return(p, len), self) } - pub fn next_any(self) -> (usize, Option) + pub fn next_any(self) -> (NumSpawned, Option) where R: ParallelRunner, { diff --git a/src/using/computations/u_map/reduce.rs b/src/using/computations/u_map/reduce.rs index 4c7acee..b8e64dd 100644 --- a/src/using/computations/u_map/reduce.rs +++ b/src/using/computations/u_map/reduce.rs @@ -1,3 +1,4 @@ +use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use crate::using::Using; use crate::using::computations::UM; @@ -11,7 +12,7 @@ where O: Send, M1: Fn(&mut U::Item, I::Item) -> O + Sync, { - pub fn reduce(self, reduce: X) -> (usize, Option) + pub fn reduce(self, reduce: X) -> (NumSpawned, Option) where R: ParallelRunner, X: Fn(&mut U::Item, O, O) -> O + Sync, diff --git a/src/using/computations/u_xap/collect.rs b/src/using/computations/u_xap/collect.rs index b0399c1..619e837 100644 --- a/src/using/computations/u_xap/collect.rs +++ b/src/using/computations/u_xap/collect.rs @@ -1,6 +1,7 @@ use crate::generic_values::runner_results::{ Infallible, ParallelCollect, ParallelCollectArbitrary, }; +use crate::orch::NumSpawned; use crate::using::Using; use crate::using::computations::UX; use crate::using::runner::parallel_runner_compute::{u_collect_arbitrary, u_collect_ordered}; @@ -20,7 +21,7 @@ where Vo::Item: Send, M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, { - pub fn collect_into(self, pinned_vec: P) -> (usize, P) + pub fn collect_into(self, pinned_vec: P) -> (NumSpawned, P) where R: ParallelRunner, P: IntoConcurrentPinnedVec, @@ -29,7 +30,7 @@ where let (len, p) = self.len_and_params(); match (p.is_sequential(), p.iteration_order) { - (true, _) => (0, self.sequential(pinned_vec)), + (true, _) => (NumSpawned::zero(), self.sequential(pinned_vec)), (false, IterationOrder::Arbitrary) => { let (num_threads, result) = u_collect_arbitrary::u_x(R::collection(p, len), self, pinned_vec); diff --git a/src/using/computations/u_xap/next.rs b/src/using/computations/u_xap/next.rs index 87b5479..498ee79 100644 --- a/src/using/computations/u_xap/next.rs +++ b/src/using/computations/u_xap/next.rs @@ -1,5 +1,6 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; +use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use crate::using::Using; use crate::using::computations::UX; @@ -14,7 +15,7 @@ where M1: 
Fn(&mut U::Item, I::Item) -> Vo + Sync, Vo::Item: Send, { - pub fn next(self) -> (usize, Option) + pub fn next(self) -> (NumSpawned, Option) where R: ParallelRunner, Vo: Values, @@ -24,7 +25,7 @@ where (num_threads, result.map(|x| x.1)) } - pub fn next_any(self) -> (usize, Option) + pub fn next_any(self) -> (NumSpawned, Option) where R: ParallelRunner, Vo: Values, diff --git a/src/using/computations/u_xap/reduce.rs b/src/using/computations/u_xap/reduce.rs index 1026014..ae679b1 100644 --- a/src/using/computations/u_xap/reduce.rs +++ b/src/using/computations/u_xap/reduce.rs @@ -1,5 +1,6 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; +use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; use crate::using::Using; use crate::using::computations::UX; @@ -14,7 +15,7 @@ where Vo::Item: Send, M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, { - pub fn reduce(self, reduce: Red) -> (usize, Option) + pub fn reduce(self, reduce: Red) -> (NumSpawned, Option) where R: ParallelRunner, Red: Fn(&mut U::Item, Vo::Item, Vo::Item) -> Vo::Item + Sync, diff --git a/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs index a122fb0..b44ba15 100644 --- a/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs +++ b/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs @@ -1,6 +1,7 @@ use super::super::thread_runner_compute as thread; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; +use crate::orch::NumSpawned; use crate::runner::ParallelRunnerCompute; use crate::using::Using; #[cfg(test)] @@ -13,7 +14,7 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m #[cfg(test)] -pub fn u_m(runner: C, m: UM, pinned_vec: P) -> (usize, P) +pub fn u_m(runner: C, m: UM, pinned_vec: P) -> (NumSpawned, P) where C: ParallelRunnerCompute, U: Using, @@ -37,11 +38,11 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); std::thread::scope(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); s.spawn(|| { thread::u_collect_arbitrary::u_m( runner.new_thread_runner(shared_state), @@ -64,7 +65,7 @@ pub fn u_x( runner: C, x: UX, pinned_vec: P, -) -> (usize, ParallelCollectArbitrary) +) -> (NumSpawned, ParallelCollectArbitrary) where C: ParallelRunnerCompute, U: Using, @@ -89,13 +90,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result: ThreadCollectArbitrary = std::thread::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); handles.push(s.spawn(|| { thread::u_collect_arbitrary::u_x( runner.new_thread_runner(shared_state), diff --git a/src/using/runner/parallel_runner_compute/u_collect_ordered.rs b/src/using/runner/parallel_runner_compute/u_collect_ordered.rs index 5050d85..7ac1bf2 100644 --- a/src/using/runner/parallel_runner_compute/u_collect_ordered.rs +++ b/src/using/runner/parallel_runner_compute/u_collect_ordered.rs @@ -1,6 +1,7 @@ use 
super::super::thread_runner_compute as thread; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; +use crate::orch::NumSpawned; use crate::runner::ParallelRunnerCompute; use crate::using::Using; use crate::using::computations::{UM, UX}; @@ -10,7 +11,7 @@ use orx_fixed_vec::IntoConcurrentPinnedVec; // m -pub fn u_m(runner: C, m: UM, pinned_vec: P) -> (usize, P) +pub fn u_m(runner: C, m: UM, pinned_vec: P) -> (NumSpawned, P) where C: ParallelRunnerCompute, U: Using, @@ -28,11 +29,11 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); std::thread::scope(|s| { while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); s.spawn(|| { thread::u_collect_ordered::u_m( runner.new_thread_runner(shared_state), @@ -57,7 +58,7 @@ pub fn u_x( runner: C, x: UX, pinned_vec: P, -) -> (usize, ParallelCollect) +) -> (NumSpawned, ParallelCollect) where C: ParallelRunnerCompute, U: Using, @@ -73,14 +74,14 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result: Result>, ::Error> = std::thread::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); handles.push(s.spawn(|| { thread::u_collect_ordered::u_x( runner.new_thread_runner(shared_state), diff --git a/src/using/runner/parallel_runner_compute/u_next.rs b/src/using/runner/parallel_runner_compute/u_next.rs index b2ef9e3..eee33aa 100644 --- a/src/using/runner/parallel_runner_compute/u_next.rs +++ b/src/using/runner/parallel_runner_compute/u_next.rs @@ -1,12 +1,13 @@ use super::super::thread_runner_compute as thread; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; +use crate::orch::NumSpawned; use crate::runner::ParallelRunnerCompute; use crate::using::Using; use crate::using::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; -pub fn u_m(runner: C, m: UM) -> (usize, Option) +pub fn u_m(runner: C, m: UM) -> (NumSpawned, Option) where C: ParallelRunnerCompute, U: Using, @@ -19,13 +20,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let results = std::thread::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); handles.push(s.spawn(|| { thread::u_next::u_m( runner.new_thread_runner(shared_state), @@ -56,7 +57,7 @@ type ResultNext = Result< <::Fallibility as Fallibility>::Error, >; -pub fn u_x(runner: C, x: UX) -> (usize, ResultNext) +pub fn u_x(runner: C, x: UX) -> (NumSpawned, ResultNext) where C: ParallelRunnerCompute, U: Using, @@ -70,13 +71,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result: Result>, _> = std::thread::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - 
let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); handles.push(s.spawn(|| { thread::u_next::u_x( runner.new_thread_runner(shared_state), diff --git a/src/using/runner/parallel_runner_compute/u_next_any.rs b/src/using/runner/parallel_runner_compute/u_next_any.rs index 42f022a..e4c1d42 100644 --- a/src/using/runner/parallel_runner_compute/u_next_any.rs +++ b/src/using/runner/parallel_runner_compute/u_next_any.rs @@ -1,11 +1,12 @@ use super::super::thread_runner_compute as thread; use crate::generic_values::runner_results::Fallibility; +use crate::orch::NumSpawned; use crate::using::Using; use crate::using::computations::{UM, UX}; use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; -pub fn u_m(runner: C, m: UM) -> (usize, Option) +pub fn u_m(runner: C, m: UM) -> (NumSpawned, Option) where C: ParallelRunnerCompute, U: Using, @@ -18,13 +19,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result = std::thread::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); handles.push(s.spawn(|| { thread::u_next_any::u_m( runner.new_thread_runner(shared_state), @@ -48,7 +49,7 @@ where type ResultNextAny = Result::Item>, <::Fallibility as Fallibility>::Error>; -pub fn u_x(runner: C, x: UX) -> (usize, ResultNextAny) +pub fn u_x(runner: C, x: UX) -> (NumSpawned, ResultNextAny) where C: ParallelRunnerCompute, U: Using, @@ -62,13 +63,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result = std::thread::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); handles.push(s.spawn(|| { thread::u_next_any::u_x( runner.new_thread_runner(shared_state), diff --git a/src/using/runner/parallel_runner_compute/u_reduce.rs b/src/using/runner/parallel_runner_compute/u_reduce.rs index 2032fbf..6333b6c 100644 --- a/src/using/runner/parallel_runner_compute/u_reduce.rs +++ b/src/using/runner/parallel_runner_compute/u_reduce.rs @@ -1,6 +1,7 @@ use super::super::thread_runner_compute as thread; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Reduce}; +use crate::orch::NumSpawned; use crate::runner::ParallelRunnerCompute; use crate::using::Using; use crate::using::computations::{UM, UX}; @@ -8,7 +9,11 @@ use orx_concurrent_iter::ConcurrentIter; // m -pub fn u_m(runner: C, m: UM, reduce: Red) -> (usize, Option) +pub fn u_m( + runner: C, + m: UM, + reduce: Red, +) -> (NumSpawned, Option) where C: ParallelRunnerCompute, U: Using, @@ -22,13 +27,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let results = std::thread::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); handles.push(s.spawn(|| { thread::u_reduce::u_m( 
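                    // each worker folds its own chunk stream with the
                    // thread-local `u` value created for it above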
runner.new_thread_runner(shared_state), @@ -65,7 +70,7 @@ pub fn u_x( runner: C, x: UX, reduce: Red, -) -> (usize, ResultReduce) +) -> (NumSpawned, ResultReduce) where C: ParallelRunnerCompute, U: Using, @@ -80,13 +85,13 @@ where let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = 0; + let mut num_spawned = NumSpawned::zero(); let result: Result, _> = std::thread::scope(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned); - num_spawned += 1; + let u = using.create(num_spawned.into_inner()); + num_spawned.increment(); handles.push(s.spawn(|| { thread::u_reduce::u_x( runner.new_thread_runner(shared_state), From 2a6eab3125fb5981c3c98a6da43bd94d6ecda765 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sat, 13 Sep 2025 23:22:05 +0200 Subject: [PATCH 087/264] use orchestrator run for ordered collection --- .../collect_arbitrary.rs | 2 -- .../collect_ordered.rs | 32 ++++++++----------- 2 files changed, 13 insertions(+), 21 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 4355c8a..dbb371a 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -26,8 +26,6 @@ where M1: Fn(I::Item) -> O + Sync, P: IntoConcurrentPinnedVec, { - use crate::runner::ComputationKind; - let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 0e91a4c..4e7dd99 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -26,30 +26,24 @@ where { let offset = pinned_vec.len(); let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - let o_bag: ConcurrentOrderedBag = pinned_vec.into(); - - // compute let state = runner.new_shared_state(); let shared_state = &state; - let mut num_spawned = NumSpawned::zero(); + let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, shared_state, &iter); + + let work = || { + thread::collect_ordered::m( + runner.new_thread_runner(shared_state), + &iter, + shared_state, + &map1, + &o_bag, + offset, + ); + }; - orchestrator.thread_pool().scope_zzz(|s| { - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - s.spawn(|| { - thread::collect_ordered::m( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &map1, - &o_bag, - offset, - ); - }); - } - }); + let num_spawned = orchestrator.thread_pool().run(do_spawn, work); let values = unsafe { o_bag.into_inner().unwrap_only_if_counts_match() }; (num_spawned, values) From 03ac473f5ad7d320af6ac1d505518371fb76b037 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sat, 13 Sep 2025 23:24:42 +0200 Subject: [PATCH 088/264] minor revision --- src/runner/parallel_runner_compute/collect_ordered.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 4e7dd99..cb59348 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -28,15 +28,14 @@ where let runner = C::new_runner(ComputationKind::Collect, 
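     // kind, params and the input length (when known) let the runner pick
     // chunk sizes and decide whether spawning another thread pays off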
params, iter.try_get_len()); let o_bag: ConcurrentOrderedBag = pinned_vec.into(); let state = runner.new_shared_state(); - let shared_state = &state; - let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, shared_state, &iter); + let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = || { thread::collect_ordered::m( - runner.new_thread_runner(shared_state), + runner.new_thread_runner(&state), &iter, - shared_state, + &state, &map1, &o_bag, offset, From 54e7a56b94d7320f272bbe99c8896e0aa25ac7ff Mon Sep 17 00:00:00 2001 From: orxfun Date: Sat, 13 Sep 2025 23:48:53 +0200 Subject: [PATCH 089/264] orchestrator run is defined, ordered collection is simplified --- src/orch/mod.rs | 2 ++ src/orch/orchestrator.rs | 36 ++++++++++++++++++- .../collect_ordered.rs | 22 ++++-------- 3 files changed, 43 insertions(+), 17 deletions(-) diff --git a/src/orch/mod.rs b/src/orch/mod.rs index 91050ed..f2ac98d 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -3,6 +3,8 @@ mod num_spawned; mod orchestrator; mod thread_pool; +pub(crate) use orchestrator::{SharedStateOf, ThreadRunnerOf}; + pub use crate::orch::implementations::DefaultStdOrchestrator; pub use num_spawned::NumSpawned; pub use orchestrator::Orchestrator; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 44e5066..d58c0a0 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,4 +1,9 @@ -use crate::{ParallelRunner, Params, orch::thread_pool::ParThreadPool, runner::ComputationKind}; +use crate::{ + ParallelRunner, Params, + orch::{NumSpawned, thread_pool::ParThreadPool}, + runner::ComputationKind, +}; +use orx_concurrent_iter::ConcurrentIter; pub trait Orchestrator { type Runner: ParallelRunner; @@ -14,4 +19,33 @@ pub trait Orchestrator { } fn thread_pool(&mut self) -> &mut Self::ThreadPool; + + // derived + + fn run( + &mut self, + params: Params, + iter: I, + kind: ComputationKind, + thread_work: F, + ) -> NumSpawned + where + I: ConcurrentIter, + F: Fn( + &I, + &::SharedState, + ::ThreadRunner, + ) + Sync, + { + let runner = Self::new_runner(kind, params, iter.try_get_len()); + let state = runner.new_shared_state(); + let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); + let work = || { + thread_work(&iter, &state, runner.new_thread_runner(&state)); + }; + self.thread_pool().run(do_spawn, work) + } } + +pub(crate) type SharedStateOf = <::Runner as ParallelRunner>::SharedState; +pub(crate) type ThreadRunnerOf = <::Runner as ParallelRunner>::ThreadRunner; diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index cb59348..9ca2976 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,7 +1,9 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; -use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; +use crate::orch::{ + NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool, SharedStateOf, ThreadRunnerOf, +}; use crate::runner::parallel_runner::ParallelRunner; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_iter::ConcurrentIter; @@ -25,24 +27,12 @@ where P: IntoConcurrentPinnedVec, { let offset = pinned_vec.len(); - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let o_bag: ConcurrentOrderedBag = 
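     // wrapping the pinned vec as a ConcurrentOrderedBag lets threads write
     // mapped values concurrently at their exact input positions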
pinned_vec.into(); - let state = runner.new_shared_state(); - let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); - - let work = || { - thread::collect_ordered::m( - runner.new_thread_runner(&state), - &iter, - &state, - &map1, - &o_bag, - offset, - ); + let thread_work = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + thread::collect_ordered::m(thread_runner, iter, state, &map1, &o_bag, offset); }; - - let num_spawned = orchestrator.thread_pool().run(do_spawn, work); + let num_spawned = orchestrator.run(params, iter, ComputationKind::Collect, thread_work); let values = unsafe { o_bag.into_inner().unwrap_only_if_counts_match() }; (num_spawned, values) From d66b321b750dbd8f87c69db50fd5e0d5c0f2fd69 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sat, 13 Sep 2025 23:53:06 +0200 Subject: [PATCH 090/264] arbitrary collection is simplified --- .../collect_arbitrary.rs | 27 +++++-------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index dbb371a..13960ab 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -26,36 +26,21 @@ where M1: Fn(I::Item) -> O + Sync, P: IntoConcurrentPinnedVec, { + use crate::orch::{SharedStateOf, ThreadRunnerOf}; + let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - let mut bag: ConcurrentBag = pinned_vec.into(); match iter.try_get_len() { Some(iter_len) => bag.reserve_maximum_capacity(offset + iter_len), None => bag.reserve_maximum_capacity(capacity_bound), }; - // compute - - let state = runner.new_shared_state(); - let shared_state = &state; + let thread_work = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + thread::collect_arbitrary::m(thread_runner, iter, state, &map1, &bag); + }; + let num_spawned = orchestrator.run(params, iter, ComputationKind::Collect, thread_work); - let mut num_spawned = NumSpawned::zero(); - orchestrator.thread_pool().scope_zzz(|s| { - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - s.spawn(|| { - thread::collect_arbitrary::m( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &map1, - &bag, - ); - }); - } - }); let values = bag.into_inner(); (num_spawned, values) } From 9478a44703f3e6d496edf82504ea2fe26a20e56a Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 16:03:28 +0200 Subject: [PATCH 091/264] generic map for all pools --- src/orch/thread_pool/par_thread_pool.rs | 31 +++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index 562a5a0..70f81f0 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -1,3 +1,5 @@ +use orx_concurrent_bag::ConcurrentBag; + use super::par_scope::ParScope; use crate::orch::num_spawned::NumSpawned; @@ -47,4 +49,33 @@ pub trait ParThreadPool { }); nt } + + fn map(&mut self, do_spawn: S, map: M) -> (NumSpawned, Result, E>) + where + S: Fn(NumSpawned) -> bool + Sync, + M: Fn() -> Result + Sync, + T: Send, + E: Send, + { + let mut nt = NumSpawned::zero(); + + let thread_results = ConcurrentBag::new(); + let work = || _ = thread_results.push(map()); + self.scope(|s| { + while do_spawn(nt) { + nt.increment(); + 
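+                // `work` runs thread_map on the spawned thread and pushes its
+                // Result into the shared ConcurrentBag declared above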
Self::run_in_scope(&s, &work); + } + }); + + let mut results = vec![]; + for r in thread_results.into_inner() { + match r { + Ok(x) => results.push(x), + Err(e) => return (nt, Err(e)), + } + } + + (nt, Ok(results)) + } } From a9cb60402f713dceccf4fbcc9684e019bd0223be Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 16:11:55 +0200 Subject: [PATCH 092/264] minor renaming --- .../thread_pool/implementations/std_default_pool.rs | 2 +- src/orch/thread_pool/par_thread_pool.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/orch/thread_pool/implementations/std_default_pool.rs b/src/orch/thread_pool/implementations/std_default_pool.rs index e885ed2..4336df2 100644 --- a/src/orch/thread_pool/implementations/std_default_pool.rs +++ b/src/orch/thread_pool/implementations/std_default_pool.rs @@ -1,4 +1,4 @@ -use crate::orch::ParThreadPool; +use crate::orch::{NumSpawned, ParThreadPool}; #[derive(Default)] pub struct StdDefaultPool; diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index 70f81f0..c0bb538 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -35,22 +35,22 @@ pub trait ParThreadPool { // derived - fn run(&mut self, do_spawn: S, work: W) -> NumSpawned + fn run(&mut self, do_spawn: S, thread_do: F) -> NumSpawned where S: Fn(NumSpawned) -> bool + Sync, - W: Fn() + Sync, + F: Fn() + Sync, { let mut nt = NumSpawned::zero(); self.scope(|s| { while do_spawn(nt) { nt.increment(); - Self::run_in_scope(&s, &work); + Self::run_in_scope(&s, &thread_do); } }); nt } - fn map(&mut self, do_spawn: S, map: M) -> (NumSpawned, Result, E>) + fn map(&mut self, do_spawn: S, thread_map: M) -> (NumSpawned, Result, E>) where S: Fn(NumSpawned) -> bool + Sync, M: Fn() -> Result + Sync, @@ -60,7 +60,7 @@ pub trait ParThreadPool { let mut nt = NumSpawned::zero(); let thread_results = ConcurrentBag::new(); - let work = || _ = thread_results.push(map()); + let work = || _ = thread_results.push(thread_map()); self.scope(|s| { while do_spawn(nt) { nt.increment(); From 13b52111222a40839b83cf23e8c3b1372d56a797 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 16:16:26 +0200 Subject: [PATCH 093/264] map defined on orchestrator --- src/orch/orchestrator.rs | 29 +++++++++++++++++-- .../collect_ordered.rs | 4 +-- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index d58c0a0..b285a62 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -27,7 +27,7 @@ pub trait Orchestrator { params: Params, iter: I, kind: ComputationKind, - thread_work: F, + thread_do: F, ) -> NumSpawned where I: ConcurrentIter, @@ -41,10 +41,35 @@ pub trait Orchestrator { let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = || { - thread_work(&iter, &state, runner.new_thread_runner(&state)); + thread_do(&iter, &state, runner.new_thread_runner(&state)); }; self.thread_pool().run(do_spawn, work) } + + fn map( + &mut self, + params: Params, + iter: I, + kind: ComputationKind, + thread_map: M, + ) -> (NumSpawned, Result, E>) + where + I: ConcurrentIter, + M: Fn( + &I, + &::SharedState, + ::ThreadRunner, + ) -> Result + + Sync, + T: Send, + E: Send, + { + let runner = Self::new_runner(kind, params, iter.try_get_len()); + let state = runner.new_shared_state(); + let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); + let work = || 
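+        // all workers share `iter` and `state`; each gets its own ThreadRunner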
thread_map(&iter, &state, runner.new_thread_runner(&state)); + self.thread_pool().map(do_spawn, work) + } } pub(crate) type SharedStateOf = <::Runner as ParallelRunner>::SharedState; diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 9ca2976..8fce1c2 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -29,10 +29,10 @@ where let offset = pinned_vec.len(); let o_bag: ConcurrentOrderedBag = pinned_vec.into(); - let thread_work = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_do = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { thread::collect_ordered::m(thread_runner, iter, state, &map1, &o_bag, offset); }; - let num_spawned = orchestrator.run(params, iter, ComputationKind::Collect, thread_work); + let num_spawned = orchestrator.run(params, iter, ComputationKind::Collect, thread_do); let values = unsafe { o_bag.into_inner().unwrap_only_if_counts_match() }; (num_spawned, values) From 59fc98ffa97cf0734fa647ff434478f5cd8b568a Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 16:25:59 +0200 Subject: [PATCH 094/264] collect ordered uses generic orchestrator map --- src/orch/orchestrator.rs | 2 +- .../collect_ordered.rs | 62 ++----------------- 2 files changed, 7 insertions(+), 57 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index b285a62..51a8d13 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -46,7 +46,7 @@ pub trait Orchestrator { self.thread_pool().run(do_spawn, work) } - fn map( + fn map2( &mut self, params: Params, iter: I, diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 8fce1c2..f981def 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -56,64 +56,14 @@ where X1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - - // compute - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result: Result>, ::Error> = - orchestrator.thread_pool().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - thread::collect_ordered::x( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &xap1, - ) - })); - } - - let mut results = Vec::with_capacity(handles.len()); - - let mut error = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match result.into_result() { - Ok(result) => results.push(result), - Err(e) => { - error = Some(e); - break; - } - } - } - } - - match error { - Some(error) => Err(error), - None => Ok(results), - } - }); - - let result = match result { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + thread::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() + }; + let (num_spawned, results) = + orchestrator.map2(params, iter, ComputationKind::Collect, 
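+        // one result per spawned thread comes back; they are merged below in
+        // input order, or the first error stops the collection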
thread_map); + let result = match results { Err(error) => ParallelCollect::StoppedByError { error }, Ok(results) => ParallelCollect::reduce(results, pinned_vec), }; - (num_spawned, result) } From 0a0054315e4adca5cd78d67e1561d090adbf74ab Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 16:34:15 +0200 Subject: [PATCH 095/264] clean up --- src/runner/parallel_runner_compute/collect_ordered.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index f981def..de86079 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -1,10 +1,7 @@ use crate::Params; use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; -use crate::orch::{ - NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool, SharedStateOf, ThreadRunnerOf, -}; -use crate::runner::parallel_runner::ParallelRunner; +use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; +use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; @@ -61,6 +58,7 @@ where }; let (num_spawned, results) = orchestrator.map2(params, iter, ComputationKind::Collect, thread_map); + let result = match results { Err(error) => ParallelCollect::StoppedByError { error }, Ok(results) => ParallelCollect::reduce(results, pinned_vec), From 73220f651688abc9f265d0ed49ab526cc422a8f9 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 16:35:40 +0200 Subject: [PATCH 096/264] rname orch methods --- src/orch/orchestrator.rs | 4 ++-- src/runner/parallel_runner_compute/collect_arbitrary.rs | 2 +- src/runner/parallel_runner_compute/collect_ordered.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 51a8d13..107ec5d 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -22,7 +22,7 @@ pub trait Orchestrator { // derived - fn run( + fn run_all( &mut self, params: Params, iter: I, @@ -46,7 +46,7 @@ pub trait Orchestrator { self.thread_pool().run(do_spawn, work) } - fn map2( + fn map_all( &mut self, params: Params, iter: I, diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 13960ab..151c7c5 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -39,7 +39,7 @@ where let thread_work = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { thread::collect_arbitrary::m(thread_runner, iter, state, &map1, &bag); }; - let num_spawned = orchestrator.run(params, iter, ComputationKind::Collect, thread_work); + let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_work); let values = bag.into_inner(); (num_spawned, values) diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index de86079..8e760c2 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -29,7 +29,7 @@ where let thread_do = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { 
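     // a single worker's share: pull chunks from `iter`, map them, and write
     // the outputs into `o_bag` at their original indices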
thread::collect_ordered::m(thread_runner, iter, state, &map1, &o_bag, offset); }; - let num_spawned = orchestrator.run(params, iter, ComputationKind::Collect, thread_do); + let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_do); let values = unsafe { o_bag.into_inner().unwrap_only_if_counts_match() }; (num_spawned, values) @@ -57,7 +57,7 @@ where thread::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() }; let (num_spawned, results) = - orchestrator.map2(params, iter, ComputationKind::Collect, thread_map); + orchestrator.map_all(params, iter, ComputationKind::Collect, thread_map); let result = match results { Err(error) => ParallelCollect::StoppedByError { error }, From 0191b2713fdfe96e0352a1b0df4aab6644284736 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 18:32:21 +0200 Subject: [PATCH 097/264] collect arbitrary uses orchestrator map --- .../computations/collect.rs | 3 +- .../runner_results/collect_arbitrary.rs | 24 ++--- src/orch/thread_pool/par_thread_pool.rs | 6 +- .../collect_arbitrary.rs | 89 ++++--------------- .../collect_ordered.rs | 4 +- src/using/computations/u_xap/collect.rs | 3 +- .../u_collect_arbitrary.rs | 10 ++- 7 files changed, 40 insertions(+), 99 deletions(-) diff --git a/src/computational_variants/computations/collect.rs b/src/computational_variants/computations/collect.rs index fd1330f..bbd03b5 100644 --- a/src/computational_variants/computations/collect.rs +++ b/src/computational_variants/computations/collect.rs @@ -73,8 +73,7 @@ where let (num_threads, result) = prc::collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); let pinned_vec = match result { - ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, - ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, + ParallelCollectArbitrary::AllOrUntilWhileCollected { pinned_vec } => pinned_vec, }; (num_threads, pinned_vec) } diff --git a/src/generic_values/runner_results/collect_arbitrary.rs b/src/generic_values/runner_results/collect_arbitrary.rs index c48cc2f..3ff1edd 100644 --- a/src/generic_values/runner_results/collect_arbitrary.rs +++ b/src/generic_values/runner_results/collect_arbitrary.rs @@ -16,6 +16,15 @@ where StoppedByError { error: F::Error }, } +impl ThreadCollectArbitrary { + pub fn into_result(self) -> Result<(), F::Error> { + match self { + Self::StoppedByError { error } => Err(error), + _ => Ok(()), + } + } +} + impl core::fmt::Debug for ThreadCollectArbitrary { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -31,12 +40,10 @@ where V: Values, P: IntoConcurrentPinnedVec, { - AllCollected { - pinned_vec: P, - }, - StoppedByWhileCondition { + AllOrUntilWhileCollected { pinned_vec: P, }, + StoppedByError { error: ::Error, }, @@ -49,14 +56,10 @@ where { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::AllCollected { pinned_vec } => f + Self::AllOrUntilWhileCollected { pinned_vec } => f .debug_struct("AllCollected") .field("pinned_vec.len()", &pinned_vec.len()) .finish(), - Self::StoppedByWhileCondition { pinned_vec } => f - .debug_struct("StoppedByWhileCondition") - .field("pinned_vec.len()", &pinned_vec.len()) - .finish(), Self::StoppedByError { error: _ } => f.debug_struct("StoppedByError").finish(), } } @@ -69,8 +72,7 @@ where { pub fn into_result(self) -> Result::Error> { match self { - Self::AllCollected { pinned_vec } => Ok(pinned_vec), - Self::StoppedByWhileCondition { pinned_vec } => Ok(pinned_vec), + 
Self::AllOrUntilWhileCollected { pinned_vec } => Ok(pinned_vec), Self::StoppedByError { error } => Err(error), } } diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index c0bb538..e3c1977 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -1,7 +1,6 @@ -use orx_concurrent_bag::ConcurrentBag; - use super::par_scope::ParScope; use crate::orch::num_spawned::NumSpawned; +use orx_concurrent_bag::ConcurrentBag; pub trait ParThreadPool { type ScopeZzz<'scope, 'env>: ParScope<'scope, 'env> @@ -58,8 +57,7 @@ pub trait ParThreadPool { E: Send, { let mut nt = NumSpawned::zero(); - - let thread_results = ConcurrentBag::new(); + let thread_results = ConcurrentBag::with_fixed_capacity(64); let work = || _ = thread_results.push(thread_map()); self.scope(|s| { while do_spawn(nt) { diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 151c7c5..de99283 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -1,9 +1,8 @@ use crate::Params; use crate::generic_values::Values; -use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; -use crate::orch::NumSpawned; -use crate::orch::{Orchestrator, ParHandle, ParScope, ParThreadPool}; -use crate::runner::ParallelRunner; +use crate::generic_values::runner_results::ParallelCollectArbitrary; +use crate::orch::Orchestrator; +use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; @@ -26,8 +25,6 @@ where M1: Fn(I::Item) -> O + Sync, P: IntoConcurrentPinnedVec, { - use crate::orch::{SharedStateOf, ThreadRunnerOf}; - let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); let mut bag: ConcurrentBag = pinned_vec.into(); @@ -65,80 +62,24 @@ where let capacity_bound = pinned_vec.capacity_bound(); let offset = pinned_vec.len(); - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - let mut bag: ConcurrentBag = pinned_vec.into(); match iter.try_get_len() { Some(iter_len) => bag.reserve_maximum_capacity(offset + iter_len), None => bag.reserve_maximum_capacity(capacity_bound), }; - // compute - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result: ThreadCollectArbitrary = - orchestrator.thread_pool().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - thread::collect_arbitrary::x( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &xap1, - &bag, - ) - })); - } - - let mut early_exit_result = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match &result { - ThreadCollectArbitrary::AllCollected => {} - ThreadCollectArbitrary::StoppedByError { error: _ } => { - early_exit_result = Some(result); - break; - } - ThreadCollectArbitrary::StoppedByWhileCondition => { - early_exit_result = Some(result); - } - } - } - 
} - - early_exit_result.unwrap_or(ThreadCollectArbitrary::AllCollected) - }); + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + thread::collect_arbitrary::x(thread_runner, iter, state, &xap1, &bag).into_result() + }; + let (num_spawned, result) = + orchestrator.map_all(params, iter, ComputationKind::Collect, thread_map); - ( - num_spawned, - match result { - ThreadCollectArbitrary::AllCollected => ParallelCollectArbitrary::AllCollected { - pinned_vec: bag.into_inner(), - }, - ThreadCollectArbitrary::StoppedByWhileCondition => { - ParallelCollectArbitrary::StoppedByWhileCondition { - pinned_vec: bag.into_inner(), - } - } - ThreadCollectArbitrary::StoppedByError { error } => { - ParallelCollectArbitrary::StoppedByError { error } - } + let result = match result { + Err(error) => ParallelCollectArbitrary::StoppedByError { error }, + Ok(_) => ParallelCollectArbitrary::AllOrUntilWhileCollected { + pinned_vec: bag.into_inner(), }, - ) + }; + + (num_spawned, result) } diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 8e760c2..cbb933c 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -56,10 +56,10 @@ where let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { thread::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() }; - let (num_spawned, results) = + let (num_spawned, result) = orchestrator.map_all(params, iter, ComputationKind::Collect, thread_map); - let result = match results { + let result = match result { Err(error) => ParallelCollect::StoppedByError { error }, Ok(results) => ParallelCollect::reduce(results, pinned_vec), }; diff --git a/src/using/computations/u_xap/collect.rs b/src/using/computations/u_xap/collect.rs index 619e837..9a01af4 100644 --- a/src/using/computations/u_xap/collect.rs +++ b/src/using/computations/u_xap/collect.rs @@ -35,8 +35,7 @@ where let (num_threads, result) = u_collect_arbitrary::u_x(R::collection(p, len), self, pinned_vec); let pinned_vec = match result { - ParallelCollectArbitrary::AllCollected { pinned_vec } => pinned_vec, - ParallelCollectArbitrary::StoppedByWhileCondition { pinned_vec } => pinned_vec, + ParallelCollectArbitrary::AllOrUntilWhileCollected { pinned_vec } => pinned_vec, }; (num_threads, pinned_vec) } diff --git a/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs index b44ba15..03a1be3 100644 --- a/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs +++ b/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs @@ -141,11 +141,13 @@ where ( num_spawned, match result { - ThreadCollectArbitrary::AllCollected => ParallelCollectArbitrary::AllCollected { - pinned_vec: bag.into_inner(), - }, + ThreadCollectArbitrary::AllCollected => { + ParallelCollectArbitrary::AllOrUntilWhileCollected { + pinned_vec: bag.into_inner(), + } + } ThreadCollectArbitrary::StoppedByWhileCondition => { - ParallelCollectArbitrary::StoppedByWhileCondition { + ParallelCollectArbitrary::AllOrUntilWhileCollected { pinned_vec: bag.into_inner(), } } From 3969f4db6e753ad2148d1cdcaae2cbc109e189da Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 18:32:35 +0200 Subject: [PATCH 098/264] todo test concurrent bag --- .../tests/xap/collect.rs | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git 
a/src/computational_variants/tests/xap/collect.rs b/src/computational_variants/tests/xap/collect.rs index 23f7f41..aac3277 100644 --- a/src/computational_variants/tests/xap/collect.rs +++ b/src/computational_variants/tests/xap/collect.rs @@ -13,6 +13,46 @@ const N: [usize; 2] = [37, 125]; #[cfg(not(miri))] const N: [usize; 2] = [1025, 4735]; +#[test] +fn todo_panic_at_con_bag_new() { + // TODO: this code panics when ParThreadPool::map uses ConcurrentBag::new rather than ConcurrentBag::with_fixed_capacity + let n = 10; + let nt = 2; + let chunk = 1; + let ordering = IterationOrder::Arbitrary; + + let offset = 33; + + let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); + let fmap = |x: String| x.chars().map(|x| x.to_string()).collect::>(); + let xmap = |x: String| Vector(fmap(x)); + + let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + let mut expected = Vec::new(); + + for i in 0..offset { + let i = i.to_string(); + for x in fmap(i) { + output.push(x.clone()); + expected.push(x); + } + } + expected.extend(input.clone().into_iter().flat_map(&fmap)); + + let params = Params::new(nt, chunk, ordering); + let iter = input.into_con_iter(); + let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); + + let mut output = x.collect_into(output); + + if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { + expected.sort(); + output.sort(); + } + + assert_eq!(expected, output.to_vec()); +} + #[test_matrix( [0, 1, N[0], N[1]], [1, 4], From 53c1dd609506c0c47b5afb2bf42b8382f64c2bd7 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 18:42:42 +0200 Subject: [PATCH 099/264] ParThreadPool::max_num_threads is defined --- .../default_std_orchestrator.rs | 6 +- src/orch/orchestrator.rs | 12 ++- .../implementations/std_default_pool.rs | 38 +++++++- src/orch/thread_pool/par_thread_pool.rs | 2 + src/runner/parallel_runner_compute/next.rs | 89 ++++++++++--------- .../parallel_runner_compute/next_any.rs | 4 +- src/runner/parallel_runner_compute/reduce.rs | 4 +- 7 files changed, 100 insertions(+), 55 deletions(-) diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index 7e117e7..d478a5c 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -11,7 +11,11 @@ impl Orchestrator for DefaultStdOrchestrator { type ThreadPool = StdDefaultPool; - fn thread_pool(&mut self) -> &mut Self::ThreadPool { + fn thread_pool(&mut self) -> &Self::ThreadPool { + &self.0 + } + + fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { &mut self.0 } } diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 107ec5d..1a4f178 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -18,7 +18,9 @@ pub trait Orchestrator { ::new(kind, params, initial_input_len) } - fn thread_pool(&mut self) -> &mut Self::ThreadPool; + fn thread_pool(&mut self) -> &Self::ThreadPool; + + fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool; // derived @@ -43,7 +45,7 @@ pub trait Orchestrator { let work = || { thread_do(&iter, &state, runner.new_thread_runner(&state)); }; - self.thread_pool().run(do_spawn, work) + self.thread_pool_mut().run(do_spawn, work) } fn map_all( @@ -68,7 +70,11 @@ pub trait Orchestrator { let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = || thread_map(&iter, &state, 
runner.new_thread_runner(&state)); - self.thread_pool().map(do_spawn, work) + self.thread_pool_mut().map(do_spawn, work) + } + + fn max_num_threads_for_computation(&self, params: Params) -> usize { + 1 } } diff --git a/src/orch/thread_pool/implementations/std_default_pool.rs b/src/orch/thread_pool/implementations/std_default_pool.rs index 4336df2..e913edf 100644 --- a/src/orch/thread_pool/implementations/std_default_pool.rs +++ b/src/orch/thread_pool/implementations/std_default_pool.rs @@ -1,7 +1,35 @@ -use crate::orch::{NumSpawned, ParThreadPool}; +use crate::{env::MAX_NUM_THREADS_ENV_VARIABLE, orch::ParThreadPool}; -#[derive(Default)] -pub struct StdDefaultPool; +const MAX_UNSET_NUM_THREADS: usize = 8; + +pub struct StdDefaultPool { + max_num_threads: usize, +} + +impl Default for StdDefaultPool { + fn default() -> Self { + let env_max_num_threads = match std::env::var(MAX_NUM_THREADS_ENV_VARIABLE) { + Ok(s) => match s.parse::() { + Ok(0) => None, // consistent with .num_threads(0) representing no bound + Ok(x) => Some(x), // set to a positive bound + Err(_e) => None, // not a number, ignored assuming no bound + }, + Err(_e) => None, // not set, no bound + }; + + let ava_max_num_threads: Option = + std::thread::available_parallelism().map(|x| x.into()).ok(); + + let max_num_threads = match (env_max_num_threads, ava_max_num_threads) { + (Some(env), Some(ava)) => env.min(ava), + (Some(env), None) => env, + (None, Some(ava)) => ava, + (None, None) => MAX_UNSET_NUM_THREADS, + }; + + Self { max_num_threads } + } +} impl ParThreadPool for StdDefaultPool { type ScopeZzz<'scope, 'env> @@ -22,6 +50,10 @@ impl ParThreadPool for StdDefaultPool { 'scope: 's, 'env: 'scope + 's; + fn max_num_threads(&self) -> usize { + self.max_num_threads + } + fn scope<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index e3c1977..0bf6699 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -32,6 +32,8 @@ pub trait ParThreadPool { todo!() } + fn max_num_threads(&self) -> usize; + // derived fn run(&mut self, do_spawn: S, thread_do: F) -> NumSpawned diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index dd780f7..7dbbf1e 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -23,7 +23,7 @@ where let shared_state = &state; let mut num_spawned = NumSpawned::zero(); - let results = orchestrator.thread_pool().scope_zzz(|s| { + let results = orchestrator.thread_pool_mut().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -76,57 +76,58 @@ where let shared_state = &state; let mut num_spawned = NumSpawned::zero(); - let result: Result>, _> = orchestrator.thread_pool().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - thread::next::x( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &xap1, - ) - })) - } + let result: Result>, _> = + orchestrator.thread_pool_mut().scope_zzz(|s| { + let mut handles = vec![]; + + while runner.do_spawn_new(num_spawned, shared_state, &iter) { + num_spawned.increment(); + handles.push(s.spawn(|| { + thread::next::x( + runner.new_thread_runner(shared_state), + &iter, + shared_state, + &xap1, + ) + })) + } - let mut results = 
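+            // handles are polled as they finish; each contributes a candidate
+            // and the reduction keeps the one with the smallest input index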
Vec::with_capacity(handles.len()); + let mut results = Vec::with_capacity(handles.len()); - let mut error = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; + let mut error = None; + while !handles.is_empty() { + let mut finished_idx = None; + for (h, handle) in handles.iter().enumerate() { + if handle.is_finished() { + finished_idx = Some(h); + break; + } } - } - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match result { - NextWithIdx::Found { idx, value } => { - results.push(NextSuccess::Found { idx, value }) - } - NextWithIdx::NotFound => {} - NextWithIdx::StoppedByWhileCondition { idx } => { - results.push(NextSuccess::StoppedByWhileCondition { idx }); - } - NextWithIdx::StoppedByError { error: e } => { - error = Some(e); - break; + if let Some(h) = finished_idx { + let handle = handles.remove(h); + let result = handle.join().expect("failed to join the thread"); + match result { + NextWithIdx::Found { idx, value } => { + results.push(NextSuccess::Found { idx, value }) + } + NextWithIdx::NotFound => {} + NextWithIdx::StoppedByWhileCondition { idx } => { + results.push(NextSuccess::StoppedByWhileCondition { idx }); + } + NextWithIdx::StoppedByError { error: e } => { + error = Some(e); + break; + } } } } - } - match error { - Some(error) => Err(error), - None => Ok(results), - } - }); + match error { + Some(error) => Err(error), + None => Ok(results), + } + }); let next = result.map(NextSuccess::reduce); diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 9775c18..18a6bf4 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -23,7 +23,7 @@ where let shared_state = &state; let mut num_spawned = NumSpawned::zero(); - let result = orchestrator.thread_pool().scope_zzz(|s| { + let result = orchestrator.thread_pool_mut().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -69,7 +69,7 @@ where let shared_state = &state; let mut num_spawned = NumSpawned::zero(); - let result = orchestrator.thread_pool().scope_zzz(|s| { + let result = orchestrator.thread_pool_mut().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 130ebc0..8acf983 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -27,7 +27,7 @@ where let shared_state = &state; let mut num_spawned = NumSpawned::zero(); - let results = orchestrator.thread_pool().scope_zzz(|s| { + let results = orchestrator.thread_pool_mut().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { @@ -83,7 +83,7 @@ where let shared_state = &state; let mut num_spawned = NumSpawned::zero(); - let result: Result, _> = orchestrator.thread_pool().scope_zzz(|s| { + let result: Result, _> = orchestrator.thread_pool_mut().scope_zzz(|s| { let mut handles = vec![]; while runner.do_spawn_new(num_spawned, shared_state, &iter) { From 58237116c7653e6ba7ec11999d7d4dd12fe54c4b Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 18:54:39 +0200 Subject: [PATCH 100/264] max_num_threads_for_computation is defined 
--- .../default_std_orchestrator.rs | 2 +- src/orch/orchestrator.rs | 23 +++++++++++++++---- .../implementations/std_default_pool.rs | 7 ++++-- src/orch/thread_pool/par_thread_pool.rs | 4 +++- 4 files changed, 28 insertions(+), 8 deletions(-) diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index d478a5c..1e61593 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -11,7 +11,7 @@ impl Orchestrator for DefaultStdOrchestrator { type ThreadPool = StdDefaultPool; - fn thread_pool(&mut self) -> &Self::ThreadPool { + fn thread_pool(&self) -> &Self::ThreadPool { &self.0 } diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 1a4f178..6899a9e 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,5 +1,7 @@ +use std::num::NonZeroUsize; + use crate::{ - ParallelRunner, Params, + NumThreads, ParallelRunner, Params, orch::{NumSpawned, thread_pool::ParThreadPool}, runner::ComputationKind, }; @@ -18,7 +20,7 @@ pub trait Orchestrator { ::new(kind, params, initial_input_len) } - fn thread_pool(&mut self) -> &Self::ThreadPool; + fn thread_pool(&self) -> &Self::ThreadPool; fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool; @@ -73,8 +75,21 @@ pub trait Orchestrator { self.thread_pool_mut().map(do_spawn, work) } - fn max_num_threads_for_computation(&self, params: Params) -> usize { - 1 + fn max_num_threads_for_computation( + &self, + params: Params, + iter_len: Option, + ) -> NonZeroUsize { + let ava = self.thread_pool().max_num_threads(); + + let req = match (iter_len, params.num_threads) { + (Some(len), NumThreads::Auto) => NonZeroUsize::new(len.max(1)).expect(">0"), + (Some(len), NumThreads::Max(nt)) => NonZeroUsize::new(len.max(1)).expect(">0").min(nt), + (None, NumThreads::Auto) => NonZeroUsize::MAX, + (None, NumThreads::Max(nt)) => nt, + }; + + req.min(ava) } } diff --git a/src/orch/thread_pool/implementations/std_default_pool.rs b/src/orch/thread_pool/implementations/std_default_pool.rs index e913edf..3074552 100644 --- a/src/orch/thread_pool/implementations/std_default_pool.rs +++ b/src/orch/thread_pool/implementations/std_default_pool.rs @@ -1,9 +1,10 @@ use crate::{env::MAX_NUM_THREADS_ENV_VARIABLE, orch::ParThreadPool}; +use std::num::NonZeroUsize; const MAX_UNSET_NUM_THREADS: usize = 8; pub struct StdDefaultPool { - max_num_threads: usize, + max_num_threads: NonZeroUsize, } impl Default for StdDefaultPool { @@ -27,6 +28,8 @@ impl Default for StdDefaultPool { (None, None) => MAX_UNSET_NUM_THREADS, }; + let max_num_threads = NonZeroUsize::new(max_num_threads.max(1)).expect(">=1"); + Self { max_num_threads } } } @@ -50,7 +53,7 @@ impl ParThreadPool for StdDefaultPool { 'scope: 's, 'env: 'scope + 's; - fn max_num_threads(&self) -> usize { + fn max_num_threads(&self) -> NonZeroUsize { self.max_num_threads } diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index 0bf6699..f728762 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -1,3 +1,5 @@ +use std::num::NonZeroUsize; + use super::par_scope::ParScope; use crate::orch::num_spawned::NumSpawned; use orx_concurrent_bag::ConcurrentBag; @@ -32,7 +34,7 @@ pub trait ParThreadPool { todo!() } - fn max_num_threads(&self) -> usize; + fn max_num_threads(&self) -> NonZeroUsize; // derived From fdc464657502837b6cfe86d9b2925422ae13a5a4 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 
14 Sep 2025 18:56:27 +0200 Subject: [PATCH 101/264] map bag has capacity wrt the pool and computation --- src/orch/orchestrator.rs | 6 ++++-- src/orch/thread_pool/par_thread_pool.rs | 9 +++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 6899a9e..200600b 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -68,11 +68,13 @@ pub trait Orchestrator { T: Send, E: Send, { - let runner = Self::new_runner(kind, params, iter.try_get_len()); + let iter_len = iter.try_get_len(); + let runner = Self::new_runner(kind, params, iter_len); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = || thread_map(&iter, &state, runner.new_thread_runner(&state)); - self.thread_pool_mut().map(do_spawn, work) + let max_num_threads = self.max_num_threads_for_computation(params, iter_len); + self.thread_pool_mut().map(do_spawn, work, max_num_threads) } fn max_num_threads_for_computation( diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index f728762..a93c820 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -53,7 +53,12 @@ pub trait ParThreadPool { nt } - fn map(&mut self, do_spawn: S, thread_map: M) -> (NumSpawned, Result, E>) + fn map( + &mut self, + do_spawn: S, + thread_map: M, + max_num_threads: NonZeroUsize, + ) -> (NumSpawned, Result, E>) where S: Fn(NumSpawned) -> bool + Sync, M: Fn() -> Result + Sync, @@ -61,7 +66,7 @@ pub trait ParThreadPool { E: Send, { let mut nt = NumSpawned::zero(); - let thread_results = ConcurrentBag::with_fixed_capacity(64); + let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); let work = || _ = thread_results.push(thread_map()); self.scope(|s| { while do_spawn(nt) { From 76ef97feb9e9fd07e791a697377a96f3e30fa2d8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 19:23:47 +0200 Subject: [PATCH 102/264] map_all is defined over all fallibility impl --- .../runner_results/fallibility.rs | 22 +++++++ src/orch/orchestrator.rs | 13 ++-- src/orch/thread_pool/par_thread_pool.rs | 60 +++++++++++++++++-- .../collect_arbitrary.rs | 8 ++- .../collect_ordered.rs | 8 ++- src/runner/parallel_runner_compute/reduce.rs | 17 +++++- 6 files changed, 114 insertions(+), 14 deletions(-) diff --git a/src/generic_values/runner_results/fallibility.rs b/src/generic_values/runner_results/fallibility.rs index eafb297..bb49b4a 100644 --- a/src/generic_values/runner_results/fallibility.rs +++ b/src/generic_values/runner_results/fallibility.rs @@ -18,6 +18,8 @@ pub trait Fallibility: Sized { fn reduce_to_stop(reduce: Reduce) -> Result, StopReduce> where V: Values; + + fn reduce_results(results: Vec>) -> Result, Self::Error>; } pub struct Infallible; @@ -59,6 +61,15 @@ impl Fallibility for Infallible { Reduce::StoppedByWhileCondition { acc } => Err(StopReduce::DueToWhile { acc }), } } + + fn reduce_results(results: Vec>) -> Result, Self::Error> { + Ok(results + .into_iter() + .map(|x| match x { + Ok(x) => x, + }) + .collect()) + } } pub struct Fallible(PhantomData); @@ -106,6 +117,17 @@ impl Fallibility for Fallible { Reduce::StoppedByError { error } => Err(StopReduce::DueToError { error }), } } + + fn reduce_results(results: Vec>) -> Result, Self::Error> { + let mut ok_results = Vec::with_capacity(results.len()); + for result in results { + match result { + Ok(x) => ok_results.push(x), + Err(e) => return Err(e), 
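+                // the first error short-circuits the merge; Ok results already
+                // gathered from other threads are dropped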
+ } + } + Ok(ok_results) + } } pub enum Never {} diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 200600b..e9889d4 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -2,6 +2,7 @@ use std::num::NonZeroUsize; use crate::{ NumThreads, ParallelRunner, Params, + generic_values::runner_results::Fallibility, orch::{NumSpawned, thread_pool::ParThreadPool}, runner::ComputationKind, }; @@ -50,23 +51,24 @@ pub trait Orchestrator { self.thread_pool_mut().run(do_spawn, work) } - fn map_all( + fn map_all( &mut self, params: Params, iter: I, kind: ComputationKind, thread_map: M, - ) -> (NumSpawned, Result, E>) + ) -> (NumSpawned, Result, F::Error>) where + F: Fallibility, I: ConcurrentIter, M: Fn( &I, &::SharedState, ::ThreadRunner, - ) -> Result + ) -> Result + Sync, T: Send, - E: Send, + F::Error: Send, { let iter_len = iter.try_get_len(); let runner = Self::new_runner(kind, params, iter_len); @@ -74,7 +76,8 @@ pub trait Orchestrator { let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = || thread_map(&iter, &state, runner.new_thread_runner(&state)); let max_num_threads = self.max_num_threads_for_computation(params, iter_len); - self.thread_pool_mut().map(do_spawn, work, max_num_threads) + self.thread_pool_mut() + .map_all::(do_spawn, work, max_num_threads) } fn max_num_threads_for_computation( diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index a93c820..be95a00 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -1,8 +1,7 @@ -use std::num::NonZeroUsize; - use super::par_scope::ParScope; -use crate::orch::num_spawned::NumSpawned; +use crate::{generic_values::runner_results::Fallibility, orch::num_spawned::NumSpawned}; use orx_concurrent_bag::ConcurrentBag; +use std::num::NonZeroUsize; pub trait ParThreadPool { type ScopeZzz<'scope, 'env>: ParScope<'scope, 'env> @@ -53,7 +52,31 @@ pub trait ParThreadPool { nt } - fn map( + fn map_infallible( + &mut self, + do_spawn: S, + thread_map: M, + max_num_threads: NonZeroUsize, + ) -> (NumSpawned, Vec) + where + S: Fn(NumSpawned) -> bool + Sync, + M: Fn() -> T + Sync, + T: Send, + { + let mut nt = NumSpawned::zero(); + let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); + let work = || _ = thread_results.push(thread_map()); + self.scope(|s| { + while do_spawn(nt) { + nt.increment(); + Self::run_in_scope(&s, &work); + } + }); + + (nt, thread_results.into_inner().into()) + } + + fn map_fallible( &mut self, do_spawn: S, thread_map: M, @@ -85,4 +108,33 @@ pub trait ParThreadPool { (nt, Ok(results)) } + + fn map_all( + &mut self, + do_spawn: S, + thread_map: M, + max_num_threads: NonZeroUsize, + ) -> (NumSpawned, Result, F::Error>) + where + F: Fallibility, + S: Fn(NumSpawned) -> bool + Sync, + M: Fn() -> Result + Sync, + T: Send, + F::Error: Send, + { + let mut nt = NumSpawned::zero(); + let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); + let work = || _ = thread_results.push(thread_map()); + self.scope(|s| { + while do_spawn(nt) { + nt.increment(); + Self::run_in_scope(&s, &work); + } + }); + + let thread_results: Vec<_> = thread_results.into_inner().into(); + let result = F::reduce_results(thread_results); + + (nt, result) + } } diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index de99283..6d70773 100644 --- 
a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -71,8 +71,12 @@ where let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { thread::collect_arbitrary::x(thread_runner, iter, state, &xap1, &bag).into_result() }; - let (num_spawned, result) = - orchestrator.map_all(params, iter, ComputationKind::Collect, thread_map); + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); let result = match result { Err(error) => ParallelCollectArbitrary::StoppedByError { error }, diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index cbb933c..42be87e 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -56,8 +56,12 @@ where let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { thread::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() }; - let (num_spawned, result) = - orchestrator.map_all(params, iter, ComputationKind::Collect, thread_map); + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); let result = match result { Err(error) => ParallelCollect::StoppedByError { error }, diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 8acf983..c4a2051 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,6 +1,8 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Reduce}; -use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; +use crate::orch::{ + NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool, SharedStateOf, ThreadRunnerOf, +}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -21,6 +23,19 @@ where Red: Fn(O, O) -> O + Sync, O: Send, { + // let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + // let x = thread::reduce::m(thread_runner, iter, state, &map1, &reduce); + // todo!() + // }; + // let (num_spawned, result) = + // orchestrator.map_fallible(params, iter, ComputationKind::Collect, thread_map); + + // let result = match result { + // Err(error) => ParallelCollect::StoppedByError { error }, + // Ok(results) => ParallelCollect::reduce(results, pinned_vec), + // }; + // (num_spawned, result); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); From 3701cd8ffec13e7675d9553d4e480cb803d830a4 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 19:24:14 +0200 Subject: [PATCH 103/264] clean up --- src/orch/thread_pool/par_thread_pool.rs | 57 ------------------------- 1 file changed, 57 deletions(-) diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index be95a00..2c237f6 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -52,63 +52,6 @@ pub trait ParThreadPool { nt } - fn map_infallible( - &mut self, - do_spawn: S, - thread_map: M, - max_num_threads: NonZeroUsize, - ) -> (NumSpawned, Vec) - where - S: Fn(NumSpawned) -> bool + Sync, - M: Fn() -> T + Sync, - T: Send, - { - let mut nt = 
NumSpawned::zero(); - let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); - let work = || _ = thread_results.push(thread_map()); - self.scope(|s| { - while do_spawn(nt) { - nt.increment(); - Self::run_in_scope(&s, &work); - } - }); - - (nt, thread_results.into_inner().into()) - } - - fn map_fallible<S, M, T, E>( - &mut self, - do_spawn: S, - thread_map: M, - max_num_threads: NonZeroUsize, - ) -> (NumSpawned, Result<Vec<T>, E>) - where - S: Fn(NumSpawned) -> bool + Sync, - M: Fn() -> Result<T, E> + Sync, - T: Send, - E: Send, - { - let mut nt = NumSpawned::zero(); - let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); - let work = || _ = thread_results.push(thread_map()); - self.scope(|s| { - while do_spawn(nt) { - nt.increment(); - Self::run_in_scope(&s, &work); - } - }); - - let mut results = vec![]; - for r in thread_results.into_inner() { - match r { - Ok(x) => results.push(x), - Err(e) => return (nt, Err(e)), - } - } - - (nt, Ok(results)) - } From ccf7af22802609674cc3497be04f5d60128f8300 Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 19:26:08 +0200 Subject: [PATCH 104/264] type alias to simplify fn signature --- src/orch/orchestrator.rs | 16 +++------------- src/runner/parallel_runner_compute/reduce.rs | 10 +++++----- 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index e9889d4..44506c8 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,5 +1,3 @@ -use std::num::NonZeroUsize; - use crate::{ NumThreads, ParallelRunner, Params, generic_values::runner_results::Fallibility, @@ -7,6 +5,7 @@ use crate::{ runner::ComputationKind, }; use orx_concurrent_iter::ConcurrentIter; +use std::num::NonZeroUsize; pub trait Orchestrator { type Runner: ParallelRunner; @@ -36,11 +35,7 @@ pub trait Orchestrator { ) -> NumSpawned where I: ConcurrentIter, - F: Fn( - &I, - &<Self::Runner as ParallelRunner>::SharedState, - <Self::Runner as ParallelRunner>::ThreadRunner, - ) + Sync, + F: Fn(&I, &SharedStateOf<Self>, ThreadRunnerOf<Self>) + Sync, { let runner = Self::new_runner(kind, params, iter.try_get_len()); let state = runner.new_shared_state(); @@ -61,12 +56,7 @@ pub trait Orchestrator { where F: Fallibility, I: ConcurrentIter, - M: Fn( - &I, - &<Self::Runner as ParallelRunner>::SharedState, - <Self::Runner as ParallelRunner>::ThreadRunner, - ) -> Result<T, F::Error> - + Sync, + M: Fn(&I, &SharedStateOf<Self>, ThreadRunnerOf<Self>) -> Result<T, F::Error> + Sync, T: Send, F::Error: Send, { diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index c4a2051..27978ab 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -23,12 +23,12 @@ where Red: Fn(O, O) -> O + Sync, O: Send, { - // let thread_map = |iter: &I, state: &SharedStateOf<C>, thread_runner: ThreadRunnerOf<C>| { - // let x = thread::reduce::m(thread_runner, iter, state, &map1, &reduce); - // todo!() - // }; + let thread_map = |iter: &I, state: &SharedStateOf<C>, thread_runner: ThreadRunnerOf<C>| { + let x = thread::reduce::m(thread_runner, iter, state, &map1, &reduce); + todo!() + }; // let (num_spawned, result) = - // orchestrator.map_fallible(params, iter, ComputationKind::Collect, thread_map); + // orchestrator.map_all(params, iter, ComputationKind::Collect, thread_map); From 1d704448039cc42e9a663a7f5bdcaca1b6c0acae Mon Sep 17 00:00:00 2001 From: orxfun Date: Sun, 14 Sep 2025 19:33:29 +0200 Subject: [PATCH 105/264] map reduce is computed via orchestrator ---
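Note on this patch (kept out of the commit message, in the usual position between the `---` separator and the diffstat): the rewrite below relies on the uninhabited `Never` enum as the error type of the infallible path, so the final `match` on the orchestrator's result needs only an `Ok` arm. A minimal, self-contained sketch of that pattern under the same idea; the `sum_per_thread` helper and the `u64` accumulator are illustrative only and not part of the crate's API:

    // `Never` has no values, so a `Result<T, Never>` can only ever be `Ok`.
    pub enum Never {}

    fn sum_per_thread(thread_results: Vec<Result<Option<u64>, Never>>) -> Option<u64> {
        // Collecting into `Result<Vec<_>, Never>` mirrors the shape that the
        // orchestrator's infallible map returns for the spawned threads.
        let accs: Result<Vec<Option<u64>>, Never> = thread_results.into_iter().collect();
        // Exhaustive without an `Err` arm because `Never` is uninhabited
        // (accepted on stable Rust since min_exhaustive_patterns, 1.82).
        let accs = match accs {
            Ok(accs) => accs,
        };
        // Threads that received no items report `None`; flatten those away,
        // then fold the remaining per-thread accumulators.
        accs.into_iter().flatten().reduce(|a, b| a + b)
    }

For example, `sum_per_thread(vec![Ok(Some(3)), Ok(None), Ok(Some(4))])` evaluates to `Some(7)`; this is the same fold that `reduce::m` performs with its `reduce` closure once `map_infallible` returns.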
src/generic_values/runner_results/mod.rs | 2 +- src/orch/orchestrator.rs | 17 ++++++- src/runner/parallel_runner_compute/reduce.rs | 51 +++----------------- 3 files changed, 25 insertions(+), 45 deletions(-) diff --git a/src/generic_values/runner_results/mod.rs b/src/generic_values/runner_results/mod.rs index 89ece60..2d37f8c 100644 --- a/src/generic_values/runner_results/mod.rs +++ b/src/generic_values/runner_results/mod.rs @@ -9,7 +9,7 @@ mod stop; pub use collect_arbitrary::{ArbitraryPush, ParallelCollectArbitrary, ThreadCollectArbitrary}; pub use collect_ordered::{OrderedPush, ParallelCollect, ThreadCollect}; pub use collect_sequential::SequentialPush; -pub use fallibility::{Fallibility, Fallible, Infallible}; +pub use fallibility::{Fallibility, Fallible, Infallible, Never}; pub use next::{Next, NextSuccess, NextWithIdx}; pub use reduce::Reduce; pub use stop::{Stop, StopReduce, StopWithIdx}; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 44506c8..66882c2 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,6 +1,6 @@ use crate::{ NumThreads, ParallelRunner, Params, - generic_values::runner_results::Fallibility, + generic_values::runner_results::{Fallibility, Infallible, Never}, orch::{NumSpawned, thread_pool::ParThreadPool}, runner::ComputationKind, }; @@ -70,6 +70,21 @@ pub trait Orchestrator { .map_all::(do_spawn, work, max_num_threads) } + fn map_infallible( + &mut self, + params: Params, + iter: I, + kind: ComputationKind, + thread_map: M, + ) -> (NumSpawned, Result, Never>) + where + I: ConcurrentIter, + M: Fn(&I, &SharedStateOf, ThreadRunnerOf) -> Result + Sync, + T: Send, + { + self.map_all::(params, iter, kind, thread_map) + } + fn max_num_threads_for_computation( &self, params: Params, diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 27978ab..aec12ba 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,5 +1,5 @@ use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Reduce}; +use crate::generic_values::runner_results::{Fallibility, Infallible, Reduce}; use crate::orch::{ NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool, SharedStateOf, ThreadRunnerOf, }; @@ -24,50 +24,15 @@ where O: Send, { let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { - let x = thread::reduce::m(thread_runner, iter, state, &map1, &reduce); - todo!() + let result = thread::reduce::m(thread_runner, iter, state, &map1, &reduce); + Ok(result) }; - // let (num_spawned, result) = - // orchestrator.map_all(params, iter, ComputationKind::Collect, thread_map); + let (num_spawned, result) = + orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); - // let result = match result { - // Err(error) => ParallelCollect::StoppedByError { error }, - // Ok(results) => ParallelCollect::reduce(results, pinned_vec), - // }; - // (num_spawned, result); - - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let results = orchestrator.thread_pool_mut().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - thread::reduce::m( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &map1, 
- &reduce, - ) - })); - } - - let mut results = Vec::with_capacity(handles.len()); - for x in handles { - if let Some(x) = x.join().expect("failed to join the thread") { - results.push(x); - } - } - results - }); - - let acc = results.into_iter().reduce(reduce); + let acc = match result { + Ok(results) => results.into_iter().filter_map(|x| x).reduce(reduce), + }; (num_spawned, acc) } From 44bd475656ebe7027b9967cc5c5d57dedb431693 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 11:18:44 +0200 Subject: [PATCH 106/264] map reduce uses orchestrator map --- Cargo.toml | 2 +- src/runner/parallel_runner_compute/reduce.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9da1175..d0cde05 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ rayon = "1.10.0" test-case = "3.3.1" [[bench]] -name = "reduce_map_filter" +name = "reduce" harness = false [features] diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index aec12ba..7e1d3e1 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,5 +1,5 @@ use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Infallible, Reduce}; +use crate::generic_values::runner_results::{Fallibility, Infallible, ParallelCollect, Reduce}; use crate::orch::{ NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool, SharedStateOf, ThreadRunnerOf, }; From 11cfbf76ba12def65fcec3a1feb26f3e3809179e Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 11:39:16 +0200 Subject: [PATCH 107/264] xap reduce uses orchestrator map --- Cargo.toml | 2 +- src/generic_values/runner_results/reduce.rs | 10 +++ src/runner/parallel_runner_compute/reduce.rs | 75 ++++---------- 3 files changed, 24 insertions(+), 63 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d0cde05..9da1175 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ rayon = "1.10.0" test-case = "3.3.1" [[bench]] -name = "reduce" +name = "reduce_map_filter" harness = false [features] diff --git a/src/generic_values/runner_results/reduce.rs b/src/generic_values/runner_results/reduce.rs index db4a9e5..ffd5730 100644 --- a/src/generic_values/runner_results/reduce.rs +++ b/src/generic_values/runner_results/reduce.rs @@ -12,6 +12,16 @@ pub enum Reduce<V: Values> { }, } +impl<V: Values> Reduce<V> { + pub fn into_result(self) -> Result<Option<V::Item>, <V::Fallibility as Fallibility>::Error> { + match self { + Reduce::Done { acc } => Ok(acc), + Reduce::StoppedByWhileCondition { acc } => Ok(acc), + Reduce::StoppedByError { error } => Err(error), + } + } +} + impl<V: Values> core::fmt::Debug for Reduce<V> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 7e1d3e1..937c485 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -1,10 +1,8 @@ +use crate::Params; use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Infallible, ParallelCollect, Reduce}; +use crate::generic_values::runner_results::Fallibility; -use crate::orch::{ - NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool, SharedStateOf, ThreadRunnerOf, -}; +use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use crate::runner::{ComputationKind, thread_runner_compute as thread}; use orx_concurrent_iter::ConcurrentIter; // 
m @@ -57,62 +55,15 @@ where X1: Fn(I::Item) -> Vo + Sync, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result: Result, _> = orchestrator.thread_pool_mut().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - thread::reduce::x( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &xap1, - &reduce, - ) - })); - } - - let mut results = Vec::with_capacity(handles.len()); - - let mut error = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match result { - Reduce::Done { acc: Some(acc) } => results.push(acc), - Reduce::StoppedByWhileCondition { acc: Some(acc) } => results.push(acc), - Reduce::StoppedByError { error: e } => { - error = Some(e); - break; - } - _ => {} - } - } - } - - match error { - Some(error) => Err(error), - None => Ok(results), - } - }); - - let acc = result.map(|results| results.into_iter().reduce(reduce)); - + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + thread::reduce::x(thread_runner, iter, state, &xap1, &reduce).into_result() + }; + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); + let acc = result.map(|results| results.into_iter().flat_map(|x| x).reduce(reduce)); (num_spawned, acc) } From c7c1645c619ae7594ff2f76e033d7a358b9c3f21 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 11:51:49 +0200 Subject: [PATCH 108/264] wip --- .../collect_arbitrary.rs | 6 +- .../collect_ordered.rs | 6 +- src/runner/parallel_runner_compute/next.rs | 65 +++++++++---------- .../parallel_runner_compute/next_any.rs | 6 +- src/runner/parallel_runner_compute/reduce.rs | 7 +- .../u_collect_arbitrary.rs | 6 +- .../u_collect_ordered.rs | 6 +- .../runner/parallel_runner_compute/u_next.rs | 6 +- .../parallel_runner_compute/u_next_any.rs | 6 +- .../parallel_runner_compute/u_reduce.rs | 6 +- 10 files changed, 56 insertions(+), 64 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 6d70773..62039f4 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -3,7 +3,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::ParallelCollectArbitrary; use crate::orch::Orchestrator; use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_runner_compute as thread}; +use crate::runner::{ComputationKind, thread_runner_compute as th}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; @@ -34,7 +34,7 @@ where }; let thread_work = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { - thread::collect_arbitrary::m(thread_runner, iter, state, &map1, &bag); + th::collect_arbitrary::m(thread_runner, iter, state, &map1, &bag); }; let num_spawned = 
orchestrator.run_all(params, iter, ComputationKind::Collect, thread_work); @@ -69,7 +69,7 @@ where }; let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { - thread::collect_arbitrary::x(thread_runner, iter, state, &xap1, &bag).into_result() + th::collect_arbitrary::x(thread_runner, iter, state, &xap1, &bag).into_result() }; let (num_spawned, result) = orchestrator.map_all::( params, diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index 42be87e..f544a32 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_runner_compute as thread}; +use crate::runner::{ComputationKind, thread_runner_compute as th}; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; @@ -27,7 +27,7 @@ where let o_bag: ConcurrentOrderedBag = pinned_vec.into(); let thread_do = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { - thread::collect_ordered::m(thread_runner, iter, state, &map1, &o_bag, offset); + th::collect_ordered::m(thread_runner, iter, state, &map1, &o_bag, offset); }; let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_do); @@ -54,7 +54,7 @@ where P: IntoConcurrentPinnedVec, { let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { - thread::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() + th::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() }; let (num_spawned, result) = orchestrator.map_all::( params, diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 7dbbf1e..e1fb994 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,7 +1,9 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; -use crate::runner::{ComputationKind, thread_runner_compute as thread}; +use crate::orch::{ + NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool, SharedStateOf, ThreadRunnerOf, +}; +use crate::runner::{ComputationKind, thread_runner_compute as th}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -17,39 +19,20 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let results = orchestrator.thread_pool_mut().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - thread::next::m( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &map1, - ) - })) - } - - let mut results: Vec<(usize, O)> = Vec::with_capacity(handles.len()); - for x in handles { - if let Some(x) = x.join().expect("failed to join the thread") { - results.push(x); - } - } - results - }); - - let acc = 
results.into_iter().min_by_key(|x| x.0).map(|x| x.1); - - (num_spawned, acc) + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + Ok(th::next::m(thread_runner, iter, state, &map1)) + }; + let (num_spawned, result) = + orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); + + let next = match result { + Ok(results) => results + .into_iter() + .filter_map(|x| x) + .min_by_key(|x| x.0) + .map(|x| x.1), + }; + (num_spawned, next) } type ResultNext = Result< @@ -70,6 +53,16 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let x = th::next::x(thread_runner, iter, state, &xap1); + }; + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); + let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); let state = runner.new_shared_state(); @@ -83,7 +76,7 @@ where while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned.increment(); handles.push(s.spawn(|| { - thread::next::x( + th::next::x( runner.new_thread_runner(shared_state), &iter, shared_state, diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 18a6bf4..b1ad2b1 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,7 +1,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; -use crate::runner::{ComputationKind, thread_runner_compute as thread}; +use crate::runner::{ComputationKind, thread_runner_compute as th}; use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; @@ -29,7 +29,7 @@ where while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned.increment(); handles.push(s.spawn(|| { - thread::next_any::m( + th::next_any::m( runner.new_thread_runner(shared_state), &iter, shared_state, @@ -75,7 +75,7 @@ where while runner.do_spawn_new(num_spawned, shared_state, &iter) { num_spawned.increment(); handles.push(s.spawn(|| { - thread::next_any::x( + th::next_any::x( runner.new_thread_runner(shared_state), &iter, shared_state, diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 937c485..dc034fc 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_runner_compute as thread}; +use crate::runner::{ComputationKind, thread_runner_compute as th}; use orx_concurrent_iter::ConcurrentIter; // m @@ -22,8 +22,7 @@ where O: Send, { let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { - let result = thread::reduce::m(thread_runner, iter, state, &map1, &reduce); - Ok(result) + Ok(th::reduce::m(thread_runner, iter, state, &map1, &reduce)) }; let (num_spawned, result) = orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); @@ -56,7 +55,7 @@ where Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { - thread::reduce::x(thread_runner, 
iter, state, &xap1, &reduce).into_result() + th::reduce::x(thread_runner, iter, state, &xap1, &reduce).into_result() }; let (num_spawned, result) = orchestrator.map_all::( params, diff --git a/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs index 03a1be3..666b214 100644 --- a/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs +++ b/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs @@ -1,4 +1,4 @@ -use super::super::thread_runner_compute as thread; +use super::super::thread_runner_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; use crate::orch::NumSpawned; @@ -44,7 +44,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); s.spawn(|| { - thread::u_collect_arbitrary::u_m( + th::u_collect_arbitrary::u_m( runner.new_thread_runner(shared_state), u, &iter, @@ -98,7 +98,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); handles.push(s.spawn(|| { - thread::u_collect_arbitrary::u_x( + th::u_collect_arbitrary::u_x( runner.new_thread_runner(shared_state), u, &iter, diff --git a/src/using/runner/parallel_runner_compute/u_collect_ordered.rs b/src/using/runner/parallel_runner_compute/u_collect_ordered.rs index 7ac1bf2..90d893e 100644 --- a/src/using/runner/parallel_runner_compute/u_collect_ordered.rs +++ b/src/using/runner/parallel_runner_compute/u_collect_ordered.rs @@ -1,4 +1,4 @@ -use super::super::thread_runner_compute as thread; +use super::super::thread_runner_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; use crate::orch::NumSpawned; @@ -35,7 +35,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); s.spawn(|| { - thread::u_collect_ordered::u_m( + th::u_collect_ordered::u_m( runner.new_thread_runner(shared_state), u, &iter, @@ -83,7 +83,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); handles.push(s.spawn(|| { - thread::u_collect_ordered::u_x( + th::u_collect_ordered::u_x( runner.new_thread_runner(shared_state), u, &iter, diff --git a/src/using/runner/parallel_runner_compute/u_next.rs b/src/using/runner/parallel_runner_compute/u_next.rs index eee33aa..9a61886 100644 --- a/src/using/runner/parallel_runner_compute/u_next.rs +++ b/src/using/runner/parallel_runner_compute/u_next.rs @@ -1,4 +1,4 @@ -use super::super::thread_runner_compute as thread; +use super::super::thread_runner_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::NumSpawned; @@ -28,7 +28,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); handles.push(s.spawn(|| { - thread::u_next::u_m( + th::u_next::u_m( runner.new_thread_runner(shared_state), u, &iter, @@ -79,7 +79,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); handles.push(s.spawn(|| { - thread::u_next::u_x( + th::u_next::u_x( runner.new_thread_runner(shared_state), u, &iter, diff --git a/src/using/runner/parallel_runner_compute/u_next_any.rs b/src/using/runner/parallel_runner_compute/u_next_any.rs index e4c1d42..21ccad3 100644 --- a/src/using/runner/parallel_runner_compute/u_next_any.rs +++ b/src/using/runner/parallel_runner_compute/u_next_any.rs @@ -1,4 +1,4 @@ -use 
super::super::thread_runner_compute as thread; +use super::super::thread_runner_compute as th; use crate::generic_values::runner_results::Fallibility; use crate::orch::NumSpawned; use crate::using::Using; @@ -27,7 +27,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); handles.push(s.spawn(|| { - thread::u_next_any::u_m( + th::u_next_any::u_m( runner.new_thread_runner(shared_state), u, &iter, @@ -71,7 +71,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); handles.push(s.spawn(|| { - thread::u_next_any::u_x( + th::u_next_any::u_x( runner.new_thread_runner(shared_state), u, &iter, diff --git a/src/using/runner/parallel_runner_compute/u_reduce.rs b/src/using/runner/parallel_runner_compute/u_reduce.rs index 6333b6c..370932b 100644 --- a/src/using/runner/parallel_runner_compute/u_reduce.rs +++ b/src/using/runner/parallel_runner_compute/u_reduce.rs @@ -1,4 +1,4 @@ -use super::super::thread_runner_compute as thread; +use super::super::thread_runner_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Reduce}; use crate::orch::NumSpawned; @@ -35,7 +35,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); handles.push(s.spawn(|| { - thread::u_reduce::u_m( + th::u_reduce::u_m( runner.new_thread_runner(shared_state), u, &iter, @@ -93,7 +93,7 @@ where let u = using.create(num_spawned.into_inner()); num_spawned.increment(); handles.push(s.spawn(|| { - thread::u_reduce::u_x( + th::u_reduce::u_x( runner.new_thread_runner(shared_state), u, &iter, From 8f1d1d971cdd5b8833a48557ad4beb7ae6b533a2 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 13:59:06 +0200 Subject: [PATCH 109/264] next and next-any uses orchestrator map --- Cargo.toml | 2 +- src/generic_values/runner_results/next.rs | 2 +- src/runner/parallel_runner_compute/next.rs | 82 +++----------- .../parallel_runner_compute/next_any.rs | 104 ++++-------------- src/runner/parallel_runner_compute/reduce.rs | 2 +- 5 files changed, 39 insertions(+), 153 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9da1175..4d8bf3a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ rayon = "1.10.0" test-case = "3.3.1" [[bench]] -name = "reduce_map_filter" +name = "find_iter_into_par" harness = false [features] diff --git a/src/generic_values/runner_results/next.rs b/src/generic_values/runner_results/next.rs index b5f1337..94eb9b0 100644 --- a/src/generic_values/runner_results/next.rs +++ b/src/generic_values/runner_results/next.rs @@ -30,7 +30,7 @@ pub enum NextSuccess { } impl NextSuccess { - pub fn reduce(results: Vec) -> Option<(usize, T)> { + pub fn reduce(results: impl IntoIterator) -> Option<(usize, T)> { let mut result = None; let mut idx_bound = usize::MAX; for x in results { diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index e1fb994..9081721 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -1,10 +1,8 @@ +use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::{ - NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool, SharedStateOf, ThreadRunnerOf, -}; +use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; use crate::runner::{ComputationKind, thread_runner_compute as th}; -use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; pub fn m( @@ 
-19,7 +17,7 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner| { Ok(th::next::m(thread_runner, iter, state, &map1)) }; let (num_spawned, result) = @@ -53,8 +51,15 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { - let x = th::next::x(thread_runner, iter, state, &xap1); + let thread_map = |iter: &I, state: &SharedStateOf, th_runner| match th::next::x( + th_runner, iter, state, &xap1, + ) { + NextWithIdx::Found { idx, value } => Ok(Some(NextSuccess::Found { idx, value })), + NextWithIdx::NotFound => Ok(None), + NextWithIdx::StoppedByWhileCondition { idx } => { + Ok(Some(NextSuccess::StoppedByWhileCondition { idx })) + } + NextWithIdx::StoppedByError { error } => Err(error), }; let (num_spawned, result) = orchestrator.map_all::( params, @@ -62,67 +67,6 @@ where ComputationKind::Collect, thread_map, ); - - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result: Result>, _> = - orchestrator.thread_pool_mut().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - th::next::x( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &xap1, - ) - })) - } - - let mut results = Vec::with_capacity(handles.len()); - - let mut error = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match result { - NextWithIdx::Found { idx, value } => { - results.push(NextSuccess::Found { idx, value }) - } - NextWithIdx::NotFound => {} - NextWithIdx::StoppedByWhileCondition { idx } => { - results.push(NextSuccess::StoppedByWhileCondition { idx }); - } - NextWithIdx::StoppedByError { error: e } => { - error = Some(e); - break; - } - } - } - } - - match error { - Some(error) => Err(error), - None => Ok(results), - } - }); - - let next = result.map(NextSuccess::reduce); - + let next = result.map(|results| NextSuccess::reduce(results.into_iter().filter_map(|x| x))); (num_spawned, next) } diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index b1ad2b1..8dedbba 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -1,8 +1,8 @@ +use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{NumSpawned, Orchestrator, ParHandle, ParScope, ParThreadPool}; +use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; use crate::runner::{ComputationKind, thread_runner_compute as th}; -use crate::{ParallelRunner, Params}; use orx_concurrent_iter::ConcurrentIter; pub fn m( @@ -17,34 +17,16 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result = 
orchestrator.thread_pool_mut().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - th::next_any::m( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &map1, - ) - })); - } - - // do not wait to join other threads - handles - .into_iter() - .find_map(|x| x.join().expect("failed to join the thread")) - }); - - (num_spawned, result) + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner| { + Ok(th::next_any::m(thread_runner, iter, state, &map1)) + }; + let (num_spawned, result) = + orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); + + let next = match result { + Ok(results) => results.into_iter().filter_map(|x| x).next(), + }; + (num_spawned, next) } type ResultNextAny = @@ -63,55 +45,15 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let runner = C::new_runner(ComputationKind::Collect, params, iter.try_get_len()); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result = orchestrator.thread_pool_mut().scope_zzz(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - num_spawned.increment(); - handles.push(s.spawn(|| { - th::next_any::x( - runner.new_thread_runner(shared_state), - &iter, - shared_state, - &xap1, - ) - })); - } - - let mut result = Ok(None); - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - match handle.join().expect("failed to join the thread") { - Ok(Some(x)) => { - result = Ok(Some(x)); - break; - } - Err(error) => { - result = Err(error); - break; - } - Ok(None) => {} - } - } - } - - result - }); - - (num_spawned, result) + let thread_map = |iter: &I, state: &SharedStateOf, th_runner| { + th::next_any::x(th_runner, iter, state, &xap1) + }; + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); + let next = result.map(|results| results.into_iter().filter_map(|x| x).next()); + (num_spawned, next) } diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index dc034fc..15c072a 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -63,6 +63,6 @@ where ComputationKind::Collect, thread_map, ); - let acc = result.map(|results| results.into_iter().flat_map(|x| x).reduce(reduce)); + let acc = result.map(|results| results.into_iter().filter_map(|x| x).reduce(reduce)); (num_spawned, acc) } From 105272386aca3abf52678ad21e51fc506beaf95a Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 14:07:33 +0200 Subject: [PATCH 110/264] thread pool clean up --- .../thread_pool/implementations/std_default_pool.rs | 12 ------------ src/orch/thread_pool/par_thread_pool.rs | 8 -------- 2 files changed, 20 deletions(-) diff --git a/src/orch/thread_pool/implementations/std_default_pool.rs b/src/orch/thread_pool/implementations/std_default_pool.rs index 3074552..8919200 100644 --- a/src/orch/thread_pool/implementations/std_default_pool.rs +++ b/src/orch/thread_pool/implementations/std_default_pool.rs @@ -35,18 +35,6 @@ impl Default for StdDefaultPool { } impl ParThreadPool for StdDefaultPool { - type ScopeZzz<'scope, 'env> 
- = std::thread::Scope<'scope, 'env> - where - 'env: 'scope; - - fn scope_zzz<'env, F, T>(&'env self, f: F) -> T - where - F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, - { - std::thread::scope(f) - } - type ScopeRef<'s, 'env, 'scope> = &'s std::thread::Scope<'s, 'env> where diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index 2c237f6..1e688b9 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -4,14 +4,6 @@ use orx_concurrent_bag::ConcurrentBag; use std::num::NonZeroUsize; pub trait ParThreadPool { - type ScopeZzz<'scope, 'env>: ParScope<'scope, 'env> - where - 'env: 'scope; - - fn scope_zzz<'env, F, T>(&'env self, f: F) -> T - where - F: for<'scope> FnOnce(&'scope Self::ScopeZzz<'scope, 'env>) -> T; - type ScopeRef<'s, 'env, 'scope> where 'scope: 's, From de559c6354b271311f188bdd4335a80a95920b99 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 14:09:46 +0200 Subject: [PATCH 111/264] clean up handle and scope --- src/orch/mod.rs | 2 +- src/orch/thread_pool/implementations/mod.rs | 1 - .../implementations/std_scoped_threads.rs | 27 ------------------- src/orch/thread_pool/mod.rs | 4 --- src/orch/thread_pool/par_handle.rs | 9 ------- src/orch/thread_pool/par_scope.rs | 16 ----------- src/orch/thread_pool/par_thread_pool.rs | 10 ++----- 7 files changed, 3 insertions(+), 66 deletions(-) delete mode 100644 src/orch/thread_pool/implementations/std_scoped_threads.rs delete mode 100644 src/orch/thread_pool/par_handle.rs delete mode 100644 src/orch/thread_pool/par_scope.rs diff --git a/src/orch/mod.rs b/src/orch/mod.rs index f2ac98d..a6bc8b3 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -8,6 +8,6 @@ pub(crate) use orchestrator::{SharedStateOf, ThreadRunnerOf}; pub use crate::orch::implementations::DefaultStdOrchestrator; pub use num_spawned::NumSpawned; pub use orchestrator::Orchestrator; -pub use thread_pool::{ParHandle, ParScope, ParThreadPool}; +pub use thread_pool::ParThreadPool; pub type DefaultOrchestrator = DefaultStdOrchestrator; diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index 2653be5..1b418af 100644 --- a/src/orch/thread_pool/implementations/mod.rs +++ b/src/orch/thread_pool/implementations/mod.rs @@ -1,5 +1,4 @@ mod std_default_pool; -mod std_scoped_threads; // #[cfg(feature = "threadpool")] // mod impl_threadpool; diff --git a/src/orch/thread_pool/implementations/std_scoped_threads.rs b/src/orch/thread_pool/implementations/std_scoped_threads.rs deleted file mode 100644 index fb9ee20..0000000 --- a/src/orch/thread_pool/implementations/std_scoped_threads.rs +++ /dev/null @@ -1,27 +0,0 @@ -use crate::orch::{ParHandle, ParScope, thread_pool::par_handle::JoinResult}; - -impl<'scope, T> ParHandle<'scope, T> for std::thread::ScopedJoinHandle<'scope, T> { - fn join(self) -> JoinResult { - std::thread::ScopedJoinHandle::join(self) - } - - fn is_finished(&self) -> bool { - std::thread::ScopedJoinHandle::is_finished(self) - } -} - -impl<'scope, 'env> ParScope<'scope, 'env> for std::thread::Scope<'scope, 'env> { - type Handle - = std::thread::ScopedJoinHandle<'scope, T> - where - Self: 'scope, - T: 'scope; - - fn spawn(&'scope self, f: F) -> Self::Handle - where - F: FnOnce() -> T + Send + 'scope, - T: Send + 'scope, - { - self.spawn(f) - } -} diff --git a/src/orch/thread_pool/mod.rs b/src/orch/thread_pool/mod.rs index 57e4e61..d027085 100644 --- a/src/orch/thread_pool/mod.rs +++ 
b/src/orch/thread_pool/mod.rs @@ -1,8 +1,4 @@ pub mod implementations; -mod par_handle; -mod par_scope; mod par_thread_pool; -pub use par_handle::ParHandle; -pub use par_scope::ParScope; pub use par_thread_pool::ParThreadPool; diff --git a/src/orch/thread_pool/par_handle.rs b/src/orch/thread_pool/par_handle.rs deleted file mode 100644 index 8f38f7c..0000000 --- a/src/orch/thread_pool/par_handle.rs +++ /dev/null @@ -1,9 +0,0 @@ -use std::any::Any; - -pub type JoinResult<T> = Result<T, Box<dyn Any + Send>>; - -pub trait ParHandle<'scope, T> { - fn join(self) -> JoinResult<T>; - - fn is_finished(&self) -> bool; -} diff --git a/src/orch/thread_pool/par_scope.rs b/src/orch/thread_pool/par_scope.rs deleted file mode 100644 index b9918c5..0000000 --- a/src/orch/thread_pool/par_scope.rs +++ /dev/null @@ -1,16 +0,0 @@ -use super::par_handle::ParHandle; - -pub trait ParScope<'scope, 'env> -where - 'env: 'scope, -{ - type Handle<T>: ParHandle<'scope, T> - where - Self: 'scope, - T: 'scope; - - fn spawn<F, T>(&'scope self, f: F) -> Self::Handle<T> - where - F: FnOnce() -> T + Send + 'scope, - T: Send + 'scope; -} diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index 1e688b9..c0d3119 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -1,4 +1,3 @@ -use super::par_scope::ParScope; use crate::{generic_values::runner_results::Fallibility, orch::num_spawned::NumSpawned}; use orx_concurrent_bag::ConcurrentBag; use std::num::NonZeroUsize; @@ -13,17 +12,12 @@ pub trait ParThreadPool { where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, - { - } + W: Fn() + Sync + 'scope + 'env; fn scope<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, - for<'s> F: FnOnce(Self::ScopeRef<'s, 'env, 'scope>) + Send, - { - todo!() - } + for<'s> F: FnOnce(Self::ScopeRef<'s, 'env, 'scope>) + Send; fn max_num_threads(&self) -> NonZeroUsize; From ab1e76f170c016cda30b0a24ce5a13b6c5ce3128 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 14:11:02 +0200 Subject: [PATCH 112/264] NumSpawned documentation --- src/orch/num_spawned.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/orch/num_spawned.rs b/src/orch/num_spawned.rs index e009196..5773fea 100644 --- a/src/orch/num_spawned.rs +++ b/src/orch/num_spawned.rs @@ -1,15 +1,19 @@ +/// Number of spawned threads to execute a parallel computation. #[derive(Clone, Copy)] pub struct NumSpawned(usize); impl NumSpawned { + /// Zero. pub fn zero() -> Self { Self(0) } + /// Adds one to the spawned thread count. pub fn increment(&mut self) { self.0 += 1; } + /// Converts into usize. 
pub fn into_inner(self) -> usize { self.0 } From faf5450c99120f41f371b4177c72cca3ad0292d4 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 14:16:05 +0200 Subject: [PATCH 113/264] ParThreadPoolCompute is separated from ParThreadPool --- src/orch/orchestrator.rs | 5 ++++- src/orch/thread_pool/mod.rs | 2 +- src/orch/thread_pool/par_thread_pool.rs | 6 ++++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 66882c2..853fbb8 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,7 +1,10 @@ use crate::{ NumThreads, ParallelRunner, Params, generic_values::runner_results::{Fallibility, Infallible, Never}, - orch::{NumSpawned, thread_pool::ParThreadPool}, + orch::{ + NumSpawned, + thread_pool::{ParThreadPool, ParThreadPoolCompute}, + }, runner::ComputationKind, }; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/orch/thread_pool/mod.rs b/src/orch/thread_pool/mod.rs index d027085..8d60a60 100644 --- a/src/orch/thread_pool/mod.rs +++ b/src/orch/thread_pool/mod.rs @@ -1,4 +1,4 @@ pub mod implementations; mod par_thread_pool; -pub use par_thread_pool::ParThreadPool; +pub use par_thread_pool::{ParThreadPool, ParThreadPoolCompute}; diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index c0d3119..6eb7c0d 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -20,9 +20,9 @@ pub trait ParThreadPool { for<'s> F: FnOnce(Self::ScopeRef<'s, 'env, 'scope>) + Send; fn max_num_threads(&self) -> NonZeroUsize; +} - // derived - +pub trait ParThreadPoolCompute: ParThreadPool { fn run(&mut self, do_spawn: S, thread_do: F) -> NumSpawned where S: Fn(NumSpawned) -> bool + Sync, @@ -67,3 +67,5 @@ pub trait ParThreadPool { (nt, result) } } + +impl ParThreadPoolCompute for X {} From abf567ac53ae12be2320fd7d34e716f1f17ad077 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 14:19:43 +0200 Subject: [PATCH 114/264] clean up --- src/orch/implementations/mod.rs | 1 - src/orch/implementations/std_thread_pool.rs | 49 --------------------- src/orch/thread_pool/par_thread_pool.rs | 2 + 3 files changed, 2 insertions(+), 50 deletions(-) delete mode 100644 src/orch/implementations/std_thread_pool.rs diff --git a/src/orch/implementations/mod.rs b/src/orch/implementations/mod.rs index c657d69..baabd5f 100644 --- a/src/orch/implementations/mod.rs +++ b/src/orch/implementations/mod.rs @@ -1,4 +1,3 @@ mod default_std_orchestrator; -mod std_thread_pool; pub use default_std_orchestrator::DefaultStdOrchestrator; diff --git a/src/orch/implementations/std_thread_pool.rs b/src/orch/implementations/std_thread_pool.rs deleted file mode 100644 index afb317b..0000000 --- a/src/orch/implementations/std_thread_pool.rs +++ /dev/null @@ -1,49 +0,0 @@ -// use super::super::{ -// par_handle::{JoinResult, ParHandle}, -// par_scope::ParScope, -// par_thread_pool::ParThreadPool, -// }; - -// pub struct StdHandle<'scope, T>(std::thread::ScopedJoinHandle<'scope, T>); - -// impl<'scope, T> ParHandle<'scope, T> for StdHandle<'scope, T> { -// fn join(self) -> JoinResult { -// self.0.join() -// } - -// fn is_finished(&self) -> bool { -// self.0.is_finished() -// } -// } - -// impl<'env, 'scope> ParScope<'env, 'scope> for std::thread::Scope<'scope, 'env> { -// type Handle -// = StdHandle<'scope, T> -// where -// Self: 'scope, -// T: 'scope; - -// fn spawn(&'scope self, f: F) -> Self::Handle -// where -// F: FnOnce() -> T + Send + 'scope, -// T: Send + 
'scope, -// { -// StdHandle(self.spawn(f)) -// } -// } - -// pub struct StdThreadPool; - -// impl ParThreadPool for StdThreadPool { -// type Scope<'env, 'scope> -// = std::thread::Scope<'scope, 'env> -// where -// 'env: 'scope; - -// fn scope<'env, F, T>(f: F) -> T -// where -// F: for<'scope> FnOnce(&'scope std::thread::Scope<'scope, 'env>) -> T, -// { -// std::thread::scope(f) -// } -// } diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index 6eb7c0d..cd12d34 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -22,6 +22,8 @@ pub trait ParThreadPool { fn max_num_threads(&self) -> NonZeroUsize; } +// derived + pub trait ParThreadPoolCompute: ParThreadPool { fn run(&mut self, do_spawn: S, thread_do: F) -> NumSpawned where From 27fa3d113b869b0e9f749011e539ec342240a73c Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 14:24:24 +0200 Subject: [PATCH 115/264] impl ParThreadPool for rayon::ThreadPool --- .../implementations/impl_rayon_threadpool.rs | 70 +++++++------------ src/orch/thread_pool/implementations/mod.rs | 4 +- .../implementations/std_default_pool.rs | 2 +- src/orch/thread_pool/par_thread_pool.rs | 6 +- 4 files changed, 32 insertions(+), 50 deletions(-) diff --git a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs index bfb4cce..819be13 100644 --- a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs @@ -1,52 +1,34 @@ -use crate::orch::{ParHandle, ParScope, ParThreadPool, thread_pool::par_handle::JoinResult}; -use orx_concurrent_bag::ConcurrentBag; -use rayon::{Scope, ThreadPool}; +use crate::orch::ParThreadPool; +use std::num::NonZeroUsize; -pub struct ThreadPoolHandle<'scope, T> { - idx: usize, - result: Option, - bag: &'scope ConcurrentBag, -} - -impl<'scope, T> ParHandle<'scope, T> for ThreadPoolHandle<'scope, T> { - fn join(self) -> JoinResult { - todo!() - } - - fn is_finished(&self) -> bool { - todo!() - } -} - -impl<'scope, 'env> ParScope<'scope, 'env> for Scope<'scope> -where - 'env: 'scope, -{ - type Handle - = ThreadPoolHandle<'scope, T> +impl ParThreadPool for rayon::ThreadPool { + type ScopeRef<'s, 'env, 'scope> + = &'s rayon::Scope<'scope> where - Self: 'scope, - T: 'scope; + 'scope: 's, + 'env: 'scope + 's; - fn spawn(&'scope self, f: F) -> Self::Handle + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where - F: FnOnce() -> T + Send + 'scope, - T: Send + 'scope, + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, { - todo!() + s.spawn(|_| work()); } -} -// impl ParThreadPool for ThreadPool { -// type Scope<'scope, 'env> -// = Scope<'scope> -// where -// 'env: 'scope; + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, + { + self.scope(f) + } -// fn scope<'env, F, T>(&'env mut self, f: F) -> T -// where -// F: for<'scope> FnOnce(&'scope Scope<'scope>) -> T, -// { -// todo!() -// } -// } + fn max_num_threads(&self) -> NonZeroUsize { + match self.current_num_threads() { + 0 => NonZeroUsize::new(1).expect(">0"), + n => NonZeroUsize::new(n).expect(">0"), + } + } +} diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index 1b418af..06de366 100644 --- a/src/orch/thread_pool/implementations/mod.rs +++ 
b/src/orch/thread_pool/implementations/mod.rs @@ -6,7 +6,7 @@ mod std_default_pool; // #[cfg(feature = "scoped_threadpool")] // mod impl_scoped_threadpool; -// #[cfg(feature = "rayon")] -// mod impl_rayon_threadpool; +#[cfg(feature = "rayon")] +mod impl_rayon_threadpool; pub use std_default_pool::StdDefaultPool; diff --git a/src/orch/thread_pool/implementations/std_default_pool.rs b/src/orch/thread_pool/implementations/std_default_pool.rs index 8919200..1c79474 100644 --- a/src/orch/thread_pool/implementations/std_default_pool.rs +++ b/src/orch/thread_pool/implementations/std_default_pool.rs @@ -45,7 +45,7 @@ impl ParThreadPool for StdDefaultPool { self.max_num_threads } - fn scope<'env, 'scope, F>(&'env mut self, f: F) + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, for<'s> F: FnOnce(&'s std::thread::Scope<'s, 'env>) + Send, diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/thread_pool/par_thread_pool.rs index cd12d34..99db0c1 100644 --- a/src/orch/thread_pool/par_thread_pool.rs +++ b/src/orch/thread_pool/par_thread_pool.rs @@ -14,7 +14,7 @@ pub trait ParThreadPool { 'env: 'scope + 's, W: Fn() + Sync + 'scope + 'env; - fn scope<'env, 'scope, F>(&'env mut self, f: F) + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, for<'s> F: FnOnce(Self::ScopeRef<'s, 'env, 'scope>) + Send; @@ -31,7 +31,7 @@ pub trait ParThreadPoolCompute: ParThreadPool { F: Fn() + Sync, { let mut nt = NumSpawned::zero(); - self.scope(|s| { + self.scoped_computation(|s| { while do_spawn(nt) { nt.increment(); Self::run_in_scope(&s, &thread_do); @@ -56,7 +56,7 @@ pub trait ParThreadPoolCompute: ParThreadPool { let mut nt = NumSpawned::zero(); let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); let work = || _ = thread_results.push(thread_map()); - self.scope(|s| { + self.scoped_computation(|s| { while do_spawn(nt) { nt.increment(); Self::run_in_scope(&s, &work); From f100124d424269440553ee5296f99944adcb1026 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 14:26:04 +0200 Subject: [PATCH 116/264] ParThreadPool is implemented for all references --- .../implementations/impl_rayon_threadpool.rs | 67 +++++++++++++++++-- 1 file changed, 63 insertions(+), 4 deletions(-) diff --git a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs index 819be13..b34d4e1 100644 --- a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs @@ -26,9 +26,68 @@ impl ParThreadPool for rayon::ThreadPool { } fn max_num_threads(&self) -> NonZeroUsize { - match self.current_num_threads() { - 0 => NonZeroUsize::new(1).expect(">0"), - n => NonZeroUsize::new(n).expect(">0"), - } + NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") + } +} + +// & + +impl<'a> ParThreadPool for &'a rayon::ThreadPool { + type ScopeRef<'s, 'env, 'scope> + = &'s rayon::Scope<'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + s.spawn(|_| work()); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, + { + self.scope(f) + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") 
+ } +} + +// &mut + +impl<'a> ParThreadPool for &'a mut rayon::ThreadPool { + type ScopeRef<'s, 'env, 'scope> + = &'s rayon::Scope<'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + s.spawn(|_| work()); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, + { + self.scope(f) + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") } } From 13f571ca79272011a1708839a5d0991a2db9a97a Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 22:16:05 +0200 Subject: [PATCH 117/264] RayonOrchestrator is defined --- Cargo.toml | 1 + src/orch/implementations/mod.rs | 2 + .../implementations/rayon_orchestrator.rs | 58 +++++++++++++++++++ src/orch/thread_pool/implementations/mod.rs | 3 + .../thread_pool/implementations/tests/mod.rs | 2 + .../implementations/tests/rayon_threadpool.rs | 46 +++++++++++++++ src/parameters/num_threads.rs | 6 +- 7 files changed, 115 insertions(+), 3 deletions(-) create mode 100644 src/orch/implementations/rayon_orchestrator.rs create mode 100644 src/orch/thread_pool/implementations/tests/mod.rs create mode 100644 src/orch/thread_pool/implementations/tests/rayon_threadpool.rs diff --git a/Cargo.toml b/Cargo.toml index 4d8bf3a..0b6baa7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ rayon = { version = "1.10.0", optional = true } # optional thread pool dependencies threadpool = { version = "1.8.1", optional = true } scoped_threadpool = { version = "0.1.9", optional = true } +orx-self-or = { version = "1.2.0", registry = "artifactory" } [dev-dependencies] chrono = "0.4.39" diff --git a/src/orch/implementations/mod.rs b/src/orch/implementations/mod.rs index baabd5f..0b3fe9d 100644 --- a/src/orch/implementations/mod.rs +++ b/src/orch/implementations/mod.rs @@ -1,3 +1,5 @@ mod default_std_orchestrator; +mod rayon_orchestrator; pub use default_std_orchestrator::DefaultStdOrchestrator; +pub use rayon_orchestrator::RayonOrchestrator; diff --git a/src/orch/implementations/rayon_orchestrator.rs b/src/orch/implementations/rayon_orchestrator.rs new file mode 100644 index 0000000..3a6daff --- /dev/null +++ b/src/orch/implementations/rayon_orchestrator.rs @@ -0,0 +1,58 @@ +use crate::{ + DefaultRunner, ParallelRunner, + orch::{Orchestrator, ParThreadPool}, +}; +use orx_self_or::SoR; +use rayon::ThreadPool; +use std::marker::PhantomData; + +pub struct RayonOrchestrator +where + R: ParallelRunner, + P: SoR + ParThreadPool, +{ + pool: P, + runner: PhantomData, +} + +impl RayonOrchestrator +where + R: ParallelRunner, +{ + pub fn new(pool: ThreadPool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl<'a, R> RayonOrchestrator<&'a ThreadPool, R> +where + R: ParallelRunner, +{ + pub fn new(pool: &'a ThreadPool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl Orchestrator for RayonOrchestrator +where + R: ParallelRunner, + P: SoR + ParThreadPool, +{ + type Runner = R; + + type ThreadPool = P; + + fn thread_pool(&self) -> &Self::ThreadPool { + &self.pool + } + + fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { + &mut self.pool + } +} diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index 06de366..b60a4d4 100644 --- 
a/src/orch/thread_pool/implementations/mod.rs +++ b/src/orch/thread_pool/implementations/mod.rs @@ -1,3 +1,6 @@ +#[cfg(test)] +mod tests; + mod std_default_pool; // #[cfg(feature = "threadpool")] diff --git a/src/orch/thread_pool/implementations/tests/mod.rs b/src/orch/thread_pool/implementations/tests/mod.rs new file mode 100644 index 0000000..0d4ea74 --- /dev/null +++ b/src/orch/thread_pool/implementations/tests/mod.rs @@ -0,0 +1,2 @@ +#[cfg(feature = "rayon")] +mod rayon_threadpool; diff --git a/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs b/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs new file mode 100644 index 0000000..bfd099c --- /dev/null +++ b/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs @@ -0,0 +1,46 @@ +use crate::{IntoParIter, IterationOrder, ParIter}; +use orx_pinned_vec::PinnedVec; +use orx_split_vec::SplitVec; +use test_case::test_matrix; + +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64], + [IterationOrder::Ordered, IterationOrder::Arbitrary]) +] +fn pool_rayon_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { + let offset = 33; + + let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); + let map = |x: String| format!("{}!", x); + + let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + let mut expected = Vec::new(); + + for i in 0..offset { + let value = || map(i.to_string()); + output.push(value()); + expected.push(value()); + } + expected.extend(input.clone().into_iter().map(|x| map(x))); + + let mut output = input + .into_par() + .chunk_size(chunk) + .iteration_order(ordering) + .map(map) + .collect_into(output); + + if matches!(ordering, IterationOrder::Arbitrary) { + expected.sort(); + output.sort(); + } + + assert_eq!(expected, output.to_vec()); +} diff --git a/src/parameters/num_threads.rs b/src/parameters/num_threads.rs index f332c3f..d620088 100644 --- a/src/parameters/num_threads.rs +++ b/src/parameters/num_threads.rs @@ -37,7 +37,7 @@ pub enum NumThreads { Max(NonZeroUsize), } -const SEQUENTIAL_NUM_THREADS: NonZeroUsize = NonZeroUsize::new(1).expect("seq=1 is positive"); +const ONE: NonZeroUsize = NonZeroUsize::new(1).expect("seq=1 is positive"); impl From for NumThreads { /// Converts the nonnegative integer to number of threads as follows: @@ -58,13 +58,13 @@ impl NumThreads { /// This will lead to a sequential execution of the defined computation on the main thread. /// Both in terms of used resources and computation time, this mode is not similar but **identical** to a sequential execution using the regular sequential `Iterator`s. pub const fn sequential() -> Self { - NumThreads::Max(SEQUENTIAL_NUM_THREADS) + NumThreads::Max(ONE) } /// Returns true if number of threads is set to 1. /// /// Note that in this case the computation will be executed sequentially using regular iterators. 
pub fn is_sequential(self) -> bool { - matches!(self, Self::Max(n) if n == SEQUENTIAL_NUM_THREADS) + matches!(self, Self::Max(n) if n == ONE) } } From 8ce491cf6c3214089b4d180cde70a7a7e1047bb6 Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 2025 22:22:56 +0200 Subject: [PATCH 118/264] rayon thread pool tests implemented, passing --- .../implementations/rayon_orchestrator.rs | 8 ++--- .../implementations/impl_rayon_threadpool.rs | 31 ------------------- .../implementations/tests/rayon_threadpool.rs | 8 ++++- 3 files changed, 11 insertions(+), 36 deletions(-) diff --git a/src/orch/implementations/rayon_orchestrator.rs b/src/orch/implementations/rayon_orchestrator.rs index 3a6daff..354cffe 100644 --- a/src/orch/implementations/rayon_orchestrator.rs +++ b/src/orch/implementations/rayon_orchestrator.rs @@ -15,11 +15,11 @@ where runner: PhantomData, } -impl RayonOrchestrator +impl From for RayonOrchestrator where R: ParallelRunner, { - pub fn new(pool: ThreadPool) -> Self { + fn from(pool: ThreadPool) -> Self { Self { pool, runner: PhantomData, @@ -27,11 +27,11 @@ where } } -impl<'a, R> RayonOrchestrator<&'a ThreadPool, R> +impl<'a, R> From<&'a ThreadPool> for RayonOrchestrator<&'a ThreadPool, R> where R: ParallelRunner, { - pub fn new(pool: &'a ThreadPool) -> Self { + fn from(pool: &'a ThreadPool) -> Self { Self { pool, runner: PhantomData, diff --git a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs index b34d4e1..0c1facc 100644 --- a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs +++ b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs @@ -60,34 +60,3 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") } } - -// &mut - -impl<'a> ParThreadPool for &'a mut rayon::ThreadPool { - type ScopeRef<'s, 'env, 'scope> - = &'s rayon::Scope<'scope> - where - 'scope: 's, - 'env: 'scope + 's; - - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, - { - s.spawn(|_| work()); - } - - fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) - where - 'env: 'scope, - for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, - { - self.scope(f) - } - - fn max_num_threads(&self) -> NonZeroUsize { - NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") - } -} diff --git a/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs b/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs index bfd099c..61579ae 100644 --- a/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs +++ b/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs @@ -1,4 +1,4 @@ -use crate::{IntoParIter, IterationOrder, ParIter}; +use crate::{IntoParIter, IterationOrder, ParIter, orch::implementations::RayonOrchestrator}; use orx_pinned_vec::PinnedVec; use orx_split_vec::SplitVec; use test_case::test_matrix; @@ -30,8 +30,14 @@ fn pool_rayon_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { } expected.extend(input.clone().into_iter().map(|x| map(x))); + let pool = rayon::ThreadPoolBuilder::new() + .num_threads(nt) + .build() + .unwrap(); + let orch: RayonOrchestrator<_> = (&pool).into(); let mut output = input .into_par() + .with_runner(orch) .chunk_size(chunk) .iteration_order(ordering) .map(map) From a24ee4cd7d6d3d93952cdd390bc06c8da5efe00f Mon Sep 17 00:00:00 2001 From: orxfun Date: Mon, 15 Sep 
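With these From conversions in place, pointing a computation at a dedicated rayon pool is a one-liner ahead of with_runner, exactly as the updated test does. A condensed sketch (input values and collection into a plain Vec are illustrative; both are supported elsewhere in the crate):

let pool = rayon::ThreadPoolBuilder::new()
    .num_threads(4)
    .build()
    .unwrap();
let orch: RayonOrchestrator<_> = (&pool).into(); // borrows; the pool outlives the run

let input: Vec<u64> = (0..1024).collect();
let halved = input
    .into_par()
    .with_runner(orch)
    .map(|x| x / 2)
    .collect_into(Vec::new());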
2025 22:26:06 +0200 Subject: [PATCH 119/264] auto impl orch for all &mut orch --- src/orch/orchestrator.rs | 19 +++++++++++++++++++ .../implementations/tests/rayon_threadpool.rs | 4 ++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 853fbb8..8957fa7 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -108,3 +108,22 @@ pub trait Orchestrator { pub(crate) type SharedStateOf = <::Runner as ParallelRunner>::SharedState; pub(crate) type ThreadRunnerOf = <::Runner as ParallelRunner>::ThreadRunner; + +// auto impl for &mut pool + +impl<'a, O> Orchestrator for &'a mut O +where + O: Orchestrator, +{ + type Runner = O::Runner; + + type ThreadPool = O::ThreadPool; + + fn thread_pool(&self) -> &Self::ThreadPool { + O::thread_pool(self) + } + + fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { + O::thread_pool_mut(self) + } +} diff --git a/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs b/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs index 61579ae..c3258e7 100644 --- a/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs +++ b/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs @@ -34,10 +34,10 @@ fn pool_rayon_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { .num_threads(nt) .build() .unwrap(); - let orch: RayonOrchestrator<_> = (&pool).into(); + let mut orch: RayonOrchestrator<_> = (pool).into(); let mut output = input .into_par() - .with_runner(orch) + .with_runner(&mut orch) .chunk_size(chunk) .iteration_order(ordering) .map(map) From 09609ab9127c616bbe39abdb9d0e810b424a1df8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 08:55:39 +0200 Subject: [PATCH 120/264] fix dependency --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 0b6baa7..eb18b41 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ rayon = { version = "1.10.0", optional = true } # optional thread pool dependencies threadpool = { version = "1.8.1", optional = true } scoped_threadpool = { version = "0.1.9", optional = true } -orx-self-or = { version = "1.2.0", registry = "artifactory" } +orx-self-or = { version = "1.2.0" } [dev-dependencies] chrono = "0.4.39" From 54d1690d2f9b9b540468026c849a15335029608d Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 08:59:16 +0200 Subject: [PATCH 121/264] feature flags added for rayon --- src/orch/implementations/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/orch/implementations/mod.rs b/src/orch/implementations/mod.rs index 0b3fe9d..eff4430 100644 --- a/src/orch/implementations/mod.rs +++ b/src/orch/implementations/mod.rs @@ -1,5 +1,7 @@ mod default_std_orchestrator; -mod rayon_orchestrator; - pub use default_std_orchestrator::DefaultStdOrchestrator; + +#[cfg(feature = "rayon")] +mod rayon_orchestrator; +#[cfg(feature = "rayon")] pub use rayon_orchestrator::RayonOrchestrator; From f213f7fa8ea193d1fbb01359fd962d090b11d91c Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 09:01:43 +0200 Subject: [PATCH 122/264] mod reorganization --- .../implementations/rayon_orchestrator.rs | 64 ++++++++++++++++++- .../implementations/impl_rayon_threadpool.rs | 62 ------------------ src/orch/thread_pool/implementations/mod.rs | 3 - 3 files changed, 63 insertions(+), 66 deletions(-) delete mode 100644 src/orch/thread_pool/implementations/impl_rayon_threadpool.rs diff --git 
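The blanket impl above means an orchestrator need not be rebuilt or consumed per computation; a single instance can be lent mutably to consecutive runs. A sketch reusing the conversion from the previous patch:

let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build().unwrap();
let mut orch: RayonOrchestrator<_> = (&pool).into();

let input: Vec<usize> = (0..100).collect();

// Same orchestrator, two independent parallel runs.
let doubled = input
    .clone()
    .into_par()
    .with_runner(&mut orch)
    .map(|x| 2 * x)
    .collect_into(Vec::new());
let tripled = input
    .into_par()
    .with_runner(&mut orch)
    .map(|x| 3 * x)
    .collect_into(Vec::new());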
a/src/orch/implementations/rayon_orchestrator.rs b/src/orch/implementations/rayon_orchestrator.rs index 354cffe..59cb8db 100644 --- a/src/orch/implementations/rayon_orchestrator.rs +++ b/src/orch/implementations/rayon_orchestrator.rs @@ -4,7 +4,69 @@ use crate::{ }; use orx_self_or::SoR; use rayon::ThreadPool; -use std::marker::PhantomData; +use std::{marker::PhantomData, num::NonZeroUsize}; + +// POOL + +impl ParThreadPool for ThreadPool { + type ScopeRef<'s, 'env, 'scope> + = &'s rayon::Scope<'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + s.spawn(|_| work()); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, + { + self.scope(f) + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") + } +} + +impl<'a> ParThreadPool for &'a rayon::ThreadPool { + type ScopeRef<'s, 'env, 'scope> + = &'s rayon::Scope<'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + s.spawn(|_| work()); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, + { + self.scope(f) + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") + } +} + +// ORCH pub struct RayonOrchestrator where diff --git a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs b/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs deleted file mode 100644 index 0c1facc..0000000 --- a/src/orch/thread_pool/implementations/impl_rayon_threadpool.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::orch::ParThreadPool; -use std::num::NonZeroUsize; - -impl ParThreadPool for rayon::ThreadPool { - type ScopeRef<'s, 'env, 'scope> - = &'s rayon::Scope<'scope> - where - 'scope: 's, - 'env: 'scope + 's; - - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, - { - s.spawn(|_| work()); - } - - fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) - where - 'env: 'scope, - for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, - { - self.scope(f) - } - - fn max_num_threads(&self) -> NonZeroUsize { - NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") - } -} - -// & - -impl<'a> ParThreadPool for &'a rayon::ThreadPool { - type ScopeRef<'s, 'env, 'scope> - = &'s rayon::Scope<'scope> - where - 'scope: 's, - 'env: 'scope + 's; - - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, - { - s.spawn(|_| work()); - } - - fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) - where - 'env: 'scope, - for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, - { - self.scope(f) - } - - fn max_num_threads(&self) -> NonZeroUsize { - NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") - } -} diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index b60a4d4..7285ac4 100644 --- 
a/src/orch/thread_pool/implementations/mod.rs +++ b/src/orch/thread_pool/implementations/mod.rs @@ -9,7 +9,4 @@ mod std_default_pool; // #[cfg(feature = "scoped_threadpool")] // mod impl_scoped_threadpool; -#[cfg(feature = "rayon")] -mod impl_rayon_threadpool; - pub use std_default_pool::StdDefaultPool; From bd1b1ee3f2cf874278364f3674782e7508ccd2b8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 09:06:20 +0200 Subject: [PATCH 123/264] code reorganization --- src/orch/implementations/mod.rs | 7 +++++-- .../implementations/{rayon_orchestrator.rs => rayon.rs} | 0 src/orch/{thread_pool => }/implementations/tests/mod.rs | 0 .../implementations/tests/rayon_threadpool.rs | 2 +- src/orch/thread_pool/implementations/mod.rs | 3 --- 5 files changed, 6 insertions(+), 6 deletions(-) rename src/orch/implementations/{rayon_orchestrator.rs => rayon.rs} (100%) rename src/orch/{thread_pool => }/implementations/tests/mod.rs (100%) rename src/orch/{thread_pool => }/implementations/tests/rayon_threadpool.rs (96%) diff --git a/src/orch/implementations/mod.rs b/src/orch/implementations/mod.rs index eff4430..d0172af 100644 --- a/src/orch/implementations/mod.rs +++ b/src/orch/implementations/mod.rs @@ -1,7 +1,10 @@ +#[cfg(test)] +mod tests; + mod default_std_orchestrator; pub use default_std_orchestrator::DefaultStdOrchestrator; #[cfg(feature = "rayon")] -mod rayon_orchestrator; +mod rayon; #[cfg(feature = "rayon")] -pub use rayon_orchestrator::RayonOrchestrator; +pub use rayon::RayonOrchestrator; diff --git a/src/orch/implementations/rayon_orchestrator.rs b/src/orch/implementations/rayon.rs similarity index 100% rename from src/orch/implementations/rayon_orchestrator.rs rename to src/orch/implementations/rayon.rs diff --git a/src/orch/thread_pool/implementations/tests/mod.rs b/src/orch/implementations/tests/mod.rs similarity index 100% rename from src/orch/thread_pool/implementations/tests/mod.rs rename to src/orch/implementations/tests/mod.rs diff --git a/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs b/src/orch/implementations/tests/rayon_threadpool.rs similarity index 96% rename from src/orch/thread_pool/implementations/tests/rayon_threadpool.rs rename to src/orch/implementations/tests/rayon_threadpool.rs index c3258e7..8fb5835 100644 --- a/src/orch/thread_pool/implementations/tests/rayon_threadpool.rs +++ b/src/orch/implementations/tests/rayon_threadpool.rs @@ -34,7 +34,7 @@ fn pool_rayon_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { .num_threads(nt) .build() .unwrap(); - let mut orch: RayonOrchestrator<_> = (pool).into(); + let mut orch: RayonOrchestrator<_> = (&pool).into(); let mut output = input .into_par() .with_runner(&mut orch) diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs index 7285ac4..d4c893b 100644 --- a/src/orch/thread_pool/implementations/mod.rs +++ b/src/orch/thread_pool/implementations/mod.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod tests; - mod std_default_pool; // #[cfg(feature = "threadpool")] From 62d28430f46bc3bdee47d3870fdfd88a13af906b Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 09:13:09 +0200 Subject: [PATCH 124/264] ScopedThreadPoolOrchestrator is implemented --- src/orch/implementations/mod.rs | 5 + src/orch/implementations/scoped_threadpool.rs | 120 ++++++++++++++++++ src/orch/implementations/tests/mod.rs | 2 +- .../tests/{rayon_threadpool.rs => rayon.rs} | 0 4 files changed, 126 insertions(+), 1 deletion(-) create mode 100644 
src/orch/implementations/scoped_threadpool.rs rename src/orch/implementations/tests/{rayon_threadpool.rs => rayon.rs} (100%) diff --git a/src/orch/implementations/mod.rs b/src/orch/implementations/mod.rs index d0172af..2702a26 100644 --- a/src/orch/implementations/mod.rs +++ b/src/orch/implementations/mod.rs @@ -8,3 +8,8 @@ pub use default_std_orchestrator::DefaultStdOrchestrator; mod rayon; #[cfg(feature = "rayon")] pub use rayon::RayonOrchestrator; + +#[cfg(feature = "scoped_threadpool")] +mod scoped_threadpool; +#[cfg(feature = "scoped_threadpool")] +pub use scoped_threadpool::ScopedThreadPoolOrchestrator; diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/orch/implementations/scoped_threadpool.rs new file mode 100644 index 0000000..950d0e3 --- /dev/null +++ b/src/orch/implementations/scoped_threadpool.rs @@ -0,0 +1,120 @@ +use crate::{ + DefaultRunner, ParallelRunner, + orch::{Orchestrator, ParThreadPool}, +}; +use orx_self_or::SoM; +use scoped_threadpool::Pool; +use std::{marker::PhantomData, num::NonZeroUsize}; + +// POOL + +impl ParThreadPool for Pool { + type ScopeRef<'s, 'env, 'scope> + = &'s scoped_threadpool::Scope<'env, 'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + s.execute(work); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s scoped_threadpool::Scope<'env, 'scope>) + Send, + { + self.scoped(f) + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new((self.thread_count() as usize).max(1)).expect(">0") + } +} + +impl<'a> ParThreadPool for &'a mut Pool { + type ScopeRef<'s, 'env, 'scope> + = &'s scoped_threadpool::Scope<'env, 'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + s.execute(work); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s scoped_threadpool::Scope<'env, 'scope>) + Send, + { + self.scoped(f) + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new((self.thread_count() as usize).max(1)).expect(">0") + } +} + +// ORCH + +pub struct ScopedThreadPoolOrchestrator +where + R: ParallelRunner, + P: SoM + ParThreadPool, +{ + pool: P, + runner: PhantomData, +} + +impl From for ScopedThreadPoolOrchestrator +where + R: ParallelRunner, +{ + fn from(pool: Pool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl<'a, R> From<&'a mut Pool> for ScopedThreadPoolOrchestrator<&'a mut Pool, R> +where + R: ParallelRunner, +{ + fn from(pool: &'a mut Pool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl Orchestrator for ScopedThreadPoolOrchestrator +where + R: ParallelRunner, + P: SoM + ParThreadPool, +{ + type Runner = R; + + type ThreadPool = P; + + fn thread_pool(&self) -> &Self::ThreadPool { + &self.pool + } + + fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { + &mut self.pool + } +} diff --git a/src/orch/implementations/tests/mod.rs b/src/orch/implementations/tests/mod.rs index 0d4ea74..d93782e 100644 --- a/src/orch/implementations/tests/mod.rs +++ b/src/orch/implementations/tests/mod.rs @@ -1,2 +1,2 @@ #[cfg(feature = "rayon")] -mod rayon_threadpool; +mod rayon; diff --git 
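Note the bound here is SoM (self-or-mutable-reference) rather than the SoR used for rayon: scoped_threadpool's Pool::scoped takes &mut self, so the orchestrator must either own the pool or hold an exclusive borrow. A sketch of both construction paths, mirroring the test added two patches below (the runner type parameter is assumed to default as in the rayon counterpart):

use scoped_threadpool::Pool;

// Borrowing form: the pool remains usable once the orchestrator is dropped.
let mut pool = Pool::new(4);
let orch: ScopedThreadPoolOrchestrator<_> = (&mut pool).into();

// Owning form: the orchestrator takes the pool with it.
let orch_owned: ScopedThreadPoolOrchestrator<_> = Pool::new(4).into();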
a/src/orch/implementations/tests/rayon_threadpool.rs b/src/orch/implementations/tests/rayon.rs similarity index 100% rename from src/orch/implementations/tests/rayon_threadpool.rs rename to src/orch/implementations/tests/rayon.rs From 02348811a04036c0ff52030b836bf77416b612ec Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 09:17:08 +0200 Subject: [PATCH 125/264] rayon orch tests extracted --- src/orch/implementations/tests/mod.rs | 6 +++ src/orch/implementations/tests/rayon.rs | 37 ++----------------- .../tests/scoped_threadpool.rs | 1 + src/orch/implementations/tests/utils.rs | 35 ++++++++++++++++++ 4 files changed, 46 insertions(+), 33 deletions(-) create mode 100644 src/orch/implementations/tests/scoped_threadpool.rs create mode 100644 src/orch/implementations/tests/utils.rs diff --git a/src/orch/implementations/tests/mod.rs b/src/orch/implementations/tests/mod.rs index d93782e..fabb867 100644 --- a/src/orch/implementations/tests/mod.rs +++ b/src/orch/implementations/tests/mod.rs @@ -1,2 +1,8 @@ #[cfg(feature = "rayon")] mod rayon; + +#[cfg(feature = "scoped_threadpool")] +mod scoped_threadpool; + +mod utils; +use utils::run_map; diff --git a/src/orch/implementations/tests/rayon.rs b/src/orch/implementations/tests/rayon.rs index 8fb5835..9f153e9 100644 --- a/src/orch/implementations/tests/rayon.rs +++ b/src/orch/implementations/tests/rayon.rs @@ -1,6 +1,5 @@ -use crate::{IntoParIter, IterationOrder, ParIter, orch::implementations::RayonOrchestrator}; -use orx_pinned_vec::PinnedVec; -use orx_split_vec::SplitVec; +use super::run_map; +use crate::{IterationOrder, orch::implementations::RayonOrchestrator}; use test_case::test_matrix; #[cfg(miri)] @@ -15,38 +14,10 @@ const N: [usize; 2] = [1025, 4735]; [IterationOrder::Ordered, IterationOrder::Arbitrary]) ] fn pool_rayon_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { - let offset = 33; - - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let map = |x: String| format!("{}!", x); - - let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let mut expected = Vec::new(); - - for i in 0..offset { - let value = || map(i.to_string()); - output.push(value()); - expected.push(value()); - } - expected.extend(input.clone().into_iter().map(|x| map(x))); - let pool = rayon::ThreadPoolBuilder::new() .num_threads(nt) .build() .unwrap(); - let mut orch: RayonOrchestrator<_> = (&pool).into(); - let mut output = input - .into_par() - .with_runner(&mut orch) - .chunk_size(chunk) - .iteration_order(ordering) - .map(map) - .collect_into(output); - - if matches!(ordering, IterationOrder::Arbitrary) { - expected.sort(); - output.sort(); - } - - assert_eq!(expected, output.to_vec()); + let orch: RayonOrchestrator<_> = (&pool).into(); + run_map(n, chunk, ordering, orch); } diff --git a/src/orch/implementations/tests/scoped_threadpool.rs b/src/orch/implementations/tests/scoped_threadpool.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/src/orch/implementations/tests/scoped_threadpool.rs @@ -0,0 +1 @@ + diff --git a/src/orch/implementations/tests/utils.rs b/src/orch/implementations/tests/utils.rs new file mode 100644 index 0000000..4ce9f49 --- /dev/null +++ b/src/orch/implementations/tests/utils.rs @@ -0,0 +1,35 @@ +use crate::{IntoParIter, IterationOrder, ParIter, orch::Orchestrator}; +use orx_pinned_vec::PinnedVec; +use orx_split_vec::SplitVec; + +pub fn run_map(n: usize, chunk: usize, ordering: IterationOrder, mut orch: impl Orchestrator) { + let offset = 33; + + let input: 
Vec<_> = (0..n).map(|x| x.to_string()).collect(); + let map = |x: String| format!("{}!", x); + + let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + let mut expected = Vec::new(); + + for i in 0..offset { + let value = || map(i.to_string()); + output.push(value()); + expected.push(value()); + } + expected.extend(input.clone().into_iter().map(|x| map(x))); + + let mut output = input + .into_par() + .with_runner(&mut orch) + .chunk_size(chunk) + .iteration_order(ordering) + .map(map) + .collect_into(output); + + if matches!(ordering, IterationOrder::Arbitrary) { + expected.sort(); + output.sort(); + } + + assert_eq!(expected, output.to_vec()); +} From 5dae5a8090f61e32de31c1188e6c5a8735ae2ff2 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 09:19:19 +0200 Subject: [PATCH 126/264] scoped threadpool tests added --- .../tests/scoped_threadpool.rs | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/orch/implementations/tests/scoped_threadpool.rs b/src/orch/implementations/tests/scoped_threadpool.rs index 8b13789..bb553de 100644 --- a/src/orch/implementations/tests/scoped_threadpool.rs +++ b/src/orch/implementations/tests/scoped_threadpool.rs @@ -1 +1,21 @@ +use super::run_map; +use crate::{IterationOrder, orch::implementations::ScopedThreadPoolOrchestrator}; +use scoped_threadpool::Pool; +use test_case::test_matrix; +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64], + [IterationOrder::Ordered, IterationOrder::Arbitrary]) +] +fn pool_scoped_threadpool_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { + let mut pool = Pool::new(nt as u32); + let orch: ScopedThreadPoolOrchestrator<_> = (&mut pool).into(); + run_map(n, chunk, ordering, orch); +} From b0477b7bab59811628b081af0f0aa8c50509b80e Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 09:21:17 +0200 Subject: [PATCH 127/264] default orchestrator is refactored --- .../default_std_orchestrator.rs | 73 +++++++++++++++- .../implementations/impl_scoped_threadpool.rs | 84 ------------------- .../implementations/impl_threadpool.rs | 62 -------------- src/orch/thread_pool/implementations/mod.rs | 9 -- .../implementations/std_default_pool.rs | 64 -------------- src/orch/thread_pool/mod.rs | 1 - 6 files changed, 69 insertions(+), 224 deletions(-) delete mode 100644 src/orch/thread_pool/implementations/impl_scoped_threadpool.rs delete mode 100644 src/orch/thread_pool/implementations/impl_threadpool.rs delete mode 100644 src/orch/thread_pool/implementations/mod.rs delete mode 100644 src/orch/thread_pool/implementations/std_default_pool.rs diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index 1e61593..7380c97 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -1,7 +1,72 @@ -use crate::{ - DefaultRunner, - orch::{Orchestrator, thread_pool::implementations::StdDefaultPool}, -}; +use crate::{DefaultRunner, orch::Orchestrator}; +use crate::{env::MAX_NUM_THREADS_ENV_VARIABLE, orch::ParThreadPool}; +use std::num::NonZeroUsize; + +// POOL + +const MAX_UNSET_NUM_THREADS: usize = 8; + +pub struct StdDefaultPool { + max_num_threads: NonZeroUsize, +} + +impl Default for StdDefaultPool { + fn default() -> Self { + let env_max_num_threads = match std::env::var(MAX_NUM_THREADS_ENV_VARIABLE) { + Ok(s) => match s.parse::() { 
+ Ok(0) => None, // consistent with .num_threads(0) representing no bound + Ok(x) => Some(x), // set to a positive bound + Err(_e) => None, // not a number, ignored assuming no bound + }, + Err(_e) => None, // not set, no bound + }; + + let ava_max_num_threads: Option = + std::thread::available_parallelism().map(|x| x.into()).ok(); + + let max_num_threads = match (env_max_num_threads, ava_max_num_threads) { + (Some(env), Some(ava)) => env.min(ava), + (Some(env), None) => env, + (None, Some(ava)) => ava, + (None, None) => MAX_UNSET_NUM_THREADS, + }; + + let max_num_threads = NonZeroUsize::new(max_num_threads.max(1)).expect(">=1"); + + Self { max_num_threads } + } +} + +impl ParThreadPool for StdDefaultPool { + type ScopeRef<'s, 'env, 'scope> + = &'s std::thread::Scope<'s, 'env> + where + 'scope: 's, + 'env: 'scope + 's; + + fn max_num_threads(&self) -> NonZeroUsize { + self.max_num_threads + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s std::thread::Scope<'s, 'env>) + Send, + { + std::thread::scope(|s| f(&s)) + } + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Sync + 'scope + 'env, + { + s.spawn(|| work()); + } +} + +// ORCH #[derive(Default)] pub struct DefaultStdOrchestrator(StdDefaultPool); diff --git a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs b/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs deleted file mode 100644 index 965f103..0000000 --- a/src/orch/thread_pool/implementations/impl_scoped_threadpool.rs +++ /dev/null @@ -1,84 +0,0 @@ -use crate::orch::{ParHandle, ParScope, ParThreadPool, thread_pool::par_handle::JoinResult}; -use orx_concurrent_bag::ConcurrentBag; -use scoped_threadpool::{Pool, Scope}; - -pub struct ThreadPoolHandle<'scope, T> { - idx: usize, - result: Option, - bag: &'scope ConcurrentBag, -} - -impl<'scope, T> ParHandle<'scope, T> for ThreadPoolHandle<'scope, T> { - fn join(self) -> JoinResult { - todo!() - } - - fn is_finished(&self) -> bool { - todo!() - } -} - -impl<'scope, 'env> ParScope<'scope, 'env> for Scope<'env, 'scope> -where - 'env: 'scope, -{ - type Handle - = ThreadPoolHandle<'scope, T> - where - Self: 'scope, - T: 'scope; - - fn spawn(&'scope self, f: F) -> Self::Handle - where - F: FnOnce() -> T + Send + 'scope, - T: Send + 'scope, - { - todo!() - } -} - -impl ParThreadPool for Pool { - type ScopeZzz<'scope, 'env> - = Scope<'env, 'scope> - where - 'env: 'scope; - - fn scope_zzz<'env, F, T>(&'env self, f: F) -> T - where - F: for<'scope> FnOnce(&'scope Scope<'env, 'scope>) -> T, - { - // self.scoped(f); - todo!() - } -} - -// fn turn<'scope, 'env: 'scope, T>( -// f: impl FnOnce(&'scope Scope<'env, 'scope>) -> T, -// ) -> impl FnOnce(&Scope<'env, 'scope>) -> T { -// f -// } - -// pub fn scoped<'pool, 'scope, F, R>(&'pool mut self, f: F) -> R -// where -// F: FnOnce(&Scope<'pool, 'scope>) -> R, - -fn main() { - // Create a threadpool holding 4 threads - let mut pool = Pool::new(4); - - let mut vec = vec![0, 1, 2, 3, 4, 5, 6, 7]; - - // Use the threads as scoped threads that can - // reference anything outside this closure - pool.scoped(|scoped| { - // Create references to each element in the vector ... - for e in &mut vec { - // ... 
and add 1 to it in a seperate thread - scoped.execute(move || { - *e += 1; - }); - } - }); - - assert_eq!(vec, vec![1, 2, 3, 4, 5, 6, 7, 8]); -} diff --git a/src/orch/thread_pool/implementations/impl_threadpool.rs b/src/orch/thread_pool/implementations/impl_threadpool.rs deleted file mode 100644 index 76f8b01..0000000 --- a/src/orch/thread_pool/implementations/impl_threadpool.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::orch::{ParHandle, ParScope, ParThreadPool, thread_pool::par_handle::JoinResult}; -use orx_concurrent_bag::ConcurrentBag; -use std::marker::PhantomData; -use threadpool::ThreadPool; - -pub struct ThreadPoolHandle<'scope, T> { - idx: usize, - result: Option, - bag: &'scope ConcurrentBag, -} - -impl<'scope, T> ParHandle<'scope, T> for ThreadPoolHandle<'scope, T> { - fn join(self) -> JoinResult { - todo!() - } - - fn is_finished(&self) -> bool { - todo!() - } -} - -pub struct ThreadPoolScope<'scope, 'env> -where - 'env: 'scope, -{ - pool: &'env ThreadPool, - bag: ConcurrentBag, - p: PhantomData<&'scope ()>, -} - -impl<'scope, 'env> ParScope<'scope, 'env> for ThreadPoolScope<'scope, 'env> -where - 'env: 'scope, -{ - type Handle - = ThreadPoolHandle<'scope, T> - where - Self: 'scope, - T: 'scope; - - fn spawn(&'scope self, f: F) -> Self::Handle - where - F: FnOnce() -> T + Send + 'scope, - T: Send + 'scope, - { - todo!() - } -} - -// impl ParThreadPool for ThreadPool { -// type ScopeZzz<'scope, 'env> -// = ThreadPoolScope<'scope, 'env> -// where -// 'env: 'scope; - -// fn scope_zzz<'env, F, T>(&'env self, f: F) -> T -// where -// F: for<'scope> FnOnce(&'scope ThreadPoolScope<'scope, 'env>) -> T, -// { -// todo!() -// } -// } diff --git a/src/orch/thread_pool/implementations/mod.rs b/src/orch/thread_pool/implementations/mod.rs deleted file mode 100644 index d4c893b..0000000 --- a/src/orch/thread_pool/implementations/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod std_default_pool; - -// #[cfg(feature = "threadpool")] -// mod impl_threadpool; - -// #[cfg(feature = "scoped_threadpool")] -// mod impl_scoped_threadpool; - -pub use std_default_pool::StdDefaultPool; diff --git a/src/orch/thread_pool/implementations/std_default_pool.rs b/src/orch/thread_pool/implementations/std_default_pool.rs deleted file mode 100644 index 1c79474..0000000 --- a/src/orch/thread_pool/implementations/std_default_pool.rs +++ /dev/null @@ -1,64 +0,0 @@ -use crate::{env::MAX_NUM_THREADS_ENV_VARIABLE, orch::ParThreadPool}; -use std::num::NonZeroUsize; - -const MAX_UNSET_NUM_THREADS: usize = 8; - -pub struct StdDefaultPool { - max_num_threads: NonZeroUsize, -} - -impl Default for StdDefaultPool { - fn default() -> Self { - let env_max_num_threads = match std::env::var(MAX_NUM_THREADS_ENV_VARIABLE) { - Ok(s) => match s.parse::() { - Ok(0) => None, // consistent with .num_threads(0) representing no bound - Ok(x) => Some(x), // set to a positive bound - Err(_e) => None, // not a number, ignored assuming no bound - }, - Err(_e) => None, // not set, no bound - }; - - let ava_max_num_threads: Option = - std::thread::available_parallelism().map(|x| x.into()).ok(); - - let max_num_threads = match (env_max_num_threads, ava_max_num_threads) { - (Some(env), Some(ava)) => env.min(ava), - (Some(env), None) => env, - (None, Some(ava)) => ava, - (None, None) => MAX_UNSET_NUM_THREADS, - }; - - let max_num_threads = NonZeroUsize::new(max_num_threads.max(1)).expect(">=1"); - - Self { max_num_threads } - } -} - -impl ParThreadPool for StdDefaultPool { - type ScopeRef<'s, 'env, 'scope> - = &'s std::thread::Scope<'s, 'env> - where - 
'scope: 's, - 'env: 'scope + 's; - - fn max_num_threads(&self) -> NonZeroUsize { - self.max_num_threads - } - - fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) - where - 'env: 'scope, - for<'s> F: FnOnce(&'s std::thread::Scope<'s, 'env>) + Send, - { - std::thread::scope(|s| f(&s)) - } - - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, - { - s.spawn(|| work()); - } -} diff --git a/src/orch/thread_pool/mod.rs b/src/orch/thread_pool/mod.rs index 8d60a60..09e7baa 100644 --- a/src/orch/thread_pool/mod.rs +++ b/src/orch/thread_pool/mod.rs @@ -1,4 +1,3 @@ -pub mod implementations; mod par_thread_pool; pub use par_thread_pool::{ParThreadPool, ParThreadPoolCompute}; From 214cc5e5765a6e8dcd1382d3ef1f972adcbbc4b5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 09:22:19 +0200 Subject: [PATCH 128/264] thread pool module flattened --- src/orch/mod.rs | 4 ++-- src/orch/orchestrator.rs | 5 +---- src/orch/{thread_pool => }/par_thread_pool.rs | 0 src/orch/thread_pool/mod.rs | 3 --- 4 files changed, 3 insertions(+), 9 deletions(-) rename src/orch/{thread_pool => }/par_thread_pool.rs (100%) delete mode 100644 src/orch/thread_pool/mod.rs diff --git a/src/orch/mod.rs b/src/orch/mod.rs index a6bc8b3..cf9f867 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -1,13 +1,13 @@ mod implementations; mod num_spawned; mod orchestrator; -mod thread_pool; +mod par_thread_pool; pub(crate) use orchestrator::{SharedStateOf, ThreadRunnerOf}; pub use crate::orch::implementations::DefaultStdOrchestrator; pub use num_spawned::NumSpawned; pub use orchestrator::Orchestrator; -pub use thread_pool::ParThreadPool; +pub use par_thread_pool::{ParThreadPool, ParThreadPoolCompute}; pub type DefaultOrchestrator = DefaultStdOrchestrator; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 8957fa7..619f4a7 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,10 +1,7 @@ use crate::{ NumThreads, ParallelRunner, Params, generic_values::runner_results::{Fallibility, Infallible, Never}, - orch::{ - NumSpawned, - thread_pool::{ParThreadPool, ParThreadPoolCompute}, - }, + orch::{NumSpawned, ParThreadPool, ParThreadPoolCompute}, runner::ComputationKind, }; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/orch/thread_pool/par_thread_pool.rs b/src/orch/par_thread_pool.rs similarity index 100% rename from src/orch/thread_pool/par_thread_pool.rs rename to src/orch/par_thread_pool.rs diff --git a/src/orch/thread_pool/mod.rs b/src/orch/thread_pool/mod.rs deleted file mode 100644 index 09e7baa..0000000 --- a/src/orch/thread_pool/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod par_thread_pool; - -pub use par_thread_pool::{ParThreadPool, ParThreadPoolCompute}; From 929716ce9df79597c070fabf619e9046dfa4aa85 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:44:46 +0200 Subject: [PATCH 129/264] std feature is defined --- Cargo.toml | 3 ++- src/lib.rs | 6 ++++++ src/orch/implementations/mod.rs | 2 ++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index eb18b41..ea3dae6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,5 +43,6 @@ name = "find_iter_into_par" harness = false [features] -default = ["threadpool", "scoped_threadpool", "rayon"] +default = ["std", "scoped_threadpool", "rayon"] +std = [] generic_iterator = ["rayon"] diff --git a/src/lib.rs b/src/lib.rs index 4d29fdd..ccac46a 100644 --- a/src/lib.rs +++ b/src/lib.rs 
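After this refactoring the std pool and its orchestrator live in one file, and the effective thread cap of StdDefaultPool resolves as the minimum of the env override and available_parallelism, falling back to the other when one is absent and to 8 when both are; a value of 0 or a non-numeric value in the variable means no bound. A behavioral sketch (machine-dependent numbers are illustrative):

// With MAX_NUM_THREADS_ENV_VARIABLE unset on a 16-core machine the cap
// is 16; set to "4" it becomes min(4, 16) = 4; set to "0" or to garbage
// it is ignored and the cap is 16 again. The cap is never below 1.
let pool = StdDefaultPool::default();
assert!(pool.max_num_threads().get() >= 1);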
@@ -10,6 +10,12 @@ clippy::missing_panics_doc, clippy::todo )] +// #![no_std] + +extern crate alloc; + +#[cfg(any(test, feature = "std"))] +extern crate std; mod collect_into; /// Module containing variants of parallel iterators. diff --git a/src/orch/implementations/mod.rs b/src/orch/implementations/mod.rs index 2702a26..7be1def 100644 --- a/src/orch/implementations/mod.rs +++ b/src/orch/implementations/mod.rs @@ -1,7 +1,9 @@ #[cfg(test)] mod tests; +#[cfg(feature = "std")] mod default_std_orchestrator; +#[cfg(feature = "std")] pub use default_std_orchestrator::DefaultStdOrchestrator; #[cfg(feature = "rayon")] From a49a51ea3799fb5bbd88436c4598039c9d948972 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:46:16 +0200 Subject: [PATCH 130/264] no-std orch tests --- src/lib.rs | 2 +- src/orch/implementations/tests/utils.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index ccac46a..2c70774 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,7 +10,7 @@ clippy::missing_panics_doc, clippy::todo )] -// #![no_std] +#![no_std] extern crate alloc; diff --git a/src/orch/implementations/tests/utils.rs b/src/orch/implementations/tests/utils.rs index 4ce9f49..31b3637 100644 --- a/src/orch/implementations/tests/utils.rs +++ b/src/orch/implementations/tests/utils.rs @@ -1,4 +1,7 @@ use crate::{IntoParIter, IterationOrder, ParIter, orch::Orchestrator}; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_pinned_vec::PinnedVec; use orx_split_vec::SplitVec; From 3122db552315ed00188ccc5f3d058d4d4f2f7e81 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:46:39 +0200 Subject: [PATCH 131/264] no-std orchestrator --- src/orch/orchestrator.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 619f4a7..ed3378a 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -4,8 +4,9 @@ use crate::{ orch::{NumSpawned, ParThreadPool, ParThreadPoolCompute}, runner::ComputationKind, }; +use alloc::vec::Vec; +use core::num::NonZeroUsize; use orx_concurrent_iter::ConcurrentIter; -use std::num::NonZeroUsize; pub trait Orchestrator { type Runner: ParallelRunner; From 45359bcbdeba0948a152ecb30051582779b6d40e Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:46:59 +0200 Subject: [PATCH 132/264] no std thread pool --- src/orch/par_thread_pool.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index 99db0c1..7b07f4a 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -1,6 +1,7 @@ use crate::{generic_values::runner_results::Fallibility, orch::num_spawned::NumSpawned}; +use alloc::vec::Vec; +use core::num::NonZeroUsize; use orx_concurrent_bag::ConcurrentBag; -use std::num::NonZeroUsize; pub trait ParThreadPool { type ScopeRef<'s, 'env, 'scope> From 91d46f7f9ff00d6068434820912ab21dbb14f882 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:48:09 +0200 Subject: [PATCH 133/264] no-std generic values --- src/generic_values/option.rs | 1 + src/generic_values/option_result.rs | 1 + src/generic_values/result.rs | 1 + src/generic_values/values.rs | 1 + src/generic_values/vector.rs | 1 + src/generic_values/vector_result.rs | 1 + src/generic_values/whilst_atom.rs | 1 + src/generic_values/whilst_atom_result.rs | 1 + src/generic_values/whilst_option.rs | 1 + src/generic_values/whilst_option_result.rs | 1 + 
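The no-std patches that follow apply one mechanical rule file by file: heap-backed types come from the alloc crate, numeric and marker types from core, and std stays reachable only behind the std feature or in tests. A representative crate-root header under that rule (sketch; the use statements are illustrative):

#![no_std]

extern crate alloc;

#[cfg(any(test, feature = "std"))]
extern crate std;

use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::num::NonZeroUsize;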
src/generic_values/whilst_vector.rs | 1 + src/generic_values/whilst_vector_result.rs | 1 + 12 files changed, 12 insertions(+) diff --git a/src/generic_values/option.rs b/src/generic_values/option.rs index bbdd8dd..6b1c0cd 100644 --- a/src/generic_values/option.rs +++ b/src/generic_values/option.rs @@ -7,6 +7,7 @@ use crate::generic_values::{ }, whilst_option::WhilstOption, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_pinned_vec::{IntoConcurrentPinnedVec, PinnedVec}; diff --git a/src/generic_values/option_result.rs b/src/generic_values/option_result.rs index 64f9cb1..7aaf00f 100644 --- a/src/generic_values/option_result.rs +++ b/src/generic_values/option_result.rs @@ -2,6 +2,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{ ArbitraryPush, Fallible, Next, OrderedPush, Reduce, SequentialPush, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_pinned_vec::{IntoConcurrentPinnedVec, PinnedVec}; diff --git a/src/generic_values/result.rs b/src/generic_values/result.rs index fd7cd10..69c7300 100644 --- a/src/generic_values/result.rs +++ b/src/generic_values/result.rs @@ -2,6 +2,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{ ArbitraryPush, Fallible, Next, OrderedPush, Reduce, SequentialPush, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_pinned_vec::{IntoConcurrentPinnedVec, PinnedVec}; diff --git a/src/generic_values/values.rs b/src/generic_values/values.rs index 8702b67..21de598 100644 --- a/src/generic_values/values.rs +++ b/src/generic_values/values.rs @@ -2,6 +2,7 @@ use crate::generic_values::runner_results::{ ArbitraryPush, Fallibility, Next, OrderedPush, Reduce, SequentialPush, Stop, StopReduce, StopWithIdx, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_fixed_vec::IntoConcurrentPinnedVec; use orx_pinned_vec::PinnedVec; diff --git a/src/generic_values/vector.rs b/src/generic_values/vector.rs index 8bb79a2..be591f0 100644 --- a/src/generic_values/vector.rs +++ b/src/generic_values/vector.rs @@ -5,6 +5,7 @@ use crate::generic_values::{ ArbitraryPush, Fallible, Infallible, Next, OrderedPush, Reduce, SequentialPush, }, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_fixed_vec::IntoConcurrentPinnedVec; use orx_pinned_vec::PinnedVec; diff --git a/src/generic_values/vector_result.rs b/src/generic_values/vector_result.rs index f50627b..ae7df91 100644 --- a/src/generic_values/vector_result.rs +++ b/src/generic_values/vector_result.rs @@ -2,6 +2,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{ ArbitraryPush, Fallible, Next, OrderedPush, Reduce, SequentialPush, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_fixed_vec::IntoConcurrentPinnedVec; use orx_pinned_vec::PinnedVec; diff --git a/src/generic_values/whilst_atom.rs b/src/generic_values/whilst_atom.rs index 550fbcf..8094695 100644 --- a/src/generic_values/whilst_atom.rs +++ b/src/generic_values/whilst_atom.rs @@ -4,6 +4,7 @@ use crate::generic_values::runner_results::{ use crate::generic_values::whilst_atom_result::WhilstAtomResult; use crate::generic_values::whilst_iterators::WhilstAtomFlatMapIter; use crate::generic_values::{TransformableValues, Values, WhilstOption, WhilstVector}; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_pinned_vec::{IntoConcurrentPinnedVec, PinnedVec}; diff --git a/src/generic_values/whilst_atom_result.rs b/src/generic_values/whilst_atom_result.rs 
index 7045de4..a5d1a8b 100644 --- a/src/generic_values/whilst_atom_result.rs +++ b/src/generic_values/whilst_atom_result.rs @@ -2,6 +2,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{ ArbitraryPush, Fallible, Next, OrderedPush, Reduce, SequentialPush, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_pinned_vec::{IntoConcurrentPinnedVec, PinnedVec}; diff --git a/src/generic_values/whilst_option.rs b/src/generic_values/whilst_option.rs index fb29dfb..b9f5b98 100644 --- a/src/generic_values/whilst_option.rs +++ b/src/generic_values/whilst_option.rs @@ -4,6 +4,7 @@ use crate::generic_values::runner_results::{ use crate::generic_values::whilst_iterators::WhilstOptionFlatMapIter; use crate::generic_values::whilst_option_result::WhilstOptionResult; use crate::generic_values::{TransformableValues, Values, WhilstVector}; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_pinned_vec::{IntoConcurrentPinnedVec, PinnedVec}; diff --git a/src/generic_values/whilst_option_result.rs b/src/generic_values/whilst_option_result.rs index 07b7c28..7be5d45 100644 --- a/src/generic_values/whilst_option_result.rs +++ b/src/generic_values/whilst_option_result.rs @@ -2,6 +2,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{ ArbitraryPush, Fallible, Next, OrderedPush, Reduce, SequentialPush, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_pinned_vec::{IntoConcurrentPinnedVec, PinnedVec}; diff --git a/src/generic_values/whilst_vector.rs b/src/generic_values/whilst_vector.rs index b3121a1..240f3bf 100644 --- a/src/generic_values/whilst_vector.rs +++ b/src/generic_values/whilst_vector.rs @@ -7,6 +7,7 @@ use crate::generic_values::{ whilst_iterators::WhilstAtomFlatMapIter, whilst_vector_result::WhilstVectorResult, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_fixed_vec::IntoConcurrentPinnedVec; use orx_pinned_vec::PinnedVec; diff --git a/src/generic_values/whilst_vector_result.rs b/src/generic_values/whilst_vector_result.rs index 30031f5..df86b15 100644 --- a/src/generic_values/whilst_vector_result.rs +++ b/src/generic_values/whilst_vector_result.rs @@ -2,6 +2,7 @@ use crate::generic_values::{ Values, WhilstAtom, runner_results::{ArbitraryPush, Fallible, Next, OrderedPush, Reduce, SequentialPush}, }; +use alloc::vec::Vec; use orx_concurrent_bag::ConcurrentBag; use orx_fixed_vec::IntoConcurrentPinnedVec; use orx_pinned_vec::PinnedVec; From 6f761edae4e3cf25de43f9ef162cc7b157e167c0 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:48:34 +0200 Subject: [PATCH 134/264] no std fallibility --- src/generic_values/runner_results/collect_ordered.rs | 1 + src/generic_values/runner_results/fallibility.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/src/generic_values/runner_results/collect_ordered.rs b/src/generic_values/runner_results/collect_ordered.rs index 54aa71d..52a6e26 100644 --- a/src/generic_values/runner_results/collect_ordered.rs +++ b/src/generic_values/runner_results/collect_ordered.rs @@ -2,6 +2,7 @@ use crate::{ computations::heap_sort_into, generic_values::{Values, runner_results::Fallibility}, }; +use alloc::vec::Vec; use core::fmt::Debug; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/generic_values/runner_results/fallibility.rs b/src/generic_values/runner_results/fallibility.rs index bb49b4a..71cd353 100644 --- a/src/generic_values/runner_results/fallibility.rs +++ b/src/generic_values/runner_results/fallibility.rs @@ 
-4,6 +4,7 @@ use crate::generic_values::{ ArbitraryPush, OrderedPush, Reduce, SequentialPush, Stop, StopWithIdx, stop::StopReduce, }, }; +use alloc::vec::Vec; use std::marker::PhantomData; pub trait Fallibility: Sized { From cd5845ee880428e181022c28aa2333ac79fa37b9 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:49:04 +0200 Subject: [PATCH 135/264] no-std heap sort --- src/computations/heap_sort.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/computations/heap_sort.rs b/src/computations/heap_sort.rs index 964cf29..193ce42 100644 --- a/src/computations/heap_sort.rs +++ b/src/computations/heap_sort.rs @@ -1,3 +1,5 @@ +use alloc::vec; +use alloc::vec::Vec; use orx_pinned_vec::PinnedVec; use orx_priority_queue::{BinaryHeap, PriorityQueue}; From 603d61b26305efa320522a8d17a945ed9d2c310b Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:49:27 +0200 Subject: [PATCH 136/264] no-std ordered collection --- src/runner/thread_runner_compute/collect_ordered.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/runner/thread_runner_compute/collect_ordered.rs b/src/runner/thread_runner_compute/collect_ordered.rs index 0bf6f65..a942d7a 100644 --- a/src/runner/thread_runner_compute/collect_ordered.rs +++ b/src/runner/thread_runner_compute/collect_ordered.rs @@ -1,6 +1,7 @@ use crate::ThreadRunner; use crate::generic_values::Values; use crate::generic_values::runner_results::{StopWithIdx, ThreadCollect}; +use alloc::vec::Vec; use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; From dbd2c4cd4cd41f14d5c27a126c176fdcae2d3437 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:50:02 +0200 Subject: [PATCH 137/264] no std collect into --- src/collect_into/fixed_vec.rs | 1 + src/collect_into/utils.rs | 1 + src/collect_into/vec.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index a0e0e6e..b369acf 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -3,6 +3,7 @@ use crate::Params; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; +use alloc::vec::Vec; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; #[cfg(test)] diff --git a/src/collect_into/utils.rs b/src/collect_into/utils.rs index 6153e2a..b569f60 100644 --- a/src/collect_into/utils.rs +++ b/src/collect_into/utils.rs @@ -1,3 +1,4 @@ +use alloc::vec::Vec; use orx_pinned_vec::PinnedVec; use orx_split_vec::{GrowthWithConstantTimeAccess, SplitVec}; diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index d02708a..627bf96 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -5,6 +5,7 @@ use crate::computational_variants::computations::map_collect_into; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; +use alloc::vec::Vec; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; use orx_split_vec::SplitVec; From 5f87cea929c1af75a543f09f051f23097f10d1f3 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 10:56:05 +0200 Subject: [PATCH 138/264] updates towards no std --- src/computational_variants/tests/copied.rs | 1 + src/computational_variants/tests/count.rs | 3 +++ src/computational_variants/tests/flatten.rs | 3 +++ 
src/computational_variants/tests/for_each.rs | 3 +++ src/computational_variants/tests/inspect.rs | 3 +++ src/computational_variants/tests/iter_consuming.rs | 3 +++ src/computational_variants/tests/iter_ref.rs | 2 ++ src/computational_variants/tests/map/collect.rs | 3 +++ src/computational_variants/tests/map/find.rs | 3 +++ src/computational_variants/tests/map/reduce.rs | 3 +++ src/computational_variants/tests/min_max.rs | 2 ++ src/computational_variants/tests/range.rs | 2 ++ src/computational_variants/tests/slice.rs | 2 ++ src/computational_variants/tests/sum.rs | 2 ++ src/computational_variants/tests/vectors.rs | 4 ++++ src/computational_variants/tests/xap/collect.rs | 3 +++ src/computational_variants/tests/xap/find.rs | 3 +++ src/computational_variants/tests/xap/reduce.rs | 3 +++ 18 files changed, 48 insertions(+) diff --git a/src/computational_variants/tests/copied.rs b/src/computational_variants/tests/copied.rs index 45191d6..5347cb9 100644 --- a/src/computational_variants/tests/copied.rs +++ b/src/computational_variants/tests/copied.rs @@ -1,4 +1,5 @@ use crate::{test_utils::*, *}; +use alloc::vec::Vec; use test_case::test_matrix; fn input>(n: usize) -> O { diff --git a/src/computational_variants/tests/count.rs b/src/computational_variants/tests/count.rs index 77ca8c9..0383d4f 100644 --- a/src/computational_variants/tests/count.rs +++ b/src/computational_variants/tests/count.rs @@ -1,4 +1,7 @@ use crate::{test_utils::*, *}; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use test_case::test_matrix; fn input>(n: usize) -> O { diff --git a/src/computational_variants/tests/flatten.rs b/src/computational_variants/tests/flatten.rs index ae3ba62..e2aa555 100644 --- a/src/computational_variants/tests/flatten.rs +++ b/src/computational_variants/tests/flatten.rs @@ -1,4 +1,7 @@ use crate::{test_utils::*, *}; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; +use alloc::{format, vec}; use test_case::test_matrix; #[test_matrix(N, NT, CHUNK)] diff --git a/src/computational_variants/tests/for_each.rs b/src/computational_variants/tests/for_each.rs index 91b78e5..b973feb 100644 --- a/src/computational_variants/tests/for_each.rs +++ b/src/computational_variants/tests/for_each.rs @@ -1,4 +1,7 @@ use crate::{test_utils::*, *}; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_concurrent_vec::ConcurrentVec; use test_case::test_matrix; diff --git a/src/computational_variants/tests/inspect.rs b/src/computational_variants/tests/inspect.rs index 2334ce7..4978387 100644 --- a/src/computational_variants/tests/inspect.rs +++ b/src/computational_variants/tests/inspect.rs @@ -1,4 +1,7 @@ use crate::{test_utils::*, *}; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_concurrent_vec::ConcurrentVec; use test_case::test_matrix; diff --git a/src/computational_variants/tests/iter_consuming.rs b/src/computational_variants/tests/iter_consuming.rs index 4cece5b..7bee27d 100644 --- a/src/computational_variants/tests/iter_consuming.rs +++ b/src/computational_variants/tests/iter_consuming.rs @@ -1,4 +1,7 @@ use crate::{test_utils::*, *}; +use alloc::string::{String, ToString}; +use alloc::vec; +use alloc::vec::Vec; use orx_fixed_vec::FixedVec; use orx_iterable::Collection; use orx_split_vec::SplitVec; diff --git a/src/computational_variants/tests/iter_ref.rs b/src/computational_variants/tests/iter_ref.rs index 1e5cc66..f8ce262 100644 --- a/src/computational_variants/tests/iter_ref.rs +++ 
b/src/computational_variants/tests/iter_ref.rs @@ -1,4 +1,6 @@ use crate::{collect_into::ParCollectIntoCore, test_utils::*, *}; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_fixed_vec::FixedVec; use orx_iterable::{Collection, IntoCloningIterable}; use orx_split_vec::{Doubling, Linear, PseudoDefault, SplitVec}; diff --git a/src/computational_variants/tests/map/collect.rs b/src/computational_variants/tests/map/collect.rs index 798ff36..cb526e1 100644 --- a/src/computational_variants/tests/map/collect.rs +++ b/src/computational_variants/tests/map/collect.rs @@ -2,6 +2,9 @@ use crate::{ IterationOrder, Params, computational_variants::computations::map_collect_into, orch::DefaultOrchestrator, }; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_concurrent_iter::IntoConcurrentIter; use orx_pinned_vec::PinnedVec; use orx_split_vec::SplitVec; diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs index 138f9e0..4421b8f 100644 --- a/src/computational_variants/tests/map/find.rs +++ b/src/computational_variants/tests/map/find.rs @@ -1,6 +1,9 @@ use crate::{ Params, computations::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, }; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; diff --git a/src/computational_variants/tests/map/reduce.rs b/src/computational_variants/tests/map/reduce.rs index 6c71faa..11b4784 100644 --- a/src/computational_variants/tests/map/reduce.rs +++ b/src/computational_variants/tests/map/reduce.rs @@ -1,6 +1,9 @@ use crate::{ Params, computations::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, }; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; diff --git a/src/computational_variants/tests/min_max.rs b/src/computational_variants/tests/min_max.rs index 5f56fc5..7fe8724 100644 --- a/src/computational_variants/tests/min_max.rs +++ b/src/computational_variants/tests/min_max.rs @@ -1,4 +1,6 @@ use crate::{test_utils::*, *}; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use core::cmp::Ordering; use test_case::test_matrix; diff --git a/src/computational_variants/tests/range.rs b/src/computational_variants/tests/range.rs index f67b5b1..2e7a4d9 100644 --- a/src/computational_variants/tests/range.rs +++ b/src/computational_variants/tests/range.rs @@ -1,4 +1,6 @@ use crate::{test_utils::*, *}; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_fixed_vec::FixedVec; use orx_iterable::Iterable; use orx_split_vec::SplitVec; diff --git a/src/computational_variants/tests/slice.rs b/src/computational_variants/tests/slice.rs index 6067c01..9443408 100644 --- a/src/computational_variants/tests/slice.rs +++ b/src/computational_variants/tests/slice.rs @@ -1,4 +1,6 @@ use crate::{collect_into::ParCollectIntoCore, test_utils::*, *}; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_fixed_vec::FixedVec; use orx_iterable::Collection; use orx_split_vec::{Doubling, Linear, PseudoDefault, SplitVec}; diff --git a/src/computational_variants/tests/sum.rs b/src/computational_variants/tests/sum.rs index f4f78ad..b67143d 100644 --- a/src/computational_variants/tests/sum.rs +++ b/src/computational_variants/tests/sum.rs @@ -1,4 +1,6 @@ use crate::{test_utils::*, *}; +use alloc::string::{String, 
ToString}; +use alloc::vec::Vec; use test_case::test_matrix; fn input>(n: usize) -> O { diff --git a/src/computational_variants/tests/vectors.rs b/src/computational_variants/tests/vectors.rs index 0a8f668..4ce0a94 100644 --- a/src/computational_variants/tests/vectors.rs +++ b/src/computational_variants/tests/vectors.rs @@ -1,4 +1,8 @@ use crate::{test_utils::*, *}; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec; +use alloc::vec::Vec; use orx_fixed_vec::FixedVec; use orx_iterable::Collection; use orx_split_vec::SplitVec; diff --git a/src/computational_variants/tests/xap/collect.rs b/src/computational_variants/tests/xap/collect.rs index aac3277..125fbc1 100644 --- a/src/computational_variants/tests/xap/collect.rs +++ b/src/computational_variants/tests/xap/collect.rs @@ -3,6 +3,9 @@ use crate::computational_variants::ParXap; use crate::generic_values::Vector; use crate::orch::DefaultOrchestrator; use crate::{IterationOrder, Params}; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_concurrent_iter::IntoConcurrentIter; use orx_pinned_vec::PinnedVec; use orx_split_vec::SplitVec; diff --git a/src/computational_variants/tests/xap/find.rs b/src/computational_variants/tests/xap/find.rs index 54ad109..5e487da 100644 --- a/src/computational_variants/tests/xap/find.rs +++ b/src/computational_variants/tests/xap/find.rs @@ -3,6 +3,9 @@ use crate::Params; use crate::computational_variants::ParXap; use crate::generic_values::Vector; use crate::orch::DefaultOrchestrator; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; diff --git a/src/computational_variants/tests/xap/reduce.rs b/src/computational_variants/tests/xap/reduce.rs index d5a5016..55593ea 100644 --- a/src/computational_variants/tests/xap/reduce.rs +++ b/src/computational_variants/tests/xap/reduce.rs @@ -3,6 +3,9 @@ use crate::Params; use crate::computational_variants::ParXap; use crate::generic_values::Vector; use crate::orch::DefaultOrchestrator; +use alloc::format; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; From 8c897c98c9a6d670480dfc16f7147078458da3b1 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 17:09:08 +0200 Subject: [PATCH 139/264] minor --- src/collect_into/vec.rs | 15 +++++++++++++++ src/lib.rs | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 627bf96..663f49f 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -95,3 +95,18 @@ where self.len() } } + +// #[cfg(test)] +// mod tsts { +// use crate::*; +// use alloc::vec::Vec; +// use orx_split_vec::SplitVec; + +// #[test] +// fn abc() { +// fn take>(c: C) {} + +// take(SplitVec::new()); +// take(Vec::new()); +// } +// } diff --git a/src/lib.rs b/src/lib.rs index 2c70774..ccac46a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,7 +10,7 @@ clippy::missing_panics_doc, clippy::todo )] -#![no_std] +// #![no_std] extern crate alloc; From f6ae78faf3d808e3988be2beb24e39158a02db2f Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 17:13:38 +0200 Subject: [PATCH 140/264] wip --- src/collect_into/par_collect_into.rs | 2 +- src/computational_variants/map.rs | 2 +- src/computational_variants/par.rs | 2 +- src/computational_variants/xap.rs | 2 +- src/lib.rs | 4 ++-- src/par_iter.rs | 2 +- src/{using => 
using_old}/collect_into/fixed_vec.rs | 6 +++--- src/{using => using_old}/collect_into/mod.rs | 0 src/{using => using_old}/collect_into/split_vec.rs | 6 +++--- src/{using => using_old}/collect_into/u_par_collect_into.rs | 4 ++-- src/{using => using_old}/collect_into/vec.rs | 6 +++--- src/{using => using_old}/computational_variants/mod.rs | 0 src/{using => using_old}/computational_variants/u_map.rs | 2 +- src/{using => using_old}/computational_variants/u_par.rs | 2 +- src/{using => using_old}/computational_variants/u_xap.rs | 2 +- src/{using => using_old}/computations/default_fns.rs | 0 src/{using => using_old}/computations/mod.rs | 0 src/{using => using_old}/computations/u_map/collect.rs | 6 +++--- src/{using => using_old}/computations/u_map/m.rs | 2 +- src/{using => using_old}/computations/u_map/mod.rs | 0 src/{using => using_old}/computations/u_map/next.rs | 4 ++-- src/{using => using_old}/computations/u_map/reduce.rs | 6 +++--- .../computations/u_map/tests/collect.rs | 3 ++- src/{using => using_old}/computations/u_map/tests/find.rs | 2 +- src/{using => using_old}/computations/u_map/tests/mod.rs | 0 src/{using => using_old}/computations/u_map/tests/reduce.rs | 4 ++-- .../computations/u_map/transformations.rs | 4 ++-- src/{using => using_old}/computations/u_xap/collect.rs | 6 +++--- src/{using => using_old}/computations/u_xap/mod.rs | 0 src/{using => using_old}/computations/u_xap/next.rs | 6 +++--- src/{using => using_old}/computations/u_xap/reduce.rs | 6 +++--- .../computations/u_xap/tests/collect.rs | 2 +- src/{using => using_old}/computations/u_xap/tests/find.rs | 2 +- src/{using => using_old}/computations/u_xap/tests/mod.rs | 0 src/{using => using_old}/computations/u_xap/tests/reduce.rs | 2 +- src/{using => using_old}/computations/u_xap/x.rs | 2 +- src/{using => using_old}/mod.rs | 0 src/{using => using_old}/runner/mod.rs | 0 .../runner/parallel_runner_compute/mod.rs | 0 .../runner/parallel_runner_compute/u_collect_arbitrary.rs | 6 +++--- .../runner/parallel_runner_compute/u_collect_ordered.rs | 4 ++-- .../runner/parallel_runner_compute/u_next.rs | 4 ++-- .../runner/parallel_runner_compute/u_next_any.rs | 4 ++-- .../runner/parallel_runner_compute/u_reduce.rs | 4 ++-- .../runner/thread_runner_compute/mod.rs | 0 .../runner/thread_runner_compute/u_collect_arbitrary.rs | 0 .../runner/thread_runner_compute/u_collect_ordered.rs | 0 .../runner/thread_runner_compute/u_next.rs | 0 .../runner/thread_runner_compute/u_next_any.rs | 0 .../runner/thread_runner_compute/u_reduce.rs | 0 src/{using => using_old}/u_par_iter.rs | 2 +- src/{using => using_old}/using_variants.rs | 0 52 files changed, 62 insertions(+), 61 deletions(-) rename src/{using => using_old}/collect_into/fixed_vec.rs (86%) rename src/{using => using_old}/collect_into/mod.rs (100%) rename src/{using => using_old}/collect_into/split_vec.rs (90%) rename src/{using => using_old}/collect_into/u_par_collect_into.rs (90%) rename src/{using => using_old}/collect_into/vec.rs (91%) rename src/{using => using_old}/computational_variants/mod.rs (100%) rename src/{using => using_old}/computational_variants/u_map.rs (99%) rename src/{using => using_old}/computational_variants/u_par.rs (99%) rename src/{using => using_old}/computational_variants/u_xap.rs (99%) rename src/{using => using_old}/computations/default_fns.rs (100%) rename src/{using => using_old}/computations/mod.rs (100%) rename src/{using => using_old}/computations/u_map/collect.rs (88%) rename src/{using => using_old}/computations/u_map/m.rs (98%) rename src/{using => 
using_old}/computations/u_map/mod.rs (100%) rename src/{using => using_old}/computations/u_map/next.rs (86%) rename src/{using => using_old}/computations/u_map/reduce.rs (80%) rename src/{using => using_old}/computations/u_map/tests/collect.rs (92%) rename src/{using => using_old}/computations/u_map/tests/find.rs (96%) rename src/{using => using_old}/computations/u_map/tests/mod.rs (100%) rename src/{using => using_old}/computations/u_map/tests/reduce.rs (96%) rename src/{using => using_old}/computations/u_map/transformations.rs (89%) rename src/{using => using_old}/computations/u_xap/collect.rs (93%) rename src/{using => using_old}/computations/u_xap/mod.rs (100%) rename src/{using => using_old}/computations/u_xap/next.rs (87%) rename src/{using => using_old}/computations/u_xap/reduce.rs (85%) rename src/{using => using_old}/computations/u_xap/tests/collect.rs (98%) rename src/{using => using_old}/computations/u_xap/tests/find.rs (97%) rename src/{using => using_old}/computations/u_xap/tests/mod.rs (100%) rename src/{using => using_old}/computations/u_xap/tests/reduce.rs (97%) rename src/{using => using_old}/computations/u_xap/x.rs (97%) rename src/{using => using_old}/mod.rs (100%) rename src/{using => using_old}/runner/mod.rs (100%) rename src/{using => using_old}/runner/parallel_runner_compute/mod.rs (100%) rename src/{using => using_old}/runner/parallel_runner_compute/u_collect_arbitrary.rs (97%) rename src/{using => using_old}/runner/parallel_runner_compute/u_collect_ordered.rs (98%) rename src/{using => using_old}/runner/parallel_runner_compute/u_next.rs (98%) rename src/{using => using_old}/runner/parallel_runner_compute/u_next_any.rs (97%) rename src/{using => using_old}/runner/parallel_runner_compute/u_reduce.rs (98%) rename src/{using => using_old}/runner/thread_runner_compute/mod.rs (100%) rename src/{using => using_old}/runner/thread_runner_compute/u_collect_arbitrary.rs (100%) rename src/{using => using_old}/runner/thread_runner_compute/u_collect_ordered.rs (100%) rename src/{using => using_old}/runner/thread_runner_compute/u_next.rs (100%) rename src/{using => using_old}/runner/thread_runner_compute/u_next_any.rs (100%) rename src/{using => using_old}/runner/thread_runner_compute/u_reduce.rs (100%) rename src/{using => using_old}/u_par_iter.rs (99%) rename src/{using => using_old}/using_variants.rs (100%) diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index 0e7e8ab..c03c319 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; -use crate::using::UParCollectIntoCore; +use crate::using_old::UParCollectIntoCore; use orx_concurrent_iter::ConcurrentIter; use orx_iterable::Collection; use orx_pinned_vec::IntoConcurrentPinnedVec; diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index b70a2f9..66ce165 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -7,7 +7,7 @@ use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, - using::{UsingClone, UsingFun, computational_variants::UParMap}, + using_old::{UsingClone, UsingFun, computational_variants::UParMap}, }; use orx_concurrent_iter::ConcurrentIter; diff --git 
a/src/computational_variants/par.rs b/src/computational_variants/par.rs index a07f8d3..37c1c3e 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -7,7 +7,7 @@ use crate::runner::parallel_runner_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, computations::map_self, - using::{UsingClone, UsingFun, computational_variants::UPar}, + using_old::{UsingClone, UsingFun, computational_variants::UPar}, }; use crate::{IntoParIter, ParIterResult}; use orx_concurrent_iter::chain::ChainKnownLenI; diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 1d97e9c..a15ec48 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -7,7 +7,7 @@ use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, - using::{UsingClone, UsingFun, computational_variants::UParXap}, + using_old::{UsingClone, UsingFun, computational_variants::UParXap}, }; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/lib.rs b/src/lib.rs index ccac46a..b304395 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -41,7 +41,7 @@ mod parameters; pub mod runner; mod special_type_sets; /// Module defining parallel iterators with mutable access to values distributed to each thread. -pub mod using; +pub mod using_old; /// Module defining the GenericIterator which is a generalization over /// sequential iterator, rayon's parallel iterator and orx-parallel's @@ -68,4 +68,4 @@ pub use parallelizable_collection_mut::ParallelizableCollectionMut; pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params}; pub use runner::{DefaultRunner, ParallelRunner, ThreadRunner}; pub use special_type_sets::Sum; -pub use using::ParIterUsing; +pub use using_old::ParIterUsing; diff --git a/src/par_iter.rs b/src/par_iter.rs index c42b5d0..3a1b04a 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -3,7 +3,7 @@ use crate::computational_variants::fallible_option::ParOption; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_option::{IntoOption, ParIterOption}; use crate::par_iter_result::IntoResult; -use crate::using::{UsingClone, UsingFun}; +use crate::using_old::{UsingClone, UsingFun}; use crate::{ ParIterUsing, Params, collect_into::ParCollectInto, diff --git a/src/using/collect_into/fixed_vec.rs b/src/using_old/collect_into/fixed_vec.rs similarity index 86% rename from src/using/collect_into/fixed_vec.rs rename to src/using_old/collect_into/fixed_vec.rs index 2243727..7b3d7b6 100644 --- a/src/using/collect_into/fixed_vec.rs +++ b/src/using_old/collect_into/fixed_vec.rs @@ -1,9 +1,9 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::runner::ParallelRunner; -use crate::using::Using; -use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore; -use crate::using::computations::{UM, UX}; +use crate::using_old::Using; +use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCore; +use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; diff --git a/src/using/collect_into/mod.rs b/src/using_old/collect_into/mod.rs similarity index 100% rename from src/using/collect_into/mod.rs rename to src/using_old/collect_into/mod.rs diff --git a/src/using/collect_into/split_vec.rs 
b/src/using_old/collect_into/split_vec.rs similarity index 90% rename from src/using/collect_into/split_vec.rs rename to src/using_old/collect_into/split_vec.rs index 66cf6fd..adfa43b 100644 --- a/src/using/collect_into/split_vec.rs +++ b/src/using_old/collect_into/split_vec.rs @@ -2,9 +2,9 @@ use crate::collect_into::utils::split_vec_reserve; use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::runner::ParallelRunner; -use crate::using::Using; -use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore; -use crate::using::computations::{UM, UX}; +use crate::using_old::Using; +use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCore; +use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; use orx_split_vec::{GrowthWithConstantTimeAccess, PseudoDefault, SplitVec}; diff --git a/src/using/collect_into/u_par_collect_into.rs b/src/using_old/collect_into/u_par_collect_into.rs similarity index 90% rename from src/using/collect_into/u_par_collect_into.rs rename to src/using_old/collect_into/u_par_collect_into.rs index 5dfe73d..ec52de0 100644 --- a/src/using/collect_into/u_par_collect_into.rs +++ b/src/using_old/collect_into/u_par_collect_into.rs @@ -2,8 +2,8 @@ use crate::collect_into::ParCollectIntoCore; use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::runner::ParallelRunner; -use crate::using::Using; -use crate::using::computations::{UM, UX}; +use crate::using_old::Using; +use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; pub trait UParCollectIntoCore: ParCollectIntoCore { diff --git a/src/using/collect_into/vec.rs b/src/using_old/collect_into/vec.rs similarity index 91% rename from src/using/collect_into/vec.rs rename to src/using_old/collect_into/vec.rs index 9b3385e..961ed99 100644 --- a/src/using/collect_into/vec.rs +++ b/src/using_old/collect_into/vec.rs @@ -2,9 +2,9 @@ use crate::collect_into::utils::extend_vec_from_split; use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::runner::ParallelRunner; -use crate::using::Using; -use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore; -use crate::using::computations::{UM, UX}; +use crate::using_old::Using; +use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCore; +use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; use orx_split_vec::SplitVec; diff --git a/src/using/computational_variants/mod.rs b/src/using_old/computational_variants/mod.rs similarity index 100% rename from src/using/computational_variants/mod.rs rename to src/using_old/computational_variants/mod.rs diff --git a/src/using/computational_variants/u_map.rs b/src/using_old/computational_variants/u_map.rs similarity index 99% rename from src/using/computational_variants/u_map.rs rename to src/using_old/computational_variants/u_map.rs index 661da72..d5cf22f 100644 --- a/src/using/computational_variants/u_map.rs +++ b/src/using_old/computational_variants/u_map.rs @@ -2,7 +2,7 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, generic_values::Vector, orch::{DefaultOrchestrator, Orchestrator}, - using::{ + using_old::{ Using, computational_variants::u_xap::UParXap, computations::UM, u_par_iter::ParIterUsing, }, }; diff --git a/src/using/computational_variants/u_par.rs b/src/using_old/computational_variants/u_par.rs 
similarity index 99% rename from src/using/computational_variants/u_par.rs rename to src/using_old/computational_variants/u_par.rs index fd901f9..d417b42 100644 --- a/src/using/computational_variants/u_par.rs +++ b/src/using_old/computational_variants/u_par.rs @@ -2,7 +2,7 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, generic_values::Vector, orch::{DefaultOrchestrator, Orchestrator}, - using::{ + using_old::{ Using, computational_variants::{u_map::UParMap, u_xap::UParXap}, computations::{UM, u_map_self}, diff --git a/src/using/computational_variants/u_xap.rs b/src/using_old/computational_variants/u_xap.rs similarity index 99% rename from src/using/computational_variants/u_xap.rs rename to src/using_old/computational_variants/u_xap.rs index ba4bb4f..d3848a2 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using_old/computational_variants/u_xap.rs @@ -2,7 +2,7 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, generic_values::{TransformableValues, runner_results::Infallible}, orch::{DefaultOrchestrator, Orchestrator}, - using::{Using, computations::UX, u_par_iter::ParIterUsing}, + using_old::{Using, computations::UX, u_par_iter::ParIterUsing}, }; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; diff --git a/src/using/computations/default_fns.rs b/src/using_old/computations/default_fns.rs similarity index 100% rename from src/using/computations/default_fns.rs rename to src/using_old/computations/default_fns.rs diff --git a/src/using/computations/mod.rs b/src/using_old/computations/mod.rs similarity index 100% rename from src/using/computations/mod.rs rename to src/using_old/computations/mod.rs diff --git a/src/using/computations/u_map/collect.rs b/src/using_old/computations/u_map/collect.rs similarity index 88% rename from src/using/computations/u_map/collect.rs rename to src/using_old/computations/u_map/collect.rs index ec95b16..bcb33bb 100644 --- a/src/using/computations/u_map/collect.rs +++ b/src/using_old/computations/u_map/collect.rs @@ -3,10 +3,10 @@ use super::m::UM; use crate::IterationOrder; use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using::Using; +use crate::using_old::Using; #[cfg(test)] -use crate::using::runner::parallel_runner_compute::u_collect_arbitrary; -use crate::using::runner::parallel_runner_compute::u_collect_ordered; +use crate::using_old::runner::parallel_runner_compute::u_collect_arbitrary; +use crate::using_old::runner::parallel_runner_compute::u_collect_ordered; use orx_concurrent_iter::ConcurrentIter; use orx_pinned_vec::IntoConcurrentPinnedVec; diff --git a/src/using/computations/u_map/m.rs b/src/using_old/computations/u_map/m.rs similarity index 98% rename from src/using/computations/u_map/m.rs rename to src/using_old/computations/u_map/m.rs index f52b10f..b0c3608 100644 --- a/src/using/computations/u_map/m.rs +++ b/src/using_old/computations/u_map/m.rs @@ -1,4 +1,4 @@ -use crate::{ChunkSize, IterationOrder, NumThreads, Params, using::Using}; +use crate::{ChunkSize, IterationOrder, NumThreads, Params, using_old::Using}; use orx_concurrent_iter::ConcurrentIter; pub struct UM diff --git a/src/using/computations/u_map/mod.rs b/src/using_old/computations/u_map/mod.rs similarity index 100% rename from src/using/computations/u_map/mod.rs rename to src/using_old/computations/u_map/mod.rs diff --git a/src/using/computations/u_map/next.rs b/src/using_old/computations/u_map/next.rs similarity index 86% rename from 
src/using/computations/u_map/next.rs rename to src/using_old/computations/u_map/next.rs index b88927c..c11d596 100644 --- a/src/using/computations/u_map/next.rs +++ b/src/using_old/computations/u_map/next.rs @@ -1,8 +1,8 @@ use super::m::UM; use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using::Using; -use crate::using::runner::parallel_runner_compute::{u_next, u_next_any}; +use crate::using_old::Using; +use crate::using_old::runner::parallel_runner_compute::{u_next, u_next_any}; use orx_concurrent_iter::ConcurrentIter; impl UM diff --git a/src/using/computations/u_map/reduce.rs b/src/using_old/computations/u_map/reduce.rs similarity index 80% rename from src/using/computations/u_map/reduce.rs rename to src/using_old/computations/u_map/reduce.rs index b8e64dd..c062170 100644 --- a/src/using/computations/u_map/reduce.rs +++ b/src/using_old/computations/u_map/reduce.rs @@ -1,8 +1,8 @@ use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using::Using; -use crate::using::computations::UM; -use crate::using::runner::parallel_runner_compute::u_reduce; +use crate::using_old::Using; +use crate::using_old::computations::UM; +use crate::using_old::runner::parallel_runner_compute::u_reduce; use orx_concurrent_iter::ConcurrentIter; impl UM diff --git a/src/using/computations/u_map/tests/collect.rs b/src/using_old/computations/u_map/tests/collect.rs similarity index 92% rename from src/using/computations/u_map/tests/collect.rs rename to src/using_old/computations/u_map/tests/collect.rs index 0e89c93..e1ebf38 100644 --- a/src/using/computations/u_map/tests/collect.rs +++ b/src/using_old/computations/u_map/tests/collect.rs @@ -1,5 +1,6 @@ use crate::{ - IterationOrder, Params, runner::DefaultRunner, using::UsingClone, using::computations::UM, + IterationOrder, Params, runner::DefaultRunner, using_old::UsingClone, + using_old::computations::UM, }; use orx_concurrent_iter::IntoConcurrentIter; use orx_pinned_vec::PinnedVec; diff --git a/src/using/computations/u_map/tests/find.rs b/src/using_old/computations/u_map/tests/find.rs similarity index 96% rename from src/using/computations/u_map/tests/find.rs rename to src/using_old/computations/u_map/tests/find.rs index 8227cdb..d1e60eb 100644 --- a/src/using/computations/u_map/tests/find.rs +++ b/src/using_old/computations/u_map/tests/find.rs @@ -1,6 +1,6 @@ use crate::{ DefaultRunner, Params, - using::{UsingClone, UsingFun, computations::UM}, + using_old::{UsingClone, UsingFun, computations::UM}, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; diff --git a/src/using/computations/u_map/tests/mod.rs b/src/using_old/computations/u_map/tests/mod.rs similarity index 100% rename from src/using/computations/u_map/tests/mod.rs rename to src/using_old/computations/u_map/tests/mod.rs diff --git a/src/using/computations/u_map/tests/reduce.rs b/src/using_old/computations/u_map/tests/reduce.rs similarity index 96% rename from src/using/computations/u_map/tests/reduce.rs rename to src/using_old/computations/u_map/tests/reduce.rs index 222a915..f9e1189 100644 --- a/src/using/computations/u_map/tests/reduce.rs +++ b/src/using_old/computations/u_map/tests/reduce.rs @@ -1,8 +1,8 @@ use crate::{ Params, runner::DefaultRunner, - using::computations::UM, - using::{UsingClone, UsingFun}, + using_old::computations::UM, + using_old::{UsingClone, UsingFun}, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; diff --git 
a/src/using/computations/u_map/transformations.rs b/src/using_old/computations/u_map/transformations.rs similarity index 89% rename from src/using/computations/u_map/transformations.rs rename to src/using_old/computations/u_map/transformations.rs index ce559de..29932eb 100644 --- a/src/using/computations/u_map/transformations.rs +++ b/src/using_old/computations/u_map/transformations.rs @@ -1,5 +1,5 @@ -use crate::using::Using; -use crate::using::computations::UM; +use crate::using_old::Using; +use crate::using_old::computations::UM; use orx_concurrent_iter::ConcurrentIter; impl UM diff --git a/src/using/computations/u_xap/collect.rs b/src/using_old/computations/u_xap/collect.rs similarity index 93% rename from src/using/computations/u_xap/collect.rs rename to src/using_old/computations/u_xap/collect.rs index 9a01af4..5e917b3 100644 --- a/src/using/computations/u_xap/collect.rs +++ b/src/using_old/computations/u_xap/collect.rs @@ -2,9 +2,9 @@ use crate::generic_values::runner_results::{ Infallible, ParallelCollect, ParallelCollectArbitrary, }; use crate::orch::NumSpawned; -use crate::using::Using; -use crate::using::computations::UX; -use crate::using::runner::parallel_runner_compute::{u_collect_arbitrary, u_collect_ordered}; +use crate::using_old::Using; +use crate::using_old::computations::UX; +use crate::using_old::runner::parallel_runner_compute::{u_collect_arbitrary, u_collect_ordered}; use crate::{ IterationOrder, generic_values::Values, diff --git a/src/using/computations/u_xap/mod.rs b/src/using_old/computations/u_xap/mod.rs similarity index 100% rename from src/using/computations/u_xap/mod.rs rename to src/using_old/computations/u_xap/mod.rs diff --git a/src/using/computations/u_xap/next.rs b/src/using_old/computations/u_xap/next.rs similarity index 87% rename from src/using/computations/u_xap/next.rs rename to src/using_old/computations/u_xap/next.rs index 498ee79..3181dbb 100644 --- a/src/using/computations/u_xap/next.rs +++ b/src/using_old/computations/u_xap/next.rs @@ -2,9 +2,9 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using::Using; -use crate::using::computations::UX; -use crate::using::runner::parallel_runner_compute::{u_next, u_next_any}; +use crate::using_old::Using; +use crate::using_old::computations::UX; +use crate::using_old::runner::parallel_runner_compute::{u_next, u_next_any}; use orx_concurrent_iter::ConcurrentIter; impl UX diff --git a/src/using/computations/u_xap/reduce.rs b/src/using_old/computations/u_xap/reduce.rs similarity index 85% rename from src/using/computations/u_xap/reduce.rs rename to src/using_old/computations/u_xap/reduce.rs index ae679b1..ebfe653 100644 --- a/src/using/computations/u_xap/reduce.rs +++ b/src/using_old/computations/u_xap/reduce.rs @@ -2,9 +2,9 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::orch::NumSpawned; use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using::Using; -use crate::using::computations::UX; -use crate::using::runner::parallel_runner_compute::u_reduce; +use crate::using_old::Using; +use crate::using_old::computations::UX; +use crate::using_old::runner::parallel_runner_compute::u_reduce; use orx_concurrent_iter::ConcurrentIter; impl UX diff --git a/src/using/computations/u_xap/tests/collect.rs b/src/using_old/computations/u_xap/tests/collect.rs similarity index 98% rename from 
src/using/computations/u_xap/tests/collect.rs rename to src/using_old/computations/u_xap/tests/collect.rs index 0fbddb0..3086012 100644 --- a/src/using/computations/u_xap/tests/collect.rs +++ b/src/using_old/computations/u_xap/tests/collect.rs @@ -2,7 +2,7 @@ use crate::{ IterationOrder, Params, generic_values::Vector, runner::DefaultRunner, - using::{UsingClone, computations::UX}, + using_old::{UsingClone, computations::UX}, }; use orx_concurrent_iter::IntoConcurrentIter; use orx_pinned_vec::PinnedVec; diff --git a/src/using/computations/u_xap/tests/find.rs b/src/using_old/computations/u_xap/tests/find.rs similarity index 97% rename from src/using/computations/u_xap/tests/find.rs rename to src/using_old/computations/u_xap/tests/find.rs index c206f2b..c72acbb 100644 --- a/src/using/computations/u_xap/tests/find.rs +++ b/src/using_old/computations/u_xap/tests/find.rs @@ -1,7 +1,7 @@ use crate::{ DefaultRunner, Params, generic_values::Vector, - using::{UsingClone, computations::UX}, + using_old::{UsingClone, computations::UX}, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; diff --git a/src/using/computations/u_xap/tests/mod.rs b/src/using_old/computations/u_xap/tests/mod.rs similarity index 100% rename from src/using/computations/u_xap/tests/mod.rs rename to src/using_old/computations/u_xap/tests/mod.rs diff --git a/src/using/computations/u_xap/tests/reduce.rs b/src/using_old/computations/u_xap/tests/reduce.rs similarity index 97% rename from src/using/computations/u_xap/tests/reduce.rs rename to src/using_old/computations/u_xap/tests/reduce.rs index cd83065..80f96ba 100644 --- a/src/using/computations/u_xap/tests/reduce.rs +++ b/src/using_old/computations/u_xap/tests/reduce.rs @@ -2,7 +2,7 @@ use crate::{ Params, generic_values::Vector, runner::DefaultRunner, - using::{UsingClone, computations::UX}, + using_old::{UsingClone, computations::UX}, }; use orx_concurrent_iter::IntoConcurrentIter; use test_case::test_matrix; diff --git a/src/using/computations/u_xap/x.rs b/src/using_old/computations/u_xap/x.rs similarity index 97% rename from src/using/computations/u_xap/x.rs rename to src/using_old/computations/u_xap/x.rs index 184b6e1..2e60ccb 100644 --- a/src/using/computations/u_xap/x.rs +++ b/src/using_old/computations/u_xap/x.rs @@ -1,4 +1,4 @@ -use crate::using::Using; +use crate::using_old::Using; use crate::{ChunkSize, IterationOrder, NumThreads, Params, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/mod.rs b/src/using_old/mod.rs similarity index 100% rename from src/using/mod.rs rename to src/using_old/mod.rs diff --git a/src/using/runner/mod.rs b/src/using_old/runner/mod.rs similarity index 100% rename from src/using/runner/mod.rs rename to src/using_old/runner/mod.rs diff --git a/src/using/runner/parallel_runner_compute/mod.rs b/src/using_old/runner/parallel_runner_compute/mod.rs similarity index 100% rename from src/using/runner/parallel_runner_compute/mod.rs rename to src/using_old/runner/parallel_runner_compute/mod.rs diff --git a/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs b/src/using_old/runner/parallel_runner_compute/u_collect_arbitrary.rs similarity index 97% rename from src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs rename to src/using_old/runner/parallel_runner_compute/u_collect_arbitrary.rs index 666b214..9f97322 100644 --- a/src/using/runner/parallel_runner_compute/u_collect_arbitrary.rs +++ b/src/using_old/runner/parallel_runner_compute/u_collect_arbitrary.rs @@ -3,10 
+3,10 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; use crate::orch::NumSpawned; use crate::runner::ParallelRunnerCompute; -use crate::using::Using; +use crate::using_old::Using; #[cfg(test)] -use crate::using::computations::UM; -use crate::using::computations::UX; +use crate::using_old::computations::UM; +use crate::using_old::computations::UX; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/using/runner/parallel_runner_compute/u_collect_ordered.rs b/src/using_old/runner/parallel_runner_compute/u_collect_ordered.rs similarity index 98% rename from src/using/runner/parallel_runner_compute/u_collect_ordered.rs rename to src/using_old/runner/parallel_runner_compute/u_collect_ordered.rs index 90d893e..9421610 100644 --- a/src/using/runner/parallel_runner_compute/u_collect_ordered.rs +++ b/src/using_old/runner/parallel_runner_compute/u_collect_ordered.rs @@ -3,8 +3,8 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; use crate::orch::NumSpawned; use crate::runner::ParallelRunnerCompute; -use crate::using::Using; -use crate::using::computations::{UM, UX}; +use crate::using_old::Using; +use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/using/runner/parallel_runner_compute/u_next.rs b/src/using_old/runner/parallel_runner_compute/u_next.rs similarity index 98% rename from src/using/runner/parallel_runner_compute/u_next.rs rename to src/using_old/runner/parallel_runner_compute/u_next.rs index 9a61886..10a2d8f 100644 --- a/src/using/runner/parallel_runner_compute/u_next.rs +++ b/src/using_old/runner/parallel_runner_compute/u_next.rs @@ -3,8 +3,8 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::NumSpawned; use crate::runner::ParallelRunnerCompute; -use crate::using::Using; -use crate::using::computations::{UM, UX}; +use crate::using_old::Using; +use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; pub fn u_m(runner: C, m: UM) -> (NumSpawned, Option) diff --git a/src/using/runner/parallel_runner_compute/u_next_any.rs b/src/using_old/runner/parallel_runner_compute/u_next_any.rs similarity index 97% rename from src/using/runner/parallel_runner_compute/u_next_any.rs rename to src/using_old/runner/parallel_runner_compute/u_next_any.rs index 21ccad3..12e2077 100644 --- a/src/using/runner/parallel_runner_compute/u_next_any.rs +++ b/src/using_old/runner/parallel_runner_compute/u_next_any.rs @@ -1,8 +1,8 @@ use super::super::thread_runner_compute as th; use crate::generic_values::runner_results::Fallibility; use crate::orch::NumSpawned; -use crate::using::Using; -use crate::using::computations::{UM, UX}; +use crate::using_old::Using; +use crate::using_old::computations::{UM, UX}; use crate::{generic_values::Values, runner::ParallelRunnerCompute}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_runner_compute/u_reduce.rs b/src/using_old/runner/parallel_runner_compute/u_reduce.rs similarity index 98% rename from src/using/runner/parallel_runner_compute/u_reduce.rs rename to src/using_old/runner/parallel_runner_compute/u_reduce.rs index 370932b..28b67c9 
100644 --- a/src/using/runner/parallel_runner_compute/u_reduce.rs +++ b/src/using_old/runner/parallel_runner_compute/u_reduce.rs @@ -3,8 +3,8 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, Reduce}; use crate::orch::NumSpawned; use crate::runner::ParallelRunnerCompute; -use crate::using::Using; -use crate::using::computations::{UM, UX}; +use crate::using_old::Using; +use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; // m diff --git a/src/using/runner/thread_runner_compute/mod.rs b/src/using_old/runner/thread_runner_compute/mod.rs similarity index 100% rename from src/using/runner/thread_runner_compute/mod.rs rename to src/using_old/runner/thread_runner_compute/mod.rs diff --git a/src/using/runner/thread_runner_compute/u_collect_arbitrary.rs b/src/using_old/runner/thread_runner_compute/u_collect_arbitrary.rs similarity index 100% rename from src/using/runner/thread_runner_compute/u_collect_arbitrary.rs rename to src/using_old/runner/thread_runner_compute/u_collect_arbitrary.rs diff --git a/src/using/runner/thread_runner_compute/u_collect_ordered.rs b/src/using_old/runner/thread_runner_compute/u_collect_ordered.rs similarity index 100% rename from src/using/runner/thread_runner_compute/u_collect_ordered.rs rename to src/using_old/runner/thread_runner_compute/u_collect_ordered.rs diff --git a/src/using/runner/thread_runner_compute/u_next.rs b/src/using_old/runner/thread_runner_compute/u_next.rs similarity index 100% rename from src/using/runner/thread_runner_compute/u_next.rs rename to src/using_old/runner/thread_runner_compute/u_next.rs diff --git a/src/using/runner/thread_runner_compute/u_next_any.rs b/src/using_old/runner/thread_runner_compute/u_next_any.rs similarity index 100% rename from src/using/runner/thread_runner_compute/u_next_any.rs rename to src/using_old/runner/thread_runner_compute/u_next_any.rs diff --git a/src/using/runner/thread_runner_compute/u_reduce.rs b/src/using_old/runner/thread_runner_compute/u_reduce.rs similarity index 100% rename from src/using/runner/thread_runner_compute/u_reduce.rs rename to src/using_old/runner/thread_runner_compute/u_reduce.rs diff --git a/src/using/u_par_iter.rs b/src/using_old/u_par_iter.rs similarity index 99% rename from src/using/u_par_iter.rs rename to src/using_old/u_par_iter.rs index bc555b2..a7aa241 100644 --- a/src/using/u_par_iter.rs +++ b/src/using_old/u_par_iter.rs @@ -1,7 +1,7 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum, orch::{DefaultOrchestrator, Orchestrator}, - using::{ + using_old::{ Using, computations::{u_map_clone, u_map_copy, u_map_count, u_reduce_sum, u_reduce_unit}, }, diff --git a/src/using/using_variants.rs b/src/using_old/using_variants.rs similarity index 100% rename from src/using/using_variants.rs rename to src/using_old/using_variants.rs From e166d1fa39407531945892a67fa00fcb3ae43cf0 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 17:17:17 +0200 Subject: [PATCH 141/264] re-init ParIterUsing --- src/lib.rs | 2 + src/using/default_fns.rs | 32 +++ src/using/mod.rs | 3 + src/using/u_par_iter.rs | 399 ++++++++++++++++++++++++++++++++++++ src/using/using_variants.rs | 70 +++++++ 5 files changed, 506 insertions(+) create mode 100644 src/using/default_fns.rs create mode 100644 src/using/mod.rs create mode 100644 src/using/u_par_iter.rs create mode 100644 src/using/using_variants.rs diff --git a/src/lib.rs b/src/lib.rs index b304395..8a4e6d4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ 
-41,6 +41,8 @@ mod parameters;
 pub mod runner;
 mod special_type_sets;
 /// Module defining parallel iterators with mutable access to values distributed to each thread.
+pub mod using;
+/// Module defining parallel iterators with mutable access to values distributed to each thread.
 pub mod using_old;
 
 /// Module defining the GenericIterator which is a generalization over
diff --git a/src/using/default_fns.rs b/src/using/default_fns.rs
new file mode 100644
index 0000000..84d9739
--- /dev/null
+++ b/src/using/default_fns.rs
@@ -0,0 +1,32 @@
+use std::ops::Add;
+
+#[inline(always)]
+pub fn u_map_self<U, T>(_: &mut U, input: T) -> T {
+    input
+}
+
+#[inline(always)]
+pub fn u_map_copy<U, T: Copy>(_: &mut U, x: &T) -> T {
+    *x
+}
+
+#[inline(always)]
+pub fn u_map_clone<U, T: Clone>(_: &mut U, x: &T) -> T {
+    x.clone()
+}
+
+#[inline(always)]
+pub fn u_map_count<U, T>(_: &mut U, _: T) -> usize {
+    1
+}
+
+#[inline(always)]
+pub fn u_reduce_sum<U, T>(_: &mut U, a: T, b: T) -> T
+where
+    T: Add<T, Output = T>,
+{
+    a + b
+}
+
+#[inline(always)]
+pub fn u_reduce_unit<U>(_: &mut U, _: (), _: ()) {}
diff --git a/src/using/mod.rs b/src/using/mod.rs
new file mode 100644
index 0000000..e105856
--- /dev/null
+++ b/src/using/mod.rs
@@ -0,0 +1,3 @@
+mod default_fns;
+mod u_par_iter;
+mod using_variants;
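The u_* helpers above mirror the crate's plain map/reduce defaults, with the per-thread `&mut U` threaded through as a first argument that the defaults simply ignore. As a minimal sequential sketch of the calling convention (the driver loop and names below are illustrative, not part of the patch):

    // illustrative sequential driver for the u_* helpers defined above
    fn sketch() {
        let mut state = 0u64; // stands in for the per-thread U value; unused by the defaults
        let inputs = [1i32, 2, 3];
        let mut acc: Option<i32> = None;
        for x in &inputs {
            let mapped = u_map_copy(&mut state, x); // copies *x, ignores state
            acc = Some(match acc {
                Some(a) => u_reduce_sum(&mut state, a, mapped), // a + mapped
                None => mapped,
            });
        }
        assert_eq!(acc, Some(6));
    }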
diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs
new file mode 100644
index 0000000..da6eb19
--- /dev/null
+++ b/src/using/u_par_iter.rs
@@ -0,0 +1,399 @@
+use super::default_fns::*;
+use crate::{
+    ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum,
+    orch::{DefaultOrchestrator, Orchestrator},
+    using::using_variants::Using,
+};
+use core::cmp::Ordering;
+use orx_concurrent_iter::ConcurrentIter;
+
+/// Parallel iterator which allows mutable access to a variable of type `U` within its iterator methods.
+///
+/// Note that one variable will be created per thread used by the parallel computation.
+pub trait ParIterUsing<U, R = DefaultOrchestrator>: Sized + Send + Sync
+where
+    R: Orchestrator,
+    U: Using,
+{
+    /// Element type of the parallel iterator.
+    type Item;
+
+    /// Returns a reference to the input concurrent iterator.
+    fn con_iter(&self) -> &impl ConcurrentIter;
+
+    /// Parameters of the parallel iterator.
+    ///
+    /// See [crate::ParIter::params] for details.
+    fn params(&self) -> Params;
+
+    // params transformations
+
+    /// Sets the number of threads to be used in the parallel execution.
+    /// Integers can be used as the argument with the following mapping:
+    ///
+    /// * `0` -> `NumThreads::Auto`
+    /// * `1` -> `NumThreads::sequential()`
+    /// * `n > 0` -> `NumThreads::Max(n)`
+    ///
+    /// See [crate::ParIter::num_threads] for details.
+    fn num_threads(self, num_threads: impl Into<NumThreads>) -> Self;
+
+    /// Sets the number of elements to be pulled from the concurrent iterator during the
+    /// parallel execution. When integers are used as argument, the following mapping applies:
+    ///
+    /// * `0` -> `ChunkSize::Auto`
+    /// * `n > 0` -> `ChunkSize::Exact(n)`
+    ///
+    /// Please use the default enum constructor for creating the `ChunkSize::Min` variant.
+    ///
+    /// See [crate::ParIter::chunk_size] for details.
+    fn chunk_size(self, chunk_size: impl Into<ChunkSize>) -> Self;
+
+    /// Sets the iteration order of the parallel computation.
+    ///
+    /// See [crate::ParIter::iteration_order] for details.
+    fn iteration_order(self, collect: IterationOrder) -> Self;
+
+    /// Rather than the [`DefaultOrchestrator`], uses the parallel runner `Q` which implements [`Orchestrator`].
+    ///
+    /// See [crate::ParIter::with_runner] for details.
+    fn with_runner<Q: Orchestrator>(self) -> impl ParIterUsing<U, Q, Item = Self::Item>;
+
+    // computation transformations
+
+    /// Takes a closure `map` and creates a parallel iterator which calls that closure on each element.
+    ///
+    /// Unlike [crate::ParIter::map], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn map<Out, Map>(self, map: Map) -> impl ParIterUsing<U, R, Item = Out>
+    where
+        Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone;
+
+    /// Creates an iterator which uses a closure `filter` to determine if an element should be yielded.
+    ///
+    /// Unlike [crate::ParIter::filter], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn filter<Filter>(self, filter: Filter) -> impl ParIterUsing<U, R, Item = Self::Item>
+    where
+        Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone;
+
+    /// Creates an iterator that works like map, but flattens nested structure.
+    ///
+    /// Unlike [crate::ParIter::flat_map], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn flat_map<IOut, FlatMap>(
+        self,
+        flat_map: FlatMap,
+    ) -> impl ParIterUsing<U, R, Item = IOut::Item>
+    where
+        IOut: IntoIterator,
+        FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone;
+
+    /// Creates an iterator that both filters and maps.
+    ///
+    /// The returned iterator yields only the values for which the supplied closure `filter_map` returns `Some(value)`.
+    ///
+    /// `filter_map` can be used to make chains of `filter` and `map` more concise.
+    /// The example below shows how a `map().filter().map()` can be shortened to a single call to `filter_map`.
+    ///
+    /// Unlike [crate::ParIter::filter_map], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn filter_map<Out, FilterMap>(
+        self,
+        filter_map: FilterMap,
+    ) -> impl ParIterUsing<U, R, Item = Out>
+    where
+        FilterMap: Fn(&mut U::Item, Self::Item) -> Option<Out> + Sync + Clone;
+
+    /// Does something with each element of an iterator, passing the value on.
+    ///
+    /// Unlike [crate::ParIter::inspect], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn inspect<Operation>(self, operation: Operation) -> impl ParIterUsing<U, R, Item = Self::Item>
+    where
+        Operation: Fn(&mut U::Item, &Self::Item) + Sync + Clone,
+    {
+        let map = move |u: &mut U::Item, x: Self::Item| {
+            operation(u, &x);
+            x
+        };
+        self.map(map)
+    }
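Once `ParIter::using` is wired to this trait (at this commit the public entry points still go through `using_old`), a chain could look as follows; the scratch-buffer use case and all names below are illustrative, so treat this as a sketch of the intended API rather than code that compiles at this point of the history:

    use orx_parallel::*;

    // each thread creates one scratch Vec and reuses it across elements
    fn demo(inputs: Vec<u64>) -> Vec<u64> {
        inputs
            .into_par()
            .using(|_thread_idx| Vec::<u64>::with_capacity(64))
            .map(|scratch, x| {
                scratch.clear();
                scratch.extend(0..(x % 8)); // reuse the allocation instead of reallocating
                x + scratch.len() as u64
            })
            .collect()
    }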
+
+    // special item transformations
+
+    /// Creates an iterator which copies all of its elements.
+    ///
+    /// Unlike [crate::ParIter::copied], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn copied<'a, T>(self) -> impl ParIterUsing<U, R, Item = T>
+    where
+        T: 'a + Copy,
+        Self: ParIterUsing<U, R, Item = &'a T>,
+    {
+        self.map(u_map_copy)
+    }
+
+    /// Creates an iterator which clones all of its elements.
+    ///
+    /// Unlike [crate::ParIter::cloned], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn cloned<'a, T>(self) -> impl ParIterUsing<U, R, Item = T>
+    where
+        T: 'a + Clone,
+        Self: ParIterUsing<U, R, Item = &'a T>,
+    {
+        self.map(u_map_clone)
+    }
+
+    /// Creates an iterator that flattens nested structure.
+    ///
+    /// Unlike [crate::ParIter::flatten], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn flatten(self) -> impl ParIterUsing<U, R, Item = <Self::Item as IntoIterator>::Item>
+    where
+        Self::Item: IntoIterator,
+    {
+        let map = |_: &mut U::Item, e: Self::Item| e.into_iter();
+        self.flat_map(map)
+    }
+
+    // collect
+
+    /// Collects all the items from an iterator into a collection.
+    ///
+    /// Unlike [crate::ParIter::collect_into], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn collect_into<C>(self, output: C) -> C
+    where
+        C: ParCollectInto<Self::Item>;
+
+    /// Transforms an iterator into a collection.
+    ///
+    /// Unlike [crate::ParIter::collect], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn collect<C>(self) -> C
+    where
+        C: ParCollectInto<Self::Item>,
+    {
+        let output = C::empty(self.con_iter().try_get_len());
+        self.collect_into(output)
+    }
+
+    // reduce
+
+    /// Reduces the elements to a single one, by repeatedly applying a reducing operation.
+    ///
+    /// See the details here: [crate::ParIter::reduce].
+    fn reduce<Reduce>(self, reduce: Reduce) -> Option<Self::Item>
+    where
+        Self::Item: Send,
+        Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync;
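The `reduce` contract above differs from a std-like fold only in the extra `&mut U::Item` first argument. A sequential analogue that pins down the closure shape (the helper below is illustrative only):

    // sequential analogue of Reduce: Fn(&mut U::Item, T, T) -> T
    fn sequential_reduce<S, T>(
        state: &mut S,
        items: impl IntoIterator<Item = T>,
        reduce: impl Fn(&mut S, T, T) -> T,
    ) -> Option<T> {
        let mut acc: Option<T> = None;
        for x in items {
            acc = Some(match acc {
                Some(a) => reduce(state, a, x),
                None => x,
            });
        }
        acc
    }

    // e.g., counting reducer invocations while summing:
    // let mut calls = 0usize;
    // let sum = sequential_reduce(&mut calls, [1, 2, 3], |c, a, b| { *c += 1; a + b });
    // assert_eq!((sum, calls), (Some(6), 2));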
+
+    /// Tests if every element of the iterator matches a predicate.
+    ///
+    /// Unlike [crate::ParIter::all], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn all<Predicate>(self, predicate: Predicate) -> bool
+    where
+        Self::Item: Send,
+        Predicate: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone,
+    {
+        let violates = |u: &mut U::Item, x: &Self::Item| !predicate(u, x);
+        self.find(violates).is_none()
+    }
+
+    /// Tests if any element of the iterator matches a predicate.
+    ///
+    /// Unlike [crate::ParIter::any], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn any<Predicate>(self, predicate: Predicate) -> bool
+    where
+        Self::Item: Send,
+        Predicate: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone,
+    {
+        self.find(predicate).is_some()
+    }
+
+    /// Consumes the iterator, counting the number of iterations and returning it.
+    ///
+    /// See the details here: [crate::ParIter::count].
+    fn count(self) -> usize {
+        self.map(u_map_count).reduce(u_reduce_sum).unwrap_or(0)
+    }
+
+    /// Calls a closure on each element of an iterator.
+    ///
+    /// Unlike [crate::ParIter::for_each], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn for_each<Operation>(self, operation: Operation)
+    where
+        Operation: Fn(&mut U::Item, Self::Item) + Sync,
+    {
+        let map = |u: &mut U::Item, x| operation(u, x);
+        let _ = self.map(map).reduce(u_reduce_unit);
+    }
+
+    /// Returns the maximum element of an iterator.
+    ///
+    /// See the details here: [crate::ParIter::max].
+    fn max(self) -> Option<Self::Item>
+    where
+        Self::Item: Ord + Send,
+    {
+        self.reduce(|_, a, b| Ord::max(a, b))
+    }
+
+    /// Returns the element that gives the maximum value with respect to the specified `compare` function.
+    ///
+    /// See the details here: [crate::ParIter::max_by].
+    fn max_by<Compare>(self, compare: Compare) -> Option<Self::Item>
+    where
+        Self::Item: Send,
+        Compare: Fn(&Self::Item, &Self::Item) -> Ordering + Sync,
+    {
+        let reduce = |_: &mut U::Item, x, y| match compare(&x, &y) {
+            Ordering::Greater | Ordering::Equal => x,
+            Ordering::Less => y,
+        };
+        self.reduce(reduce)
+    }
+
+    /// Returns the element that gives the maximum value from the specified function.
+    ///
+    /// See the details here: [crate::ParIter::max_by_key].
+    fn max_by_key<Key, GetKey>(self, key: GetKey) -> Option<Self::Item>
+    where
+        Self::Item: Send,
+        Key: Ord,
+        GetKey: Fn(&Self::Item) -> Key + Sync,
+    {
+        let reduce = |_: &mut U::Item, x, y| match key(&x).cmp(&key(&y)) {
+            Ordering::Greater | Ordering::Equal => x,
+            Ordering::Less => y,
+        };
+        self.reduce(reduce)
+    }
+
+    /// Returns the minimum element of an iterator.
+    ///
+    /// See the details here: [crate::ParIter::min].
+    fn min(self) -> Option<Self::Item>
+    where
+        Self::Item: Ord + Send,
+    {
+        self.reduce(|_, a, b| Ord::min(a, b))
+    }
+
+    /// Returns the element that gives the minimum value with respect to the specified `compare` function.
+    ///
+    /// See the details here: [crate::ParIter::min_by].
+    fn min_by<Compare>(self, compare: Compare) -> Option<Self::Item>
+    where
+        Self::Item: Send,
+        Compare: Fn(&Self::Item, &Self::Item) -> Ordering + Sync,
+    {
+        let reduce = |_: &mut U::Item, x, y| match compare(&x, &y) {
+            Ordering::Less | Ordering::Equal => x,
+            Ordering::Greater => y,
+        };
+        self.reduce(reduce)
+    }
+
+    /// Returns the element that gives the minimum value from the specified function.
+    ///
+    /// See the details here: [crate::ParIter::min_by_key].
+    fn min_by_key<Key, GetKey>(self, get_key: GetKey) -> Option<Self::Item>
+    where
+        Self::Item: Send,
+        Key: Ord,
+        GetKey: Fn(&Self::Item) -> Key + Sync,
+    {
+        let reduce = |_: &mut U::Item, x, y| match get_key(&x).cmp(&get_key(&y)) {
+            Ordering::Less | Ordering::Equal => x,
+            Ordering::Greater => y,
+        };
+        self.reduce(reduce)
+    }
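A detail worth making explicit in the min/max reductions above: on `Ordering::Equal`, both `min_by` and `max_by` keep the left operand, so ties resolve toward the earlier element when the reduction is ordered. A tiny standalone check of that tie-breaking rule (illustrative):

    use core::cmp::Ordering;

    // mirrors the reduce closure of `min_by` above: on Equal, keep x
    fn min_by_step<T>(x: T, y: T, compare: impl Fn(&T, &T) -> Ordering) -> T {
        match compare(&x, &y) {
            Ordering::Less | Ordering::Equal => x,
            Ordering::Greater => y,
        }
    }

    fn main() {
        // both pairs compare equal on the first component; the earlier one wins
        let winner = min_by_step((1, "first"), (1, "second"), |p, q| p.0.cmp(&q.0));
        assert_eq!(winner.1, "first");
    }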
+
+    /// Sums the elements of an iterator.
+    ///
+    /// See the details here: [crate::ParIter::sum].
+    fn sum<Out>(self) -> Out
+    where
+        Self::Item: Sum<Out>,
+        Out: Send,
+    {
+        self.map(Self::Item::u_map)
+            .reduce(Self::Item::u_reduce)
+            .unwrap_or(Self::Item::zero())
+    }
+
+    // early exit
+
+    /// Returns the first (or any) element of the iterator; returns None if it is empty.
+    ///
+    /// * first element is returned if default iteration order `IterationOrder::Ordered` is used,
+    /// * any element is returned if `IterationOrder::Arbitrary` is set.
+    ///
+    /// See the details here: [crate::ParIter::first].
+    fn first(self) -> Option<Self::Item>
+    where
+        Self::Item: Send;
+
+    /// Searches for an element of an iterator that satisfies a `predicate`.
+    ///
+    /// Unlike [crate::ParIter::find], the closure allows access to a mutable reference of the used variable.
+    ///
+    /// Please see [`crate::ParIter::using`] transformation for details and examples.
+    ///
+    /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md).
+    fn find<Predicate>(self, predicate: Predicate) -> Option<Self::Item>
+    where
+        Self::Item: Send,
+        Predicate: Fn(&mut U::Item, &Self::Item) -> bool + Sync,
+    {
+        self.filter(&predicate).first()
+    }
+}
diff --git a/src/using/using_variants.rs b/src/using/using_variants.rs
new file mode 100644
index 0000000..5489d23
--- /dev/null
+++ b/src/using/using_variants.rs
@@ -0,0 +1,70 @@
+/// A type that can [`create`] a value per thread, which will then be sent to the thread
+/// and used mutably by the defined computation.
+///
+/// [`create`]: crate::using::Using::create
+pub trait Using {
+    /// Item to be used mutably by each thread in the parallel computation.
+    type Item: Send + 'static;
+
+    /// Creates an instance of the variable to be used by the `thread_idx`-th thread.
+    fn create(&mut self, thread_idx: usize) -> Self::Item;
+
+    /// Consumes self and creates exactly one instance of the variable.
+    fn into_inner(self) -> Self::Item;
+}
+
+/// Using variant that creates the per-thread instances by cloning an initial value.
+pub struct UsingClone<T: Clone + Send + 'static>(T);
+
+impl<T: Clone + Send + 'static> UsingClone<T> {
+    pub(crate) fn new(value: T) -> Self {
+        Self(value)
+    }
+}
+
+impl<T: Clone + Send + 'static> Using for UsingClone<T> {
+    type Item = T;
+
+    fn create(&mut self, _: usize) -> T {
+        self.0.clone()
+    }
+
+    fn into_inner(self) -> Self::Item {
+        self.0
+    }
+}
+
+/// Using variant that creates the per-thread instances using a closure.
+pub struct UsingFun<F, T>
+where
+    T: Send + 'static,
+    F: FnMut(usize) -> T,
+{
+    fun: F,
+}
+
+impl<F, T> UsingFun<F, T>
+where
+    T: Send + 'static,
+    F: FnMut(usize) -> T,
+{
+    pub(crate) fn new(fun: F) -> Self {
+        Self { fun }
+    }
+}
+
+impl<F, T> Using for UsingFun<F, T>
+where
+    T: Send + 'static,
+    F: FnMut(usize) -> T,
+{
+    type Item = T;
+
+    fn create(&mut self, thread_idx: usize) -> Self::Item {
+        (self.fun)(thread_idx)
+    }
+
+    fn into_inner(mut self) -> Self::Item {
+        (self.fun)(0)
+    }
+}
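Read together, the two variants cover the common cases: `UsingClone` clones one prototype per thread, while `UsingFun` derives a value from the thread index. Since `new` is `pub(crate)`, user code reaches these only through `ParIter::using` and `using_clone`; the sketch below only mimics the runner's view from inside the crate (all values are illustrative):

    // runner's view: one value created per spawned thread
    fn sketch() {
        let mut by_clone = UsingClone::new(vec![0u8; 16]);
        let mut by_fun = UsingFun::new(|thread_idx| (thread_idx, 0u64));

        let buf3 = by_clone.create(3); // fresh clone of the prototype Vec
        let st3 = by_fun.create(3); // (3, 0): derived from the thread index
        assert_eq!((buf3.len(), st3.0), (16, 3));
    }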
From 224338243b52d239fbeba8e6c06d19ca3d5c13e3 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Tue, 16 Sep 2025 17:19:47 +0200
Subject: [PATCH 142/264] export ParIterUsing

---
 src/computational_variants/map.rs | 6 ++---
 src/computational_variants/par.rs | 6 ++---
 src/computational_variants/xap.rs | 6 ++---
 src/lib.rs | 3 ++-
 src/par_iter.rs | 6 ++---
 src/using/mod.rs | 2 ++
 src/using_old/computational_variants/u_map.rs | 15 ++++++-----
 src/using_old/computational_variants/u_par.rs | 14 +++++-----
 src/using_old/computational_variants/u_xap.rs | 14 +++++-----
 src/using_old/mod.rs | 2 +-
 src/using_old/u_par_iter.rs | 27 ++++++++++---------
 11 files changed, 54 insertions(+), 47 deletions(-)

diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs
index 66ce165..fc781b6 100644
--- a/src/computational_variants/map.rs
+++ b/src/computational_variants/map.rs
@@ -6,7 +6,7 @@ use crate::orch::{DefaultOrchestrator, Orchestrator};
 use crate::par_iter_result::IntoResult;
 use crate::runner::parallel_runner_compute as prc;
 use crate::{
-    ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params,
+    ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsingOld, Params,
     using_old::{UsingClone, UsingFun, computational_variants::UParMap},
 };
 use orx_concurrent_iter::ConcurrentIter;
@@ -103,7 +103,7 @@ where
     fn using<U, F>(
         self,
         using: F,
-    ) -> impl ParIterUsing<UsingFun<F, U>, R, Item = <Self as ParIter<R>>::Item>
+    ) -> impl ParIterUsingOld<UsingFun<F, U>, R, Item = <Self as ParIter<R>>::Item>
     where
         U: Send + 'static,
         F: FnMut(usize) -> U,
@@ -117,7 +117,7 @@
     fn using_clone<U>(
         self,
         using: U,
-    ) -> impl ParIterUsing<UsingClone<U>, R, Item = <Self as ParIter<R>>::Item>
+    ) -> impl ParIterUsingOld<UsingClone<U>, R, Item = <Self as ParIter<R>>::Item>
     where
         U: Clone + Send + 'static,
     {
diff --git 
crate::runner::parallel_runner_compute as prc; use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsing, Params, + ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsingOld, Params, using_old::{UsingClone, UsingFun, computational_variants::UParXap}, }; use orx_concurrent_iter::ConcurrentIter; @@ -110,7 +110,7 @@ where fn using( self, using: F, - ) -> impl ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsingOld, R, Item = >::Item> where U: Send + 'static, F: FnMut(usize) -> U, @@ -124,7 +124,7 @@ where fn using_clone( self, using: U, - ) -> impl ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsingOld, R, Item = >::Item> where U: Clone + Send + 'static, { diff --git a/src/lib.rs b/src/lib.rs index 8a4e6d4..68810a5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -70,4 +70,5 @@ pub use parallelizable_collection_mut::ParallelizableCollectionMut; pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params}; pub use runner::{DefaultRunner, ParallelRunner, ThreadRunner}; pub use special_type_sets::Sum; -pub use using_old::ParIterUsing; +pub use using::ParIterUsing; +pub use using_old::ParIterUsingOld; diff --git a/src/par_iter.rs b/src/par_iter.rs index 3a1b04a..cf29837 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -5,7 +5,7 @@ use crate::par_iter_option::{IntoOption, ParIterOption}; use crate::par_iter_result::IntoResult; use crate::using_old::{UsingClone, UsingFun}; use crate::{ - ParIterUsing, Params, + ParIterUsingOld, Params, collect_into::ParCollectInto, computations::{map_clone, map_copy, map_count, reduce_sum, reduce_unit}, parameters::{ChunkSize, IterationOrder, NumThreads}, @@ -475,7 +475,7 @@ where fn using( self, using: F, - ) -> impl ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsingOld, R, Item = >::Item> where U: Send + 'static, F: FnMut(usize) -> U; @@ -496,7 +496,7 @@ where fn using_clone( self, value: U, - ) -> impl ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsingOld, R, Item = >::Item> where U: Clone + Send + 'static; diff --git a/src/using/mod.rs b/src/using/mod.rs index e105856..457897f 100644 --- a/src/using/mod.rs +++ b/src/using/mod.rs @@ -1,3 +1,5 @@ mod default_fns; mod u_par_iter; mod using_variants; + +pub use u_par_iter::ParIterUsing; diff --git a/src/using_old/computational_variants/u_map.rs b/src/using_old/computational_variants/u_map.rs index d5cf22f..31c9ea2 100644 --- a/src/using_old/computational_variants/u_map.rs +++ b/src/using_old/computational_variants/u_map.rs @@ -3,7 +3,8 @@ use crate::{ generic_values::Vector, orch::{DefaultOrchestrator, Orchestrator}, using_old::{ - Using, computational_variants::u_xap::UParXap, computations::UM, u_par_iter::ParIterUsing, + Using, computational_variants::u_xap::UParXap, computations::UM, + u_par_iter::ParIterUsingOld, }, }; use orx_concurrent_iter::ConcurrentIter; @@ -58,7 +59,7 @@ where { } -impl ParIterUsing for UParMap +impl ParIterUsingOld for UParMap where R: Orchestrator, U: Using, @@ -92,14 +93,14 @@ where self } - fn with_runner(self) -> impl ParIterUsing { + fn with_runner(self) -> impl ParIterUsingOld { let (using, params, iter, map) = self.destruct(); UParMap::new(using, params, iter, map) } // computation transformations - fn map(self, map: Map) -> impl ParIterUsing + fn map(self, map: Map) -> impl ParIterUsingOld where Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone, { @@ -111,7 +112,7 @@ where UParMap::new(using, params, iter, m1) } - fn filter(self, filter: Filter) -> impl ParIterUsing + fn filter(self, 
filter: Filter) -> impl ParIterUsingOld where Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, { @@ -128,7 +129,7 @@ where fn flat_map( self, flat_map: FlatMap, - ) -> impl ParIterUsing + ) -> impl ParIterUsingOld where IOut: IntoIterator, FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone, @@ -144,7 +145,7 @@ where fn filter_map( self, filter_map: FilterMap, - ) -> impl ParIterUsing + ) -> impl ParIterUsingOld where FilterMap: Fn(&mut U::Item, Self::Item) -> Option + Sync + Clone, { diff --git a/src/using_old/computational_variants/u_par.rs b/src/using_old/computational_variants/u_par.rs index d417b42..70bb11c 100644 --- a/src/using_old/computational_variants/u_par.rs +++ b/src/using_old/computational_variants/u_par.rs @@ -6,7 +6,7 @@ use crate::{ Using, computational_variants::{u_map::UParMap, u_xap::UParXap}, computations::{UM, u_map_self}, - u_par_iter::ParIterUsing, + u_par_iter::ParIterUsingOld, }, }; use orx_concurrent_iter::ConcurrentIter; @@ -67,7 +67,7 @@ where { } -impl ParIterUsing for UPar +impl ParIterUsingOld for UPar where U: Using, R: Orchestrator, @@ -100,13 +100,13 @@ where self } - fn with_runner(self) -> impl ParIterUsing { + fn with_runner(self) -> impl ParIterUsingOld { UPar::new(self.using, self.params, self.iter) } // computational transformations - fn map(self, map: Map) -> impl ParIterUsing + fn map(self, map: Map) -> impl ParIterUsingOld where Map: Fn(&mut ::Item, Self::Item) -> Out + Sync + Clone, { @@ -115,7 +115,7 @@ where UParMap::new(using, params, iter, map) } - fn filter(self, filter: Filter) -> impl ParIterUsing + fn filter(self, filter: Filter) -> impl ParIterUsingOld where Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, { @@ -127,7 +127,7 @@ where fn flat_map( self, flat_map: FlatMap, - ) -> impl ParIterUsing + ) -> impl ParIterUsingOld where IOut: IntoIterator, FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone, @@ -140,7 +140,7 @@ where fn filter_map( self, filter_map: FilterMap, - ) -> impl ParIterUsing + ) -> impl ParIterUsingOld where FilterMap: Fn(&mut ::Item, Self::Item) -> Option + Sync + Clone, { diff --git a/src/using_old/computational_variants/u_xap.rs b/src/using_old/computational_variants/u_xap.rs index d3848a2..3875e79 100644 --- a/src/using_old/computational_variants/u_xap.rs +++ b/src/using_old/computational_variants/u_xap.rs @@ -2,7 +2,7 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, generic_values::{TransformableValues, runner_results::Infallible}, orch::{DefaultOrchestrator, Orchestrator}, - using_old::{Using, computations::UX, u_par_iter::ParIterUsing}, + using_old::{Using, computations::UX, u_par_iter::ParIterUsingOld}, }; use orx_concurrent_iter::ConcurrentIter; use std::marker::PhantomData; @@ -62,7 +62,7 @@ where { } -impl ParIterUsing for UParXap +impl ParIterUsingOld for UParXap where R: Orchestrator, U: Using, @@ -97,14 +97,14 @@ where self } - fn with_runner(self) -> impl ParIterUsing { + fn with_runner(self) -> impl ParIterUsingOld { let (using, params, iter, map1) = self.destruct(); UParXap::new(using, params, iter, map1) } // computation transformations - fn map(self, map: Map) -> impl ParIterUsing + fn map(self, map: Map) -> impl ParIterUsingOld where Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone, { @@ -127,7 +127,7 @@ where UParXap::new(using, params, iter, x1) } - fn filter(self, filter: Filter) -> impl ParIterUsing + fn filter(self, filter: Filter) -> impl ParIterUsingOld where Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync 
+ Clone, { @@ -151,7 +151,7 @@ where fn flat_map( self, flat_map: FlatMap, - ) -> impl ParIterUsing + ) -> impl ParIterUsingOld where IOut: IntoIterator, FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone, @@ -176,7 +176,7 @@ where fn filter_map( self, filter_map: FilterMap, - ) -> impl ParIterUsing + ) -> impl ParIterUsingOld where FilterMap: Fn(&mut U::Item, Self::Item) -> Option + Sync + Clone, { diff --git a/src/using_old/mod.rs b/src/using_old/mod.rs index 2298834..22df421 100644 --- a/src/using_old/mod.rs +++ b/src/using_old/mod.rs @@ -7,5 +7,5 @@ mod u_par_iter; mod using_variants; pub(crate) use collect_into::UParCollectIntoCore; -pub use u_par_iter::ParIterUsing; +pub use u_par_iter::ParIterUsingOld; pub use using_variants::{Using, UsingClone, UsingFun}; diff --git a/src/using_old/u_par_iter.rs b/src/using_old/u_par_iter.rs index a7aa241..f64d42c 100644 --- a/src/using_old/u_par_iter.rs +++ b/src/using_old/u_par_iter.rs @@ -12,7 +12,7 @@ use orx_concurrent_iter::ConcurrentIter; /// Parallel iterator which allows mutable access to a variable of type `U` within its iterator methods. /// /// Note that one variable will be created per thread used by the parallel computation. -pub trait ParIterUsing: Sized + Send + Sync +pub trait ParIterUsingOld: Sized + Send + Sync where R: Orchestrator, U: Using, @@ -61,7 +61,7 @@ where /// Rather than the [`DefaultOrchestrator`], uses the parallel runner `Q` which implements [`Orchestrator`]. /// /// See [crate::ParIter::with_runner] for details. - fn with_runner(self) -> impl ParIterUsing; + fn with_runner(self) -> impl ParIterUsingOld; // computation transformations @@ -72,7 +72,7 @@ where /// Please see [`crate::ParIter::using`] transformation for details and examples. /// /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn map(self, map: Map) -> impl ParIterUsing + fn map(self, map: Map) -> impl ParIterUsingOld where Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone; @@ -83,7 +83,7 @@ where /// Please see [`crate::ParIter::using`] transformation for details and examples. /// /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn filter(self, filter: Filter) -> impl ParIterUsing + fn filter(self, filter: Filter) -> impl ParIterUsingOld where Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone; @@ -97,7 +97,7 @@ where fn flat_map( self, flat_map: FlatMap, - ) -> impl ParIterUsing + ) -> impl ParIterUsingOld where IOut: IntoIterator, FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone; @@ -117,7 +117,7 @@ where fn filter_map( self, filter_map: FilterMap, - ) -> impl ParIterUsing + ) -> impl ParIterUsingOld where FilterMap: Fn(&mut U::Item, Self::Item) -> Option + Sync + Clone; @@ -128,7 +128,10 @@ where /// Please see [`crate::ParIter::using`] transformation for details and examples. /// /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn inspect(self, operation: Operation) -> impl ParIterUsing + fn inspect( + self, + operation: Operation, + ) -> impl ParIterUsingOld where Operation: Fn(&mut U::Item, &Self::Item) + Sync + Clone, { @@ -148,10 +151,10 @@ where /// Please see [`crate::ParIter::using`] transformation for details and examples. /// /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). 
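    ///
    /// As a minimal illustrative sketch (the buffer-reuse scenario and the
    /// `sum` call are assumptions for the example, not part of this patch):
    /// one value is created per thread, and every closure call on that thread
    /// receives the same value by `&mut`, so a scratch buffer can be reused
    /// across elements without locking:
    ///
    /// ```ignore
    /// let total: u64 = (0..1_000)
    ///     .into_par()
    ///     .using(|_thread_idx| Vec::<u64>::with_capacity(64)) // one buffer per thread
    ///     .map(|buffer, i| {
    ///         buffer.clear(); // reuse the per-thread scratch buffer
    ///         buffer.extend((0..i).map(|x| x as u64));
    ///         buffer.iter().sum::<u64>()
    ///     })
    ///     .sum();
    /// ```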
- fn copied<'a, T>(self) -> impl ParIterUsing + fn copied<'a, T>(self) -> impl ParIterUsingOld where T: 'a + Copy, - Self: ParIterUsing, + Self: ParIterUsingOld, { self.map(u_map_copy) } @@ -163,10 +166,10 @@ where /// Please see [`crate::ParIter::using`] transformation for details and examples. /// /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn cloned<'a, T>(self) -> impl ParIterUsing + fn cloned<'a, T>(self) -> impl ParIterUsingOld where T: 'a + Clone, - Self: ParIterUsing, + Self: ParIterUsingOld, { self.map(u_map_clone) } @@ -178,7 +181,7 @@ where /// Please see [`crate::ParIter::using`] transformation for details and examples. /// /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn flatten(self) -> impl ParIterUsing::Item> + fn flatten(self) -> impl ParIterUsingOld::Item> where Self::Item: IntoIterator, { From 06289673b29bd2d2b524e7046b65cca4aa78cef4 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 17:40:27 +0200 Subject: [PATCH 143/264] default-fn mod reorganization --- src/computational_variants/par.rs | 2 +- src/computational_variants/tests/map/find.rs | 2 +- .../tests/map/reduce.rs | 2 +- src/computations/default_fns.rs | 32 ------------------ src/computations/mod.rs | 2 -- src/{using => }/default_fns.rs | 33 +++++++++++++++++++ src/lib.rs | 1 + src/par_iter.rs | 2 +- src/par_iter_option.rs | 2 +- src/par_iter_result.rs | 2 +- src/using/mod.rs | 1 - src/using/u_par_iter.rs | 2 +- 12 files changed, 41 insertions(+), 42 deletions(-) delete mode 100644 src/computations/default_fns.rs rename src/{using => }/default_fns.rs (55%) diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 9220757..630a730 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -6,7 +6,7 @@ use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsingOld, Params, - computations::map_self, + default_fns::map_self, using_old::{UsingClone, UsingFun, computational_variants::UPar}, }; use crate::{IntoParIter, ParIterResult}; diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs index 4421b8f..fcf30c5 100644 --- a/src/computational_variants/tests/map/find.rs +++ b/src/computational_variants/tests/map/find.rs @@ -1,5 +1,5 @@ use crate::{ - Params, computations::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, + Params, default_fns::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, }; use alloc::format; use alloc::string::{String, ToString}; diff --git a/src/computational_variants/tests/map/reduce.rs b/src/computational_variants/tests/map/reduce.rs index 11b4784..95036d2 100644 --- a/src/computational_variants/tests/map/reduce.rs +++ b/src/computational_variants/tests/map/reduce.rs @@ -1,5 +1,5 @@ use crate::{ - Params, computations::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, + Params, default_fns::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, }; use alloc::format; use alloc::string::{String, ToString}; diff --git a/src/computations/default_fns.rs b/src/computations/default_fns.rs deleted file mode 100644 index b2e4bf7..0000000 --- a/src/computations/default_fns.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::ops::Add; - 
-#[inline(always)] -pub fn map_self(input: T) -> T { - input -} - -#[inline(always)] -pub fn map_count(_: T) -> usize { - 1 -} - -#[inline(always)] -pub fn map_copy(x: &T) -> T { - *x -} - -#[inline(always)] -pub fn map_clone(x: &T) -> T { - x.clone() -} - -#[inline(always)] -pub fn reduce_sum(a: T, b: T) -> T -where - T: Add, -{ - a + b -} - -#[inline(always)] -pub fn reduce_unit(_: (), _: ()) {} diff --git a/src/computations/mod.rs b/src/computations/mod.rs index dd6785d..7c6b281 100644 --- a/src/computations/mod.rs +++ b/src/computations/mod.rs @@ -1,5 +1,3 @@ -mod default_fns; mod heap_sort; -pub(crate) use default_fns::*; pub(crate) use heap_sort::heap_sort_into; diff --git a/src/using/default_fns.rs b/src/default_fns.rs similarity index 55% rename from src/using/default_fns.rs rename to src/default_fns.rs index 84d9739..daa9d11 100644 --- a/src/using/default_fns.rs +++ b/src/default_fns.rs @@ -1,5 +1,38 @@ use std::ops::Add; +#[inline(always)] +pub fn map_self(input: T) -> T { + input +} + +#[inline(always)] +pub fn map_count(_: T) -> usize { + 1 +} + +#[inline(always)] +pub fn map_copy(x: &T) -> T { + *x +} + +#[inline(always)] +pub fn map_clone(x: &T) -> T { + x.clone() +} + +#[inline(always)] +pub fn reduce_sum(a: T, b: T) -> T +where + T: Add, +{ + a + b +} + +#[inline(always)] +pub fn reduce_unit(_: (), _: ()) {} + +// using + #[inline(always)] pub fn u_map_self(_: &mut U, input: T) -> T { input diff --git a/src/lib.rs b/src/lib.rs index 68810a5..fa0f744 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,6 +21,7 @@ mod collect_into; /// Module containing variants of parallel iterators. pub mod computational_variants; mod computations; +mod default_fns; mod env; mod generic_values; mod into_par_iter; diff --git a/src/par_iter.rs b/src/par_iter.rs index cf29837..d3169cf 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -7,7 +7,7 @@ use crate::using_old::{UsingClone, UsingFun}; use crate::{ ParIterUsingOld, Params, collect_into::ParCollectInto, - computations::{map_clone, map_copy, map_count, reduce_sum, reduce_unit}, + default_fns::{map_clone, map_copy, map_count, reduce_sum, reduce_unit}, parameters::{ChunkSize, IterationOrder, NumThreads}, special_type_sets::Sum, }; diff --git a/src/par_iter_option.rs b/src/par_iter_option.rs index 6ac92bb..94e2df3 100644 --- a/src/par_iter_option.rs +++ b/src/par_iter_option.rs @@ -1,4 +1,4 @@ -use crate::computations::{map_count, reduce_sum, reduce_unit}; +use crate::default_fns::{map_count, reduce_sum, reduce_unit}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Sum}; use core::cmp::Ordering; diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs index c23647e..fc336da 100644 --- a/src/par_iter_result.rs +++ b/src/par_iter_result.rs @@ -1,4 +1,4 @@ -use crate::computations::{map_count, reduce_sum, reduce_unit}; +use crate::default_fns::{map_count, reduce_sum, reduce_unit}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::{ChunkSize, IterationOrder, NumThreads, Sum}; use crate::{ParCollectInto, ParIter, generic_values::fallible_iterators::ResultOfIter}; diff --git a/src/using/mod.rs b/src/using/mod.rs index 457897f..ff44a01 100644 --- a/src/using/mod.rs +++ b/src/using/mod.rs @@ -1,4 +1,3 @@ -mod default_fns; mod u_par_iter; mod using_variants; diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs index da6eb19..07a31d9 100644 --- a/src/using/u_par_iter.rs +++ b/src/using/u_par_iter.rs @@ -1,4 +1,4 @@ -use super::default_fns::*; +use 
crate::default_fns::*; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum, orch::{DefaultOrchestrator, Orchestrator}, From 30de1f474e7cf251597d669bedb053b4cda32845 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 17:54:30 +0200 Subject: [PATCH 144/264] using xap map is implemented --- src/computational_variants/xap.rs | 4 +- src/using/computational_variants/mod.rs | 1 + src/using/computational_variants/u_xap.rs | 186 ++++++++++++++++++++++ src/using/mod.rs | 1 + src/using/u_par_iter.rs | 5 +- 5 files changed, 194 insertions(+), 3 deletions(-) create mode 100644 src/using/computational_variants/mod.rs create mode 100644 src/using/computational_variants/u_xap.rs diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 9f3cbdd..91e26d4 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -18,7 +18,7 @@ pub struct ParXap where R: Orchestrator, I: ConcurrentIter, - Vo: Values, + Vo: TransformableValues, X1: Fn(I::Item) -> Vo + Sync, { orchestrator: R, @@ -31,7 +31,7 @@ impl ParXap where R: Orchestrator, I: ConcurrentIter, - Vo: Values, + Vo: TransformableValues, X1: Fn(I::Item) -> Vo + Sync, { pub(crate) fn new(orchestrator: R, params: Params, iter: I, xap1: X1) -> Self { diff --git a/src/using/computational_variants/mod.rs b/src/using/computational_variants/mod.rs new file mode 100644 index 0000000..92a663a --- /dev/null +++ b/src/using/computational_variants/mod.rs @@ -0,0 +1 @@ +mod u_xap; diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs new file mode 100644 index 0000000..8cca07e --- /dev/null +++ b/src/using/computational_variants/u_xap.rs @@ -0,0 +1,186 @@ +use crate::{ + ChunkSize, IterationOrder, NumThreads, ParIterUsing, Params, + generic_values::{TransformableValues, runner_results::Infallible}, + orch::{DefaultOrchestrator, Orchestrator}, + using::using_variants::Using, +}; +use orx_concurrent_iter::ConcurrentIter; + +pub struct UParXap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +{ + using: U, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, +} + +impl UParXap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +{ + pub(crate) fn new(using: U, orchestrator: R, params: Params, iter: I, xap1: X1) -> Self { + Self { + using, + orchestrator, + params, + iter, + xap1, + } + } + + pub(crate) fn destruct(self) -> (U, R, Params, I, X1) { + ( + self.using, + self.orchestrator, + self.params, + self.iter, + self.xap1, + ) + } +} + +unsafe impl Send for UParXap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +{ +} + +unsafe impl Sync for UParXap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +{ +} + +impl ParIterUsing for UParXap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +{ + type Item = Vo::Item; + + fn con_iter(&self) -> &impl ConcurrentIter { + &self.iter + } + + fn params(&self) -> Params { + self.params + } + + fn num_threads(mut self, num_threads: impl Into) -> Self { + self.params = self.params.with_num_threads(num_threads); + self + } + + fn chunk_size(mut self, chunk_size: impl Into) -> Self { + 
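+        // Like `num_threads` above, this is a builder-style setter: it only
+        // records the preference in `Params`; the effective chunk size is
+        // decided when the computation actually runs. An illustrative call
+        // site (names assumed for the example, not part of this patch):
+        //
+        //     par.using(|_| 0u64).chunk_size(64).iteration_order(IterationOrder::Ordered)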
self.params = self.params.with_chunk_size(chunk_size); + self + } + + fn iteration_order(mut self, collect: IterationOrder) -> Self { + self.params = self.params.with_collect_ordering(collect); + self + } + + fn with_runner( + self, + orchestrator: Q, + ) -> impl ParIterUsing { + let (using, _, params, iter, x1) = self.destruct(); + UParXap::new(using, orchestrator, params, iter, x1) + } + + fn map(self, map: Map) -> impl ParIterUsing + where + Map: Fn(&mut ::Item, Self::Item) -> Out + Sync + Clone, + { + let (using, orchestrator, params, iter, x1) = self.destruct(); + + let x1 = move |u: &mut U::Item, i: I::Item| { + let vo = x1(u, i); + // SAFETY: all threads are guaranteed to have its own Using::Item value that is not shared with other threads. + // This guarantees that there will be no race conditions. + // TODO: the reason to have this unsafe block is the complication in lifetimes, which must be possible to fix; however with a large refactoring. + let u = unsafe { + &mut *{ + let p: *mut U::Item = u; + p + } + }; + vo.u_map(u, map.clone()) + }; + + UParXap::new(using, orchestrator, params, iter, x1) + } + + fn filter(self, filter: Filter) -> impl ParIterUsing + where + Filter: Fn(&mut ::Item, &Self::Item) -> bool + Sync + Clone, + { + todo!() + } + + fn flat_map( + self, + flat_map: FlatMap, + ) -> impl ParIterUsing + where + IOut: IntoIterator, + FlatMap: Fn(&mut ::Item, Self::Item) -> IOut + Sync + Clone, + { + todo!() + } + + fn filter_map( + self, + filter_map: FilterMap, + ) -> impl ParIterUsing + where + FilterMap: Fn(&mut ::Item, Self::Item) -> Option + Sync + Clone, + { + todo!() + } + + fn collect_into(self, output: C) -> C + where + C: crate::ParCollectInto, + { + todo!() + } + + fn reduce(self, reduce: Reduce) -> Option + where + Self::Item: Send, + Reduce: Fn(&mut ::Item, Self::Item, Self::Item) -> Self::Item + Sync, + { + todo!() + } + + fn first(self) -> Option + where + Self::Item: Send, + { + todo!() + } +} diff --git a/src/using/mod.rs b/src/using/mod.rs index ff44a01..ea53917 100644 --- a/src/using/mod.rs +++ b/src/using/mod.rs @@ -1,3 +1,4 @@ +mod computational_variants; mod u_par_iter; mod using_variants; diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs index 07a31d9..d9abeed 100644 --- a/src/using/u_par_iter.rs +++ b/src/using/u_par_iter.rs @@ -59,7 +59,10 @@ where /// Rather than the [`DefaultOrchestrator`], uses the parallel runner `Q` which implements [`Orchestrator`]. /// /// See [crate::ParIter::with_runner] for details. - fn with_runner(self) -> impl ParIterUsing; + fn with_runner( + self, + orchestrator: Q, + ) -> impl ParIterUsing; // computation transformations From 41a051c0e2e6a277ac29450c70c510c40bcae3a8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Tue, 16 Sep 2025 17:56:14 +0200 Subject: [PATCH 145/264] all transformations on using-xap are implemented --- src/using/computational_variants/u_xap.rs | 48 +++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index 8cca07e..d99066b 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -138,7 +138,21 @@ where where Filter: Fn(&mut ::Item, &Self::Item) -> bool + Sync + Clone, { - todo!() + let (using, orchestrator, params, iter, x1) = self.destruct(); + let x1 = move |u: &mut U::Item, i: I::Item| { + let vo = x1(u, i); + // SAFETY: all threads are guaranteed to have its own Using::Item value that is not shared with other threads. 
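+            // The raw-pointer round-trip below only erases the lifetime of
+            // the `&mut U::Item` borrow so that the value produced inside the
+            // closure may borrow from it; it does not create a second live
+            // reference, and the per-thread value stays exclusive to this
+            // closure call.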
+ // This guarantees that there will be no race conditions. + // TODO: the reason to have this unsafe block is the complication in lifetimes, which must be possible to fix; however with a large refactoring. + let u = unsafe { + &mut *{ + let p: *mut U::Item = u; + p + } + }; + vo.u_filter(u, filter.clone()) + }; + UParXap::new(using, orchestrator, params, iter, x1) } fn flat_map( @@ -149,7 +163,21 @@ where IOut: IntoIterator, FlatMap: Fn(&mut ::Item, Self::Item) -> IOut + Sync + Clone, { - todo!() + let (using, orchestrator, params, iter, x1) = self.destruct(); + let x1 = move |u: &mut U::Item, i: I::Item| { + let vo = x1(u, i); + // SAFETY: all threads are guaranteed to have its own Using::Item value that is not shared with other threads. + // This guarantees that there will be no race conditions. + // TODO: the reason to have this unsafe block is the complication in lifetimes, which must be possible to fix; however with a large refactoring. + let u = unsafe { + &mut *{ + let p: *mut U::Item = u; + p + } + }; + vo.u_flat_map(u, flat_map.clone()) + }; + UParXap::new(using, orchestrator, params, iter, x1) } fn filter_map( @@ -159,7 +187,21 @@ where where FilterMap: Fn(&mut ::Item, Self::Item) -> Option + Sync + Clone, { - todo!() + let (using, orchestrator, params, iter, x1) = self.destruct(); + let x1 = move |u: &mut U::Item, i: I::Item| { + let vo = x1(u, i); + // SAFETY: all threads are guaranteed to have its own Using::Item value that is not shared with other threads. + // This guarantees that there will be no race conditions. + // TODO: the reason to have this unsafe block is the complication in lifetimes, which must be possible to fix; however with a large refactoring. + let u = unsafe { + &mut *{ + let p: *mut U::Item = u; + p + } + }; + vo.u_filter_map(u, filter_map.clone()) + }; + UParXap::new(using, orchestrator, params, iter, x1) } fn collect_into(self, output: C) -> C From 78943192754e13f40c19757425c114787bf4a384 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:05:37 +0200 Subject: [PATCH 146/264] module refactoring --- src/computations/mod.rs | 3 - .../runner_results/collect_ordered.rs | 2 +- src/{computations => }/heap_sort.rs | 0 src/lib.rs | 2 +- src/using/collect_into/fixed_vec.rs | 75 ++++++++++++ src/using/collect_into/mod.rs | 4 + src/using/collect_into/split_vec.rs | 90 ++++++++++++++ src/using/collect_into/u_par_collect_into.rs | 95 +++++++++++++++ src/using/collect_into/vec.rs | 112 ++++++++++++++++++ src/using/mod.rs | 1 + 10 files changed, 379 insertions(+), 5 deletions(-) delete mode 100644 src/computations/mod.rs rename src/{computations => }/heap_sort.rs (100%) create mode 100644 src/using/collect_into/fixed_vec.rs create mode 100644 src/using/collect_into/mod.rs create mode 100644 src/using/collect_into/split_vec.rs create mode 100644 src/using/collect_into/u_par_collect_into.rs create mode 100644 src/using/collect_into/vec.rs diff --git a/src/computations/mod.rs b/src/computations/mod.rs deleted file mode 100644 index 7c6b281..0000000 --- a/src/computations/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod heap_sort; - -pub(crate) use heap_sort::heap_sort_into; diff --git a/src/generic_values/runner_results/collect_ordered.rs b/src/generic_values/runner_results/collect_ordered.rs index 52a6e26..6b3e671 100644 --- a/src/generic_values/runner_results/collect_ordered.rs +++ b/src/generic_values/runner_results/collect_ordered.rs @@ -1,6 +1,6 @@ use crate::{ - computations::heap_sort_into, generic_values::{Values, runner_results::Fallibility}, + 
heap_sort::heap_sort_into, }; use alloc::vec::Vec; use core::fmt::Debug; diff --git a/src/computations/heap_sort.rs b/src/heap_sort.rs similarity index 100% rename from src/computations/heap_sort.rs rename to src/heap_sort.rs diff --git a/src/lib.rs b/src/lib.rs index fa0f744..712187a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,10 +20,10 @@ extern crate std; mod collect_into; /// Module containing variants of parallel iterators. pub mod computational_variants; -mod computations; mod default_fns; mod env; mod generic_values; +mod heap_sort; mod into_par_iter; /// Module for creating special iterators. pub mod iter; diff --git a/src/using/collect_into/fixed_vec.rs b/src/using/collect_into/fixed_vec.rs new file mode 100644 index 0000000..14728b9 --- /dev/null +++ b/src/using/collect_into/fixed_vec.rs @@ -0,0 +1,75 @@ +// use super::par_collect_into::ParCollectIntoCore; +// use crate::Params; +// use crate::generic_values::runner_results::{Fallibility, Infallible}; +// use crate::generic_values::{TransformableValues, Values}; +// use crate::orch::Orchestrator; +// use alloc::vec::Vec; +// use orx_concurrent_iter::ConcurrentIter; +// use orx_fixed_vec::FixedVec; +// #[cfg(test)] +// use orx_pinned_vec::PinnedVec; + +// impl ParCollectIntoCore for FixedVec +// where +// O: Send + Sync, +// { +// type BridgePinnedVec = Self; + +// fn empty(iter_len: Option) -> Self { +// let vec = as ParCollectIntoCore<_>>::empty(iter_len); +// vec.into() +// } + +// fn m_collect_into(self, orchestrator: R, params: Params, iter: I, map1: M1) -> Self +// where +// R: Orchestrator, +// I: ConcurrentIter, +// M1: Fn(I::Item) -> O + Sync, +// O: Send, +// { +// let vec = Vec::from(self); +// FixedVec::from(vec.m_collect_into(orchestrator, params, iter, map1)) +// } + +// fn x_collect_into( +// self, +// orchestrator: R, +// params: Params, +// iter: I, +// xap1: X1, +// ) -> Self +// where +// R: Orchestrator, +// I: ConcurrentIter, +// Vo: TransformableValues, +// X1: Fn(I::Item) -> Vo + Sync, +// { +// let vec = Vec::from(self); +// FixedVec::from(vec.x_collect_into(orchestrator, params, iter, xap1)) +// } + +// fn x_try_collect_into( +// self, +// orchestrator: R, +// params: Params, +// iter: I, +// xap1: X1, +// ) -> Result::Error> +// where +// R: Orchestrator, +// I: ConcurrentIter, +// X1: Fn(I::Item) -> Vo + Sync, +// Vo: Values, +// { +// let vec = Vec::from(self); +// vec.x_try_collect_into(orchestrator, params, iter, xap1) +// .map(FixedVec::from) +// } + +// // test + +// #[cfg(test)] +// fn length(&self) -> usize { +// self.len() +// } +// } diff --git a/src/using/collect_into/mod.rs b/src/using/collect_into/mod.rs new file mode 100644 index 0000000..793605b --- /dev/null +++ b/src/using/collect_into/mod.rs @@ -0,0 +1,4 @@ +mod fixed_vec; +mod split_vec; +mod u_par_collect_into; +mod vec; diff --git a/src/using/collect_into/split_vec.rs b/src/using/collect_into/split_vec.rs new file mode 100644 index 0000000..ff902bf --- /dev/null +++ b/src/using/collect_into/split_vec.rs @@ -0,0 +1,90 @@ +// use super::par_collect_into::ParCollectIntoCore; +// use crate::Params; +// use crate::collect_into::utils::split_vec_reserve; +// use crate::computational_variants::computations::{ +// map_collect_into, xap_collect_into, xap_try_collect_into, +// }; +// use crate::generic_values::runner_results::{Fallibility, Infallible}; +// use crate::generic_values::{TransformableValues, Values}; +// use crate::orch::Orchestrator; +// use orx_concurrent_iter::ConcurrentIter; +// #[cfg(test)] +// use 
orx_pinned_vec::PinnedVec; +// use orx_split_vec::{GrowthWithConstantTimeAccess, PseudoDefault, SplitVec}; + +// impl ParCollectIntoCore for SplitVec +// where +// O: Send + Sync, +// G: GrowthWithConstantTimeAccess, +// Self: PseudoDefault, +// { +// type BridgePinnedVec = Self; + +// fn empty(iter_len: Option) -> Self { +// let mut vec = Self::pseudo_default(); +// split_vec_reserve(&mut vec, false, iter_len); +// vec +// } + +// fn m_collect_into( +// mut self, +// orchestrator: R, +// params: Params, +// iter: I, +// map1: M1, +// ) -> Self +// where +// R: Orchestrator, +// I: ConcurrentIter, +// M1: Fn(I::Item) -> O + Sync, +// O: Send, +// { +// split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); +// let (_, pinned_vec) = map_collect_into(orchestrator, params, iter, map1, self); +// pinned_vec +// } + +// fn x_collect_into( +// mut self, +// orchestrator: R, +// params: Params, +// iter: I, +// xap1: X1, +// ) -> Self +// where +// R: Orchestrator, +// I: ConcurrentIter, +// Vo: TransformableValues, +// X1: Fn(I::Item) -> Vo + Sync, +// { +// split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); +// let (_num_spawned, pinned_vec) = xap_collect_into(orchestrator, params, iter, xap1, self); +// pinned_vec +// } + +// fn x_try_collect_into( +// mut self, +// orchestrator: R, +// params: Params, +// iter: I, +// xap1: X1, +// ) -> Result::Error> +// where +// R: Orchestrator, +// I: ConcurrentIter, +// X1: Fn(I::Item) -> Vo + Sync, +// Vo: Values, +// Self: Sized, +// { +// split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); +// let (_num_spawned, result) = xap_try_collect_into(orchestrator, params, iter, xap1, self); +// result +// } + +// // test + +// #[cfg(test)] +// fn length(&self) -> usize { +// self.len() +// } +// } diff --git a/src/using/collect_into/u_par_collect_into.rs b/src/using/collect_into/u_par_collect_into.rs new file mode 100644 index 0000000..3f8c2d4 --- /dev/null +++ b/src/using/collect_into/u_par_collect_into.rs @@ -0,0 +1,95 @@ +use crate::Params; +use crate::generic_values::runner_results::{Fallibility, Infallible}; +use crate::generic_values::{TransformableValues, Values}; +use crate::orch::Orchestrator; +use orx_concurrent_iter::ConcurrentIter; +use orx_iterable::Collection; +use orx_pinned_vec::IntoConcurrentPinnedVec; + +pub trait UParCollectIntoCore: Collection { + type BridgePinnedVec: IntoConcurrentPinnedVec; + + fn empty(iter_len: Option) -> Self; + + fn m_collect_into(self, orchestrator: R, params: Params, iter: I, map1: M1) -> Self + where + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(I::Item) -> O + Sync; + + fn x_collect_into( + self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Self + where + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(I::Item) -> Vo + Sync; + + fn x_try_collect_into( + self, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Result::Error> + where + R: Orchestrator, + I: ConcurrentIter, + X1: Fn(I::Item) -> Vo + Sync, + Vo: Values, + Self: Sized; + + // test + + #[cfg(test)] + fn length(&self) -> usize; + + #[cfg(test)] + fn is_empty(&self) -> bool { + self.length() == 0 + } + + #[cfg(test)] + fn is_equal_to<'a>(&self, b: impl orx_iterable::Iterable) -> bool + where + O: PartialEq + 'a, + { + let mut b = b.iter(); + for x in self.iter() { + match b.next() { + Some(y) if x != y => return false, + None => return false, + _ => {} + } + } + + b.next().is_none() + } + + #[cfg(test)] + fn 
is_equal_to_ref(&self, b: impl orx_iterable::Iterable) -> bool + where + O: PartialEq, + { + let mut b = b.iter(); + for x in self.iter() { + match b.next() { + Some(y) if x != &y => return false, + None => return false, + _ => {} + } + } + + b.next().is_none() + } +} + +/// Collection types into which outputs of a parallel computations can be collected into. +pub trait UParCollectInto: UParCollectIntoCore + UParCollectIntoCore {} + +impl UParCollectInto for C where C: UParCollectIntoCore + UParCollectIntoCore {} diff --git a/src/using/collect_into/vec.rs b/src/using/collect_into/vec.rs new file mode 100644 index 0000000..6f3bfc8 --- /dev/null +++ b/src/using/collect_into/vec.rs @@ -0,0 +1,112 @@ +// use super::par_collect_into::ParCollectIntoCore; +// use crate::Params; +// use crate::collect_into::utils::extend_vec_from_split; +// use crate::computational_variants::computations::map_collect_into; +// use crate::generic_values::runner_results::{Fallibility, Infallible}; +// use crate::generic_values::{TransformableValues, Values}; +// use crate::orch::Orchestrator; +// use alloc::vec::Vec; +// use orx_concurrent_iter::ConcurrentIter; +// use orx_fixed_vec::FixedVec; +// use orx_split_vec::SplitVec; + +// impl ParCollectIntoCore for Vec +// where +// O: Send + Sync, +// { +// type BridgePinnedVec = FixedVec; + +// fn empty(iter_len: Option) -> Self { +// match iter_len { +// Some(len) => Vec::with_capacity(len), +// None => Vec::new(), +// } +// } + +// fn m_collect_into( +// mut self, +// orchestrator: R, +// params: Params, +// iter: I, +// map1: M1, +// ) -> Self +// where +// R: Orchestrator, +// I: ConcurrentIter, +// M1: Fn(I::Item) -> O + Sync, +// O: Send, +// { +// match iter.try_get_len() { +// None => { +// let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); +// let split_vec = split_vec.m_collect_into(orchestrator, params, iter, map1); +// extend_vec_from_split(self, split_vec) +// } +// Some(len) => { +// self.reserve(len); +// let fixed_vec = FixedVec::from(self); +// let (_, fixed_vec) = map_collect_into(orchestrator, params, iter, map1, fixed_vec); +// Vec::from(fixed_vec) +// } +// } +// } + +// fn x_collect_into( +// self, +// orchestrator: R, +// params: Params, +// iter: I, +// xap1: X1, +// ) -> Self +// where +// R: Orchestrator, +// I: ConcurrentIter, +// Vo: TransformableValues, +// X1: Fn(I::Item) -> Vo + Sync, +// { +// let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); +// let split_vec = split_vec.x_collect_into(orchestrator, params, iter, xap1); +// extend_vec_from_split(self, split_vec) +// } + +// fn x_try_collect_into( +// self, +// orchestrator: R, +// params: Params, +// iter: I, +// xap1: X1, +// ) -> Result::Error> +// where +// R: Orchestrator, +// I: ConcurrentIter, +// X1: Fn(I::Item) -> Vo + Sync, +// Vo: Values, +// Self: Sized, +// { +// let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); +// let result = split_vec.x_try_collect_into(orchestrator, params, iter, xap1); +// result.map(|split_vec| extend_vec_from_split(self, split_vec)) +// } + +// // test + +// #[cfg(test)] +// fn length(&self) -> usize { +// self.len() +// } +// } + +// // #[cfg(test)] +// // mod tsts { +// // use crate::*; +// // use alloc::vec::Vec; +// // use orx_split_vec::SplitVec; + +// // #[test] +// // fn abc() { +// // fn take>(c: C) {} + +// // take(SplitVec::new()); +// // take(Vec::new()); +// // } +// // } diff --git a/src/using/mod.rs b/src/using/mod.rs index ea53917..5e831a3 100644 --- 
a/src/using/mod.rs +++ b/src/using/mod.rs @@ -1,3 +1,4 @@ +mod collect_into; mod computational_variants; mod u_par_iter; mod using_variants; From 99ce399868e33adf714123101707402482845814 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:10:12 +0200 Subject: [PATCH 147/264] reorganization --- .../computations => collect_into}/collect.rs | 0 src/collect_into/mod.rs | 1 + src/collect_into/split_vec.rs | 4 +--- src/collect_into/vec.rs | 2 +- src/computational_variants/computations/mod.rs | 3 --- src/computational_variants/mod.rs | 1 - src/computational_variants/tests/map/collect.rs | 6 ++---- 7 files changed, 5 insertions(+), 12 deletions(-) rename src/{computational_variants/computations => collect_into}/collect.rs (100%) delete mode 100644 src/computational_variants/computations/mod.rs diff --git a/src/computational_variants/computations/collect.rs b/src/collect_into/collect.rs similarity index 100% rename from src/computational_variants/computations/collect.rs rename to src/collect_into/collect.rs diff --git a/src/collect_into/mod.rs b/src/collect_into/mod.rs index 9ba4ccb..b121faf 100644 --- a/src/collect_into/mod.rs +++ b/src/collect_into/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod collect; mod fixed_vec; mod par_collect_into; mod split_vec; diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index 1a4d052..fa16533 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -1,9 +1,7 @@ +use super::collect::{map_collect_into, xap_collect_into, xap_try_collect_into}; use super::par_collect_into::ParCollectIntoCore; use crate::Params; use crate::collect_into::utils::split_vec_reserve; -use crate::computational_variants::computations::{ - map_collect_into, xap_collect_into, xap_try_collect_into, -}; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 663f49f..3f5af17 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -1,7 +1,7 @@ use super::par_collect_into::ParCollectIntoCore; use crate::Params; +use crate::collect_into::collect::map_collect_into; use crate::collect_into::utils::extend_vec_from_split; -use crate::computational_variants::computations::map_collect_into; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; diff --git a/src/computational_variants/computations/mod.rs b/src/computational_variants/computations/mod.rs deleted file mode 100644 index 7f9b9dd..0000000 --- a/src/computational_variants/computations/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod collect; - -pub use collect::{map_collect_into, xap_collect_into, xap_try_collect_into}; diff --git a/src/computational_variants/mod.rs b/src/computational_variants/mod.rs index 8367340..89df0fa 100644 --- a/src/computational_variants/mod.rs +++ b/src/computational_variants/mod.rs @@ -1,7 +1,6 @@ #[cfg(test)] mod tests; -pub(crate) mod computations; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with None. 
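///
/// As an illustrative sketch of the early-exit behavior (the conversion
/// method name here is a placeholder, not the exact API of this module),
/// each element maps to an `Option` and the whole parallel computation
/// yields `None` as soon as any element does:
///
/// ```ignore
/// let halved: Option<Vec<u32>> = numbers
///     .par()
///     .map(|x| (x % 2 == 0).then_some(x / 2)) // None for odd numbers
///     .into_fallible_option()
///     .collect();
/// ```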
 pub mod fallible_option;
diff --git a/src/computational_variants/tests/map/collect.rs b/src/computational_variants/tests/map/collect.rs
index cb526e1..aa28a5b 100644
--- a/src/computational_variants/tests/map/collect.rs
+++ b/src/computational_variants/tests/map/collect.rs
@@ -1,7 +1,5 @@
-use crate::{
-    IterationOrder, Params, computational_variants::computations::map_collect_into,
-    orch::DefaultOrchestrator,
-};
+use crate::collect_into::collect::map_collect_into;
+use crate::{IterationOrder, Params, orch::DefaultOrchestrator};
 use alloc::format;
 use alloc::string::{String, ToString};
 use alloc::vec::Vec;

From c73104be3a813169c2efd9f1041df2dd4f54fb17 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Wed, 17 Sep 2025 06:16:27 +0200
Subject: [PATCH 148/264] init collect computations

---
 src/using/collect_into/collect.rs | 125 ++++++++++++++++++++++
 src/using/collect_into/mod.rs     |   1 +
 2 files changed, 126 insertions(+)
 create mode 100644 src/using/collect_into/collect.rs

diff --git a/src/using/collect_into/collect.rs b/src/using/collect_into/collect.rs
new file mode 100644
index 0000000..7082c15
--- /dev/null
+++ b/src/using/collect_into/collect.rs
@@ -0,0 +1,125 @@
+use crate::IterationOrder;
+use crate::Params;
+use crate::generic_values::Values;
+use crate::generic_values::runner_results::Infallible;
+use crate::generic_values::runner_results::ParallelCollect;
+use crate::generic_values::runner_results::ParallelCollectArbitrary;
+use crate::orch::{NumSpawned, Orchestrator};
+use crate::runner::parallel_runner_compute as prc;
+use crate::using::using_variants::Using;
+use orx_concurrent_iter::ConcurrentIter;
+use orx_fixed_vec::IntoConcurrentPinnedVec;
+
+pub fn map_collect_into<U, R, I, O, M1, P>(
+    using: U,
+    orchestrator: R,
+    params: Params,
+    iter: I,
+    map1: M1,
+    pinned_vec: P,
+) -> (NumSpawned, P)
+where
+    U: Using,
+    R: Orchestrator,
+    I: ConcurrentIter,
+    M1: Fn(&mut U::Item, I::Item) -> O + Sync,
+    O: Send,
+    P: IntoConcurrentPinnedVec<O>,
+{
+    match (params.is_sequential(), params.iteration_order) {
+        (true, _) => (
+            NumSpawned::zero(),
+            map_collect_into_seq(using, iter, map1, pinned_vec),
+        ),
+        #[cfg(test)]
+        (false, IterationOrder::Arbitrary) => {
+            prc::collect_arbitrary::m(using, orchestrator, params, iter, map1, pinned_vec)
+        }
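+        // Dispatch summary: a sequential run collects directly on the caller
+        // thread; the arbitrary-order parallel arm (compiled only for tests
+        // at this commit) uses the unordered collector; the default parallel
+        // arm below preserves input order.
+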
(num_threads, result) = + prc::collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); + let pinned_vec = match result { + ParallelCollect::AllCollected { pinned_vec } => pinned_vec, + ParallelCollect::StoppedByWhileCondition { + pinned_vec, + stopped_idx: _, + } => pinned_vec, + }; + (num_threads, pinned_vec) + } + } +} + +fn xap_collect_into_seq(using: U, iter: I, xap1: X1, mut pinned_vec: P) -> P +where + U: Using, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + let mut u = using.into_inner(); + let u = &mut u; + let iter = iter.into_seq_iter(); + for i in iter { + let vt = xap1(u, i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if Vo::sequential_push_to_stop(done).is_some() { + break; + } + } + + pinned_vec +} diff --git a/src/using/collect_into/mod.rs b/src/using/collect_into/mod.rs index 793605b..898b934 100644 --- a/src/using/collect_into/mod.rs +++ b/src/using/collect_into/mod.rs @@ -1,3 +1,4 @@ +mod collect; mod fixed_vec; mod split_vec; mod u_par_collect_into; From 17af23797fdd1ce4c3048247e66140b83513b881 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:27:12 +0200 Subject: [PATCH 149/264] thread reduce computation for using --- src/using/collect_into/collect.rs | 240 +++++++++--------- src/using/mod.rs | 1 + src/using/runner/mod.rs | 2 + .../runner/parallel_runner_compute/mod.rs | 1 + src/using/runner/thread_runner_compute/mod.rs | 5 + 5 files changed, 129 insertions(+), 120 deletions(-) create mode 100644 src/using/runner/mod.rs create mode 100644 src/using/runner/parallel_runner_compute/mod.rs create mode 100644 src/using/runner/thread_runner_compute/mod.rs diff --git a/src/using/collect_into/collect.rs b/src/using/collect_into/collect.rs index 7082c15..58103f4 100644 --- a/src/using/collect_into/collect.rs +++ b/src/using/collect_into/collect.rs @@ -1,125 +1,125 @@ -use crate::IterationOrder; -use crate::Params; -use crate::generic_values::Values; -use crate::generic_values::runner_results::Infallible; -use crate::generic_values::runner_results::ParallelCollect; -use crate::generic_values::runner_results::ParallelCollectArbitrary; -use crate::orch::{NumSpawned, Orchestrator}; -use crate::runner::parallel_runner_compute as prc; -use crate::using::using_variants::Using; -use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; +// use crate::IterationOrder; +// use crate::Params; +// use crate::generic_values::Values; +// use crate::generic_values::runner_results::Infallible; +// use crate::generic_values::runner_results::ParallelCollect; +// use crate::generic_values::runner_results::ParallelCollectArbitrary; +// use crate::orch::{NumSpawned, Orchestrator}; +// use crate::runner::parallel_runner_compute as prc; +// use crate::using::using_variants::Using; +// use orx_concurrent_iter::ConcurrentIter; +// use orx_fixed_vec::IntoConcurrentPinnedVec; -pub fn map_collect_into( - using: U, - orchestrator: R, - params: Params, - iter: I, - map1: M1, - pinned_vec: P, -) -> (NumSpawned, P) -where - U: Using, - R: Orchestrator, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - O: Send, - P: IntoConcurrentPinnedVec, -{ - match (params.is_sequential(), params.iteration_order) { - (true, _) => ( - NumSpawned::zero(), - map_collect_into_seq(using, iter, map1, pinned_vec), - ), - #[cfg(test)] - (false, IterationOrder::Arbitrary) => { - prc::collect_arbitrary::m(using, orchestrator, params, iter, map1, pinned_vec) - } - 
(false, _) => prc::collect_ordered::m(using, orchestrator, params, iter, map1, pinned_vec), - } -} +// pub fn map_collect_into( +// using: U, +// orchestrator: R, +// params: Params, +// iter: I, +// map1: M1, +// pinned_vec: P, +// ) -> (NumSpawned, P) +// where +// U: Using, +// R: Orchestrator, +// I: ConcurrentIter, +// M1: Fn(&mut U::Item, I::Item) -> O + Sync, +// O: Send, +// P: IntoConcurrentPinnedVec, +// { +// match (params.is_sequential(), params.iteration_order) { +// (true, _) => ( +// NumSpawned::zero(), +// map_collect_into_seq(using, iter, map1, pinned_vec), +// ), +// #[cfg(test)] +// (false, IterationOrder::Arbitrary) => { +// prc::collect_arbitrary::m(using, orchestrator, params, iter, map1, pinned_vec) +// } +// (false, _) => prc::collect_ordered::m(using, orchestrator, params, iter, map1, pinned_vec), +// } +// } -fn map_collect_into_seq(using: U, iter: I, map1: M1, mut pinned_vec: P) -> P -where - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - O: Send, - P: IntoConcurrentPinnedVec, -{ - let mut u = using.into_inner(); - let u = &mut u; - let iter = iter.into_seq_iter(); - for i in iter { - pinned_vec.push(map1(u, i)); - } - pinned_vec -} +// fn map_collect_into_seq(using: U, iter: I, map1: M1, mut pinned_vec: P) -> P +// where +// U: Using, +// I: ConcurrentIter, +// M1: Fn(&mut U::Item, I::Item) -> O + Sync, +// O: Send, +// P: IntoConcurrentPinnedVec, +// { +// let mut u = using.into_inner(); +// let u = &mut u; +// let iter = iter.into_seq_iter(); +// for i in iter { +// pinned_vec.push(map1(u, i)); +// } +// pinned_vec +// } -pub fn xap_collect_into( - using: U, - orchestrator: R, - params: Params, - iter: I, - xap1: X1, - pinned_vec: P, -) -> (NumSpawned, P) -where - U: Using, - R: Orchestrator, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, - P: IntoConcurrentPinnedVec, -{ - match (params.is_sequential(), params.iteration_order) { - (true, _) => ( - NumSpawned::zero(), - xap_collect_into_seq(using, iter, xap1, pinned_vec), - ), - (false, IterationOrder::Arbitrary) => { - let (num_threads, result) = - prc::collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); - let pinned_vec = match result { - ParallelCollectArbitrary::AllOrUntilWhileCollected { pinned_vec } => pinned_vec, - }; - (num_threads, pinned_vec) - } - (false, IterationOrder::Ordered) => { - let (num_threads, result) = - prc::collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); - let pinned_vec = match result { - ParallelCollect::AllCollected { pinned_vec } => pinned_vec, - ParallelCollect::StoppedByWhileCondition { - pinned_vec, - stopped_idx: _, - } => pinned_vec, - }; - (num_threads, pinned_vec) - } - } -} +// pub fn xap_collect_into( +// using: U, +// orchestrator: R, +// params: Params, +// iter: I, +// xap1: X1, +// pinned_vec: P, +// ) -> (NumSpawned, P) +// where +// U: Using, +// R: Orchestrator, +// I: ConcurrentIter, +// Vo: Values, +// Vo::Item: Send, +// X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +// P: IntoConcurrentPinnedVec, +// { +// match (params.is_sequential(), params.iteration_order) { +// (true, _) => ( +// NumSpawned::zero(), +// xap_collect_into_seq(using, iter, xap1, pinned_vec), +// ), +// (false, IterationOrder::Arbitrary) => { +// let (num_threads, result) = +// prc::collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); +// let pinned_vec = match result { +// ParallelCollectArbitrary::AllOrUntilWhileCollected { pinned_vec } => pinned_vec, +// }; +// 
(num_threads, pinned_vec) +// } +// (false, IterationOrder::Ordered) => { +// let (num_threads, result) = +// prc::collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); +// let pinned_vec = match result { +// ParallelCollect::AllCollected { pinned_vec } => pinned_vec, +// ParallelCollect::StoppedByWhileCondition { +// pinned_vec, +// stopped_idx: _, +// } => pinned_vec, +// }; +// (num_threads, pinned_vec) +// } +// } +// } -fn xap_collect_into_seq(using: U, iter: I, xap1: X1, mut pinned_vec: P) -> P -where - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, - P: IntoConcurrentPinnedVec, -{ - let mut u = using.into_inner(); - let u = &mut u; - let iter = iter.into_seq_iter(); - for i in iter { - let vt = xap1(u, i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if Vo::sequential_push_to_stop(done).is_some() { - break; - } - } +// fn xap_collect_into_seq(using: U, iter: I, xap1: X1, mut pinned_vec: P) -> P +// where +// U: Using, +// I: ConcurrentIter, +// Vo: Values, +// Vo::Item: Send, +// X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +// P: IntoConcurrentPinnedVec, +// { +// let mut u = using.into_inner(); +// let u = &mut u; +// let iter = iter.into_seq_iter(); +// for i in iter { +// let vt = xap1(u, i); +// let done = vt.push_to_pinned_vec(&mut pinned_vec); +// if Vo::sequential_push_to_stop(done).is_some() { +// break; +// } +// } - pinned_vec -} +// pinned_vec +// } diff --git a/src/using/mod.rs b/src/using/mod.rs index 5e831a3..e41688f 100644 --- a/src/using/mod.rs +++ b/src/using/mod.rs @@ -1,5 +1,6 @@ mod collect_into; mod computational_variants; +mod runner; mod u_par_iter; mod using_variants; diff --git a/src/using/runner/mod.rs b/src/using/runner/mod.rs new file mode 100644 index 0000000..0f50140 --- /dev/null +++ b/src/using/runner/mod.rs @@ -0,0 +1,2 @@ +mod parallel_runner_compute; +mod thread_runner_compute; diff --git a/src/using/runner/parallel_runner_compute/mod.rs b/src/using/runner/parallel_runner_compute/mod.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/src/using/runner/parallel_runner_compute/mod.rs @@ -0,0 +1 @@ + diff --git a/src/using/runner/thread_runner_compute/mod.rs b/src/using/runner/thread_runner_compute/mod.rs new file mode 100644 index 0000000..ddc6e7e --- /dev/null +++ b/src/using/runner/thread_runner_compute/mod.rs @@ -0,0 +1,5 @@ +pub(super) mod collect_arbitrary; +pub(super) mod collect_ordered; +pub(super) mod next; +pub(super) mod next_any; +pub(super) mod reduce; From f5f775c08b763a45f344acdc5d694278cf125342 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:27:23 +0200 Subject: [PATCH 150/264] init thread using modules --- src/runner/thread_runner_compute/mod.rs | 10 +- .../collect_arbitrary.rs | 0 .../thread_runner_compute/collect_ordered.rs | 0 .../runner/thread_runner_compute/next.rs | 0 .../runner/thread_runner_compute/next_any.rs | 0 .../runner/thread_runner_compute/reduce.rs | 172 ++++++++++++++++++ 6 files changed, 177 insertions(+), 5 deletions(-) create mode 100644 src/using/runner/thread_runner_compute/collect_arbitrary.rs create mode 100644 src/using/runner/thread_runner_compute/collect_ordered.rs create mode 100644 src/using/runner/thread_runner_compute/next.rs create mode 100644 src/using/runner/thread_runner_compute/next_any.rs create mode 100644 src/using/runner/thread_runner_compute/reduce.rs diff --git a/src/runner/thread_runner_compute/mod.rs b/src/runner/thread_runner_compute/mod.rs index 6ccd3f7..ddc6e7e 100644 --- 
a/src/runner/thread_runner_compute/mod.rs +++ b/src/runner/thread_runner_compute/mod.rs @@ -1,5 +1,5 @@ -pub(crate) mod collect_arbitrary; -pub(crate) mod collect_ordered; -pub(crate) mod next; -pub(crate) mod next_any; -pub(crate) mod reduce; +pub(super) mod collect_arbitrary; +pub(super) mod collect_ordered; +pub(super) mod next; +pub(super) mod next_any; +pub(super) mod reduce; diff --git a/src/using/runner/thread_runner_compute/collect_arbitrary.rs b/src/using/runner/thread_runner_compute/collect_arbitrary.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/using/runner/thread_runner_compute/collect_ordered.rs b/src/using/runner/thread_runner_compute/collect_ordered.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/using/runner/thread_runner_compute/next.rs b/src/using/runner/thread_runner_compute/next.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/using/runner/thread_runner_compute/next_any.rs b/src/using/runner/thread_runner_compute/next_any.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/using/runner/thread_runner_compute/reduce.rs b/src/using/runner/thread_runner_compute/reduce.rs new file mode 100644 index 0000000..11f70ea --- /dev/null +++ b/src/using/runner/thread_runner_compute/reduce.rs @@ -0,0 +1,172 @@ +use crate::{ + ThreadRunner, + generic_values::{ + Values, + runner_results::{Reduce, StopReduce}, + }, +}; +use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; + +// m + +pub fn m( + mut u: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + map1: &M1, + reduce: &Red, +) -> Option +where + C: ThreadRunner, + I: ConcurrentIter, + M1: Fn(&mut U, I::Item) -> O, + Red: Fn(&mut U, O, O) -> O, +{ + let u = &mut u; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller(); + + let mut acc = None; + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some(i) => { + let y = map1(u, i); + acc = match acc { + Some(x) => Some(reduce(u, x, y)), + None => Some(y), + }; + } + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull() { + Some(chunk) => { + let mut res = None; + for x in chunk { + let b = map1(u, x); + res = match res { + Some(a) => Some(reduce(u, a, b)), + None => Some(b), + } + } + acc = match acc { + Some(x) => match res { + Some(y) => Some(reduce(u, x, y)), + None => Some(x), + }, + None => res, + }; + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); + acc +} + +// x + +pub fn x( + mut u: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + xap1: &X1, + reduce: &Red, +) -> Reduce +where + C: ThreadRunner, + I: ConcurrentIter, + Vo: Values, + X1: Fn(&mut U, I::Item) -> Vo, + Red: Fn(&mut U, Vo::Item, Vo::Item) -> Vo::Item, +{ + let u = &mut u; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller(); + + let mut acc = None; + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some(i) => { + let vo = xap1(u, i); + let reduce = vo.u_acc_reduce(u, acc, reduce); + acc = match Vo::reduce_to_stop(reduce) { + Ok(acc) => acc, + Err(stop) => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + 
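+                            // Keep the ThreadRunner hooks balanced on this
+                            // early-exit path: the chunk is completed first,
+                            // then the task, exactly as on the normal exit
+                            // path, before the stop reason is returned.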
runner.complete_task(shared_state); + match stop { + StopReduce::DueToWhile { acc } => { + return Reduce::StoppedByWhileCondition { acc }; + } + StopReduce::DueToError { error } => { + return Reduce::StoppedByError { error }; + } + } + } + }; + } + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull() { + Some(chunk) => { + for i in chunk { + let vo = xap1(u, i); + let reduce = vo.u_acc_reduce(u, acc, reduce); + acc = match Vo::reduce_to_stop(reduce) { + Ok(acc) => acc, + Err(stop) => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + match stop { + StopReduce::DueToWhile { acc } => { + return Reduce::StoppedByWhileCondition { acc }; + } + StopReduce::DueToError { error } => { + return Reduce::StoppedByError { error }; + } + } + } + }; + } + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); + + Reduce::Done { acc } +} From 19fb23b884b365c06e675461d13893daafeadd8f Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:29:10 +0200 Subject: [PATCH 151/264] next thread computation for using --- .../runner/thread_runner_compute/next.rs | 161 ++++++++++++++++++ 1 file changed, 161 insertions(+) diff --git a/src/using/runner/thread_runner_compute/next.rs b/src/using/runner/thread_runner_compute/next.rs index e69de29..8aa33e8 100644 --- a/src/using/runner/thread_runner_compute/next.rs +++ b/src/using/runner/thread_runner_compute/next.rs @@ -0,0 +1,161 @@ +use crate::{ + ThreadRunner, + generic_values::Values, + generic_values::runner_results::{Next, NextWithIdx}, +}; +use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; + +pub fn m( + mut using: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + map1: &M1, +) -> Option<(usize, O)> +where + C: ThreadRunner, + I: ConcurrentIter, + M1: Fn(&mut U, I::Item) -> O, +{ + let u = &mut using; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller_with_idx(); + + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some((idx, i)) => { + let first = map1(u, i); + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Some((idx, first)); + } + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull_with_idx() { + Some((idx, mut chunk)) => { + if let Some(i) = chunk.next() { + let first = map1(u, i); + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Some((idx, first)); + } + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); + None +} + +pub fn x( + mut using: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + xap1: &X1, +) -> NextWithIdx +where + C: ThreadRunner, + I: ConcurrentIter, + Vo: Values, + X1: Fn(&mut U, I::Item) -> Vo, +{ + let u = &mut using; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller_with_idx(); + + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some((idx, i)) => { + let vt = xap1(u, 
i); + match vt.next() { + Next::Done { value } => { + if let Some(value) = value { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return NextWithIdx::Found { idx, value }; + } + } + Next::StoppedByError { error } => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return NextWithIdx::StoppedByError { error }; + } + Next::StoppedByWhileCondition => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return NextWithIdx::StoppedByWhileCondition { idx }; + } + } + } + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull_with_idx() { + Some((idx, chunk)) => { + for i in chunk { + let vt = xap1(u, i); + match vt.next() { + Next::Done { value } => { + if let Some(value) = value { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return NextWithIdx::Found { idx, value }; + } + } + Next::StoppedByError { error } => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return NextWithIdx::StoppedByError { error }; + } + Next::StoppedByWhileCondition => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return NextWithIdx::StoppedByWhileCondition { idx }; + } + } + } + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); + NextWithIdx::NotFound +} From 2374c70662eb57af474e86eb00fc119be15b9107 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:30:26 +0200 Subject: [PATCH 152/264] thread next-any computation for using --- .../runner/thread_runner_compute/next_any.rs | 163 ++++++++++++++++++ 1 file changed, 163 insertions(+) diff --git a/src/using/runner/thread_runner_compute/next_any.rs b/src/using/runner/thread_runner_compute/next_any.rs index e69de29..c44d320 100644 --- a/src/using/runner/thread_runner_compute/next_any.rs +++ b/src/using/runner/thread_runner_compute/next_any.rs @@ -0,0 +1,163 @@ +use crate::{ + ThreadRunner, + generic_values::Values, + generic_values::runner_results::{Fallibility, Next}, +}; +use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; + +pub fn m( + mut using: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + map1: &M1, +) -> Option +where + C: ThreadRunner, + I: ConcurrentIter, + O: Send, + M1: Fn(&mut U, I::Item) -> O, +{ + let u = &mut using; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller(); + + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some(i) => { + let first = map1(u, i); + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Some(first); + } + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull() { + Some(mut chunk) => { + if let Some(i) = chunk.next() { + let first = map1(u, i); + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Some(first); + } + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, 
chunk_size); + } + + runner.complete_task(shared_state); + None +} + +pub fn x( + mut using: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + xap1: &X1, +) -> Result, ::Error> +where + C: ThreadRunner, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(&mut U, I::Item) -> Vo, +{ + let u = &mut using; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller(); + + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some(i) => { + let vt = xap1(u, i); + match vt.next() { + Next::Done { value } => { + if let Some(value) = value { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Ok(Some(value)); + } + } + Next::StoppedByError { error } => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Err(error); + } + Next::StoppedByWhileCondition => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Ok(None); + } + } + } + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull() { + Some(chunk) => { + for i in chunk { + let vt = xap1(u, i); + match vt.next() { + Next::Done { value } => { + if let Some(value) = value { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Ok(Some(value)); + } + } + Next::StoppedByError { error } => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Err(error); + } + Next::StoppedByWhileCondition => { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + return Ok(None); + } + } + } + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); + Ok(None) +} From cb9f7a5cad4d2bf12b34e188b1cff0fae11b23a9 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:31:41 +0200 Subject: [PATCH 153/264] collect_ordered thread computation for using --- .../thread_runner_compute/collect_ordered.rs | 146 ++++++++++++++++++ 1 file changed, 146 insertions(+) diff --git a/src/using/runner/thread_runner_compute/collect_ordered.rs b/src/using/runner/thread_runner_compute/collect_ordered.rs index e69de29..e6716c6 100644 --- a/src/using/runner/thread_runner_compute/collect_ordered.rs +++ b/src/using/runner/thread_runner_compute/collect_ordered.rs @@ -0,0 +1,146 @@ +use crate::ThreadRunner; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{StopWithIdx, ThreadCollect}; +use alloc::vec::Vec; +use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; +use orx_concurrent_ordered_bag::ConcurrentOrderedBag; +use orx_fixed_vec::IntoConcurrentPinnedVec; + +pub fn m( + mut using: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + map1: &M1, + o_bag: &ConcurrentOrderedBag, + offset: usize, +) where + C: ThreadRunner, + I: ConcurrentIter, + M1: Fn(&mut U, I::Item) -> O, + P: IntoConcurrentPinnedVec, + O: Send, +{ + let u = &mut using; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller_with_idx(); + + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + 
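// Chunk-size negotiation, shared by all of these thread computations: the
// runner proposes a size each round; 0 or 1 falls back to the indexed
// per-item puller, while a larger size c lazily re-creates the chunk puller
// whenever c exceeds its current capacity.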
runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some((idx, value)) => unsafe { o_bag.set_value(offset + idx, map1(u, value)) }, + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull_with_idx() { + Some((begin_idx, chunk)) => { + let values = chunk.map(|x| map1(u, x)); + unsafe { o_bag.set_values(offset + begin_idx, values) }; + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); +} + +pub fn x( + mut using: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + xap1: &X1, +) -> ThreadCollect +where + C: ThreadRunner, + I: ConcurrentIter, + Vo: Values, + X1: Fn(&mut U, I::Item) -> Vo, +{ + let u = &mut using; + let mut collected = Vec::new(); + let out_vec = &mut collected; + + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller_with_idx(); + + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some((idx, i)) => { + let vo = xap1(u, i); + let done = vo.push_to_vec_with_idx(idx, out_vec); + if let Some(stop) = Vo::ordered_push_to_stop(done) { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + match stop { + StopWithIdx::DueToWhile { idx } => { + return ThreadCollect::StoppedByWhileCondition { + vec: collected, + stopped_idx: idx, + }; + } + StopWithIdx::DueToError { idx: _, error } => { + return ThreadCollect::StoppedByError { error }; + } + } + } + } + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull_with_idx() { + Some((chunk_begin_idx, chunk)) => { + for (within_chunk_idx, value) in chunk.enumerate() { + let vo = xap1(u, value); + let done = vo.push_to_vec_with_idx(chunk_begin_idx, out_vec); + if let Some(stop) = Vo::ordered_push_to_stop(done) { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + match stop { + StopWithIdx::DueToWhile { idx } => { + return ThreadCollect::StoppedByWhileCondition { + vec: collected, + stopped_idx: idx + within_chunk_idx, + }; + } + StopWithIdx::DueToError { idx: _, error } => { + return ThreadCollect::StoppedByError { error }; + } + } + } + } + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); + + ThreadCollect::AllCollected { vec: collected } +} From fef7a0723f7b46bdaab33354721bbaac29f3fb04 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:33:01 +0200 Subject: [PATCH 154/264] collect_arbitrary thread computation of using --- .../collect_arbitrary.rs | 144 ++++++++++++++++++ 1 file changed, 144 insertions(+) diff --git a/src/using/runner/thread_runner_compute/collect_arbitrary.rs b/src/using/runner/thread_runner_compute/collect_arbitrary.rs index e69de29..bcba9ca 100644 --- a/src/using/runner/thread_runner_compute/collect_arbitrary.rs +++ b/src/using/runner/thread_runner_compute/collect_arbitrary.rs @@ -0,0 +1,144 @@ +use crate::ThreadRunner; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Stop, ThreadCollectArbitrary}; +use orx_concurrent_bag::ConcurrentBag; +use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; +use orx_fixed_vec::IntoConcurrentPinnedVec; + 
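// Overview: these thread-level computations mirror the non-using
// collect_arbitrary variants, except that each map/xap closure additionally
// receives `&mut U`, a mutable value owned by this thread; results are
// pushed into a shared ConcurrentBag, so the collected order is arbitrary.
//
// For orientation only, a hypothetical caller-side sketch (the `using`
// combinator name and closure shapes are assumptions here, not part of this
// patch): each thread gets its own scratch buffer that it may mutate freely
// while mapping items.
//
//     let lens: Vec<usize> = inputs
//         .par()
//         .using(|_thread_idx| String::new()) // per-thread mutable state
//         .map(|buf: &mut String, x: &str| {
//             buf.clear();        // reuse the buffer across items
//             buf.push_str(x);
//             buf.len()
//         })
//         .collect();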
+// m + +#[cfg(test)] +pub fn m( + mut using: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + map1: &M1, + bag: &ConcurrentBag, +) where + C: ThreadRunner, + I: ConcurrentIter, + M1: Fn(&mut U, I::Item) -> O, + P: IntoConcurrentPinnedVec, + O: Send, +{ + let u = &mut using; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller(); + + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some(value) => _ = bag.push(map1(u, value)), + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull() { + Some(chunk) => _ = bag.extend(chunk.map(|x| map1(u, x))), + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); +} + +// x + +pub fn x( + mut using: U, + mut runner: C, + iter: &I, + shared_state: &C::SharedState, + xap1: &X1, + bag: &ConcurrentBag, +) -> ThreadCollectArbitrary +where + C: ThreadRunner, + I: ConcurrentIter, + Vo: Values, + X1: Fn(&mut U, I::Item) -> Vo, + P: IntoConcurrentPinnedVec, + Vo::Item: Send, +{ + let u = &mut using; + let mut chunk_puller = iter.chunk_puller(0); + let mut item_puller = iter.item_puller(); + + loop { + let chunk_size = runner.next_chunk_size(shared_state, iter); + + runner.begin_chunk(chunk_size); + + match chunk_size { + 0 | 1 => match item_puller.next() { + Some(value) => { + // TODO: possible to try to get len and bag.extend(values_vt.values()) when available, same holds for chunk below + let vo = xap1(u, value); + let done = vo.push_to_bag(bag); + + if let Some(stop) = Vo::arbitrary_push_to_stop(done) { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + match stop { + Stop::DueToWhile => { + return ThreadCollectArbitrary::StoppedByWhileCondition; + } + Stop::DueToError { error } => { + return ThreadCollectArbitrary::StoppedByError { error }; + } + } + } + } + None => break, + }, + c => { + if c > chunk_puller.chunk_size() { + chunk_puller = iter.chunk_puller(c); + } + + match chunk_puller.pull() { + Some(chunk) => { + for value in chunk { + let vo = xap1(u, value); + let done = vo.push_to_bag(bag); + + if let Some(stop) = Vo::arbitrary_push_to_stop(done) { + iter.skip_to_end(); + runner.complete_chunk(shared_state, chunk_size); + runner.complete_task(shared_state); + match stop { + Stop::DueToWhile => { + return ThreadCollectArbitrary::StoppedByWhileCondition; + } + Stop::DueToError { error } => { + return ThreadCollectArbitrary::StoppedByError { error }; + } + } + } + } + } + None => break, + } + } + } + + runner.complete_chunk(shared_state, chunk_size); + } + + runner.complete_task(shared_state); + + ThreadCollectArbitrary::AllCollected +} From 05d9f8f8d2d3a12e5c575f0e211cb9f8b7b6ba88 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:37:09 +0200 Subject: [PATCH 155/264] wip --- .../collect_arbitrary.rs | 0 .../collect_ordered.rs | 0 .../runner/parallel_runner_compute/mod.rs | 6 +- .../runner/parallel_runner_compute/next.rs | 0 .../parallel_runner_compute/next_any.rs | 0 .../runner/parallel_runner_compute/reduce.rs | 74 +++++++++++++++++++ 6 files changed, 79 insertions(+), 1 deletion(-) create mode 100644 src/using/runner/parallel_runner_compute/collect_arbitrary.rs create mode 100644 src/using/runner/parallel_runner_compute/collect_ordered.rs create mode 100644 
src/using/runner/parallel_runner_compute/next.rs create mode 100644 src/using/runner/parallel_runner_compute/next_any.rs create mode 100644 src/using/runner/parallel_runner_compute/reduce.rs diff --git a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/using/runner/parallel_runner_compute/collect_ordered.rs b/src/using/runner/parallel_runner_compute/collect_ordered.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/using/runner/parallel_runner_compute/mod.rs b/src/using/runner/parallel_runner_compute/mod.rs index 8b13789..6ccd3f7 100644 --- a/src/using/runner/parallel_runner_compute/mod.rs +++ b/src/using/runner/parallel_runner_compute/mod.rs @@ -1 +1,5 @@ - +pub(crate) mod collect_arbitrary; +pub(crate) mod collect_ordered; +pub(crate) mod next; +pub(crate) mod next_any; +pub(crate) mod reduce; diff --git a/src/using/runner/parallel_runner_compute/next.rs b/src/using/runner/parallel_runner_compute/next.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/using/runner/parallel_runner_compute/next_any.rs b/src/using/runner/parallel_runner_compute/next_any.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/using/runner/parallel_runner_compute/reduce.rs b/src/using/runner/parallel_runner_compute/reduce.rs new file mode 100644 index 0000000..c517c41 --- /dev/null +++ b/src/using/runner/parallel_runner_compute/reduce.rs @@ -0,0 +1,74 @@ +use crate::Params; +use crate::generic_values::Values; +use crate::generic_values::runner_results::Fallibility; +use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; +use crate::runner::ComputationKind; +use crate::using::runner::thread_runner_compute as th; +use crate::using::using_variants::Using; +use orx_concurrent_iter::ConcurrentIter; + +// // m + +// pub fn m( +// using: U, +// mut orchestrator: C, +// params: Params, +// iter: I, +// map1: M1, +// reduce: Red, +// ) -> (NumSpawned, Option) +// where +// U: Using, +// C: Orchestrator, +// I: ConcurrentIter, +// M1: Fn(I::Item) -> O + Sync, +// Red: Fn(O, O) -> O + Sync, +// O: Send, +// { +// let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { +// Ok(th::reduce::m(thread_runner, iter, state, &map1, &reduce)) +// }; +// let (num_spawned, result) = +// orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); + +// let acc = match result { +// Ok(results) => results.into_iter().filter_map(|x| x).reduce(reduce), +// }; + +// (num_spawned, acc) +// } + +// // x + +// type ResultReduce = +// Result::Item>, <::Fallibility as Fallibility>::Error>; + +// pub fn x( +// using: U, +// mut orchestrator: C, +// params: Params, +// iter: I, +// xap1: X1, +// reduce: Red, +// ) -> (NumSpawned, ResultReduce) +// where +// U: Using, +// C: Orchestrator, +// I: ConcurrentIter, +// Vo: Values, +// Vo::Item: Send, +// X1: Fn(I::Item) -> Vo + Sync, +// Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, +// { +// let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { +// th::reduce::x(thread_runner, iter, state, &xap1, &reduce).into_result() +// }; +// let (num_spawned, result) = orchestrator.map_all::( +// params, +// iter, +// ComputationKind::Collect, +// thread_map, +// ); +// let acc = result.map(|results| results.into_iter().filter_map(|x| x).reduce(reduce)); +// (num_spawned, acc) +// } From 1eba058d6581db2da170b37a33790a78d30fb42f Mon Sep 17 
00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:42:16 +0200 Subject: [PATCH 156/264] pool run in scope takes ownership of work --- src/orch/implementations/default_std_orchestrator.rs | 6 +++--- src/orch/implementations/rayon.rs | 12 ++++++------ src/orch/implementations/scoped_threadpool.rs | 8 ++++---- src/orch/par_thread_pool.rs | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index 7380c97..1f43f5b 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -56,13 +56,13 @@ impl ParThreadPool for StdDefaultPool { std::thread::scope(|s| f(&s)) } - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, + W: Fn() + Send + 'scope + 'env, { - s.spawn(|| work()); + s.spawn(move || work()); } } diff --git a/src/orch/implementations/rayon.rs b/src/orch/implementations/rayon.rs index 59cb8db..99f43f8 100644 --- a/src/orch/implementations/rayon.rs +++ b/src/orch/implementations/rayon.rs @@ -15,13 +15,13 @@ impl ParThreadPool for ThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, + W: Fn() + Send + 'scope + 'env, { - s.spawn(|_| work()); + s.spawn(move |_| work()); } fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) @@ -44,13 +44,13 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, + W: Fn() + Send + 'scope + 'env, { - s.spawn(|_| work()); + s.spawn(move |_| work()); } fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/orch/implementations/scoped_threadpool.rs index 950d0e3..27311d4 100644 --- a/src/orch/implementations/scoped_threadpool.rs +++ b/src/orch/implementations/scoped_threadpool.rs @@ -15,11 +15,11 @@ impl ParThreadPool for Pool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, + W: Fn() + Send + 'scope + 'env, { s.execute(work); } @@ -44,11 +44,11 @@ impl<'a> ParThreadPool for &'a mut Pool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, + W: Fn() + Send + 'scope + 'env, { s.execute(work); } diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index 7b07f4a..670632b 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -9,11 +9,11 @@ pub trait ParThreadPool { 'scope: 's, 'env: 'scope + 's; - fn 
run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env; + W: Fn() + Send + 'scope + 'env; fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where From ad1a973752004a18dd9f17fc9cf1bf0289f3a304 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:46:02 +0200 Subject: [PATCH 157/264] thread pool run closure uses num spawned before --- src/orch/orchestrator.rs | 6 +++--- src/orch/par_thread_pool.rs | 5 +++-- src/runner/parallel_runner_compute/collect_arbitrary.rs | 2 +- src/runner/parallel_runner_compute/collect_ordered.rs | 2 +- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index ed3378a..35b117d 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -36,13 +36,13 @@ pub trait Orchestrator { ) -> NumSpawned where I: ConcurrentIter, - F: Fn(&I, &SharedStateOf, ThreadRunnerOf) + Sync, + F: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) + Sync, { let runner = Self::new_runner(kind, params, iter.try_get_len()); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); - let work = || { - thread_do(&iter, &state, runner.new_thread_runner(&state)); + let work = |num_spawned| { + thread_do(num_spawned, &iter, &state, runner.new_thread_runner(&state)); }; self.thread_pool_mut().run(do_spawn, work) } diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index 670632b..c9f7260 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -29,13 +29,14 @@ pub trait ParThreadPoolCompute: ParThreadPool { fn run(&mut self, do_spawn: S, thread_do: F) -> NumSpawned where S: Fn(NumSpawned) -> bool + Sync, - F: Fn() + Sync, + F: Fn(NumSpawned) + Sync, { + let thread_do = &thread_do; let mut nt = NumSpawned::zero(); self.scoped_computation(|s| { while do_spawn(nt) { nt.increment(); - Self::run_in_scope(&s, &thread_do); + Self::run_in_scope(&s, move || thread_do(nt)); } }); nt diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 62039f4..22eb55d 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -33,7 +33,7 @@ where None => bag.reserve_maximum_capacity(capacity_bound), }; - let thread_work = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_work = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_arbitrary::m(thread_runner, iter, state, &map1, &bag); }; let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_work); diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index f544a32..c676563 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -26,7 +26,7 @@ where let offset = pinned_vec.len(); let o_bag: ConcurrentOrderedBag = pinned_vec.into(); - let thread_do = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_do = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_ordered::m(thread_runner, iter, state, &map1, &o_bag, offset); }; let num_spawned = 
orchestrator.run_all(params, iter, ComputationKind::Collect, thread_do); From baa911ecd5ad69da4318c4129b9831b3b787c4e5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 06:48:32 +0200 Subject: [PATCH 158/264] pool map closure uses num spawned --- src/orch/orchestrator.rs | 9 ++++++--- src/orch/par_thread_pool.rs | 6 +++--- src/runner/parallel_runner_compute/collect_arbitrary.rs | 2 +- src/runner/parallel_runner_compute/collect_ordered.rs | 2 +- src/runner/parallel_runner_compute/next.rs | 4 ++-- src/runner/parallel_runner_compute/next_any.rs | 4 ++-- src/runner/parallel_runner_compute/reduce.rs | 4 ++-- 7 files changed, 17 insertions(+), 14 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 35b117d..434bcbe 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -57,7 +57,8 @@ pub trait Orchestrator { where F: Fallibility, I: ConcurrentIter, - M: Fn(&I, &SharedStateOf, ThreadRunnerOf) -> Result + Sync, + M: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) -> Result + + Sync, T: Send, F::Error: Send, { @@ -65,7 +66,8 @@ pub trait Orchestrator { let runner = Self::new_runner(kind, params, iter_len); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); - let work = || thread_map(&iter, &state, runner.new_thread_runner(&state)); + let work = + |num_spawned| thread_map(num_spawned, &iter, &state, runner.new_thread_runner(&state)); let max_num_threads = self.max_num_threads_for_computation(params, iter_len); self.thread_pool_mut() .map_all::(do_spawn, work, max_num_threads) @@ -80,7 +82,8 @@ pub trait Orchestrator { ) -> (NumSpawned, Result, Never>) where I: ConcurrentIter, - M: Fn(&I, &SharedStateOf, ThreadRunnerOf) -> Result + Sync, + M: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) -> Result + + Sync, T: Send, { self.map_all::(params, iter, kind, thread_map) diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index c9f7260..8209599 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -51,17 +51,17 @@ pub trait ParThreadPoolCompute: ParThreadPool { where F: Fallibility, S: Fn(NumSpawned) -> bool + Sync, - M: Fn() -> Result + Sync, + M: Fn(NumSpawned) -> Result + Sync, T: Send, F::Error: Send, { let mut nt = NumSpawned::zero(); let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); - let work = || _ = thread_results.push(thread_map()); + let work = |nt| _ = thread_results.push(thread_map(nt)); self.scoped_computation(|s| { while do_spawn(nt) { nt.increment(); - Self::run_in_scope(&s, &work); + Self::run_in_scope(&s, move || work(nt)); } }); diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 22eb55d..4db01d8 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -68,7 +68,7 @@ where None => bag.reserve_maximum_capacity(capacity_bound), }; - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_arbitrary::x(thread_runner, iter, state, &xap1, &bag).into_result() }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index c676563..b16ee0c 100644 --- 
a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -53,7 +53,7 @@ where X1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 9081721..e2aaa7b 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -17,7 +17,7 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner| { Ok(th::next::m(thread_runner, iter, state, &map1)) }; let (num_spawned, result) = @@ -51,7 +51,7 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, th_runner| match th::next::x( + let thread_map = |_, iter: &I, state: &SharedStateOf, th_runner| match th::next::x( th_runner, iter, state, &xap1, ) { NextWithIdx::Found { idx, value } => Ok(Some(NextSuccess::Found { idx, value })), diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 8dedbba..f5f1514 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -17,7 +17,7 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner| { Ok(th::next_any::m(thread_runner, iter, state, &map1)) }; let (num_spawned, result) = @@ -45,7 +45,7 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, th_runner| { + let thread_map = |_, iter: &I, state: &SharedStateOf, th_runner| { th::next_any::x(th_runner, iter, state, &xap1) }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 15c072a..aa2c5ed 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -21,7 +21,7 @@ where Red: Fn(O, O) -> O + Sync, O: Send, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { Ok(th::reduce::m(thread_runner, iter, state, &map1, &reduce)) }; let (num_spawned, result) = @@ -54,7 +54,7 @@ where X1: Fn(I::Item) -> Vo + Sync, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::reduce::x(thread_runner, iter, state, &xap1, &reduce).into_result() }; let (num_spawned, result) = orchestrator.map_all::( From a9cee0d78f2bdfd4a6479716c5611c7b512c1abb Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 08:42:38 +0200 Subject: [PATCH 159/264] revert numspawned in closure --- .../default_std_orchestrator.rs | 6 ++--- src/orch/implementations/rayon.rs | 12 +++++----- src/orch/implementations/scoped_threadpool.rs | 8 +++---- src/orch/orchestrator.rs |
15 +++++-------- src/orch/par_thread_pool.rs | 15 ++++++------- .../collect_arbitrary.rs | 4 ++-- .../collect_ordered.rs | 4 ++-- src/runner/parallel_runner_compute/next.rs | 4 ++-- .../parallel_runner_compute/next_any.rs | 4 ++-- src/runner/parallel_runner_compute/reduce.rs | 4 ++-- .../runner/parallel_runner_compute/reduce.rs | 22 ++++++++++++------- 11 files changed, 50 insertions(+), 48 deletions(-) diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index 1f43f5b..7380c97 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -56,13 +56,13 @@ impl ParThreadPool for StdDefaultPool { std::thread::scope(|s| f(&s)) } - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Send + 'scope + 'env, + W: Fn() + Sync + 'scope + 'env, { - s.spawn(move || work()); + s.spawn(|| work()); } } diff --git a/src/orch/implementations/rayon.rs b/src/orch/implementations/rayon.rs index 99f43f8..59cb8db 100644 --- a/src/orch/implementations/rayon.rs +++ b/src/orch/implementations/rayon.rs @@ -15,13 +15,13 @@ impl ParThreadPool for ThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Send + 'scope + 'env, + W: Fn() + Sync + 'scope + 'env, { - s.spawn(move |_| work()); + s.spawn(|_| work()); } fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) @@ -44,13 +44,13 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Send + 'scope + 'env, + W: Fn() + Sync + 'scope + 'env, { - s.spawn(move |_| work()); + s.spawn(|_| work()); } fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/orch/implementations/scoped_threadpool.rs index 27311d4..950d0e3 100644 --- a/src/orch/implementations/scoped_threadpool.rs +++ b/src/orch/implementations/scoped_threadpool.rs @@ -15,11 +15,11 @@ impl ParThreadPool for Pool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Send + 'scope + 'env, + W: Fn() + Sync + 'scope + 'env, { s.execute(work); } @@ -44,11 +44,11 @@ impl<'a> ParThreadPool for &'a mut Pool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Send + 'scope + 'env, + W: Fn() + Sync + 'scope + 'env, { s.execute(work); } diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 434bcbe..ed3378a 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -36,13 +36,13 @@ pub trait Orchestrator { ) -> NumSpawned where I: ConcurrentIter, - 
F: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) + Sync, + F: Fn(&I, &SharedStateOf, ThreadRunnerOf) + Sync, { let runner = Self::new_runner(kind, params, iter.try_get_len()); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); - let work = |num_spawned| { - thread_do(num_spawned, &iter, &state, runner.new_thread_runner(&state)); + let work = || { + thread_do(&iter, &state, runner.new_thread_runner(&state)); }; self.thread_pool_mut().run(do_spawn, work) } @@ -57,8 +57,7 @@ pub trait Orchestrator { where F: Fallibility, I: ConcurrentIter, - M: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) -> Result - + Sync, + M: Fn(&I, &SharedStateOf, ThreadRunnerOf) -> Result + Sync, T: Send, F::Error: Send, { @@ -66,8 +65,7 @@ pub trait Orchestrator { let runner = Self::new_runner(kind, params, iter_len); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); - let work = - |num_spawned| thread_map(num_spawned, &iter, &state, runner.new_thread_runner(&state)); + let work = || thread_map(&iter, &state, runner.new_thread_runner(&state)); let max_num_threads = self.max_num_threads_for_computation(params, iter_len); self.thread_pool_mut() .map_all::(do_spawn, work, max_num_threads) @@ -82,8 +80,7 @@ pub trait Orchestrator { ) -> (NumSpawned, Result, Never>) where I: ConcurrentIter, - M: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) -> Result - + Sync, + M: Fn(&I, &SharedStateOf, ThreadRunnerOf) -> Result + Sync, T: Send, { self.map_all::(params, iter, kind, thread_map) diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index 8209599..7b07f4a 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -9,11 +9,11 @@ pub trait ParThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Send + 'scope + 'env; + W: Fn() + Sync + 'scope + 'env; fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where @@ -29,14 +29,13 @@ pub trait ParThreadPoolCompute: ParThreadPool { fn run(&mut self, do_spawn: S, thread_do: F) -> NumSpawned where S: Fn(NumSpawned) -> bool + Sync, - F: Fn(NumSpawned) + Sync, + F: Fn() + Sync, { - let thread_do = &thread_do; let mut nt = NumSpawned::zero(); self.scoped_computation(|s| { while do_spawn(nt) { nt.increment(); - Self::run_in_scope(&s, move || thread_do(nt)); + Self::run_in_scope(&s, &thread_do); } }); nt @@ -51,17 +50,17 @@ pub trait ParThreadPoolCompute: ParThreadPool { where F: Fallibility, S: Fn(NumSpawned) -> bool + Sync, - M: Fn(NumSpawned) -> Result + Sync, + M: Fn() -> Result + Sync, T: Send, F::Error: Send, { let mut nt = NumSpawned::zero(); let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); - let work = |nt| _ = thread_results.push(thread_map(nt)); + let work = || _ = thread_results.push(thread_map()); self.scoped_computation(|s| { while do_spawn(nt) { nt.increment(); - Self::run_in_scope(&s, move || work(nt)); + Self::run_in_scope(&s, &work); } }); diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 4db01d8..62039f4 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -33,7 +33,7 @@ where 
None => bag.reserve_maximum_capacity(capacity_bound), }; - let thread_work = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_work = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_arbitrary::m(thread_runner, iter, state, &map1, &bag); }; let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_work); @@ -68,7 +68,7 @@ where None => bag.reserve_maximum_capacity(capacity_bound), }; - let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_arbitrary::x(thread_runner, iter, state, &xap1, &bag).into_result() }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index b16ee0c..f544a32 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -26,7 +26,7 @@ where let offset = pinned_vec.len(); let o_bag: ConcurrentOrderedBag = pinned_vec.into(); - let thread_do = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_do = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_ordered::m(thread_runner, iter, state, &map1, &o_bag, offset); }; let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_do); @@ -53,7 +53,7 @@ where X1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { - let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index e2aaa7b..9081721 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -17,7 +17,7 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner| { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner| { Ok(th::next::m(thread_runner, iter, state, &map1)) }; let (num_spawned, result) = @@ -51,7 +51,7 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let thread_map = |_, iter: &I, state: &SharedStateOf, th_runner| match th::next::x( + let thread_map = |iter: &I, state: &SharedStateOf, th_runner| match th::next::x( th_runner, iter, state, &xap1, ) { NextWithIdx::Found { idx, value } => Ok(Some(NextSuccess::Found { idx, value })), diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index f5f1514..8dedbba 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -17,7 +17,7 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner| { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner| { Ok(th::next_any::m(thread_runner, iter, state, &map1)) }; let (num_spawned, result) = @@ -45,7 +45,7 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let thread_map = |_, iter: &I, state: &SharedStateOf, th_runner| { + let thread_map = |iter: &I, state: &SharedStateOf, th_runner| { th::next_any::x(th_runner, iter, 
state, &xap1) }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index aa2c5ed..15c072a 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -21,7 +21,7 @@ where Red: Fn(O, O) -> O + Sync, O: Send, { - let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { Ok(th::reduce::m(thread_runner, iter, state, &map1, &reduce)) }; let (num_spawned, result) = @@ -54,7 +54,7 @@ where X1: Fn(I::Item) -> Vo + Sync, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { - let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::reduce::x(thread_runner, iter, state, &xap1, &reduce).into_result() }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/using/runner/parallel_runner_compute/reduce.rs b/src/using/runner/parallel_runner_compute/reduce.rs index c517c41..d5c34ec 100644 --- a/src/using/runner/parallel_runner_compute/reduce.rs +++ b/src/using/runner/parallel_runner_compute/reduce.rs @@ -7,10 +7,10 @@ use crate::using::runner::thread_runner_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; -// // m +// m // pub fn m( -// using: U, +// mut using: U, // mut orchestrator: C, // params: Params, // iter: I, @@ -21,18 +21,24 @@ use orx_concurrent_iter::ConcurrentIter; // U: Using, // C: Orchestrator, // I: ConcurrentIter, -// M1: Fn(I::Item) -> O + Sync, -// Red: Fn(O, O) -> O + Sync, +// M1: Fn(&mut U::Item, I::Item) -> O + Sync, +// Red: Fn(&mut U::Item, O, O) -> O + Sync, // O: Send, // { -// let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { -// Ok(th::reduce::m(thread_runner, iter, state, &map1, &reduce)) -// }; +// let thread_map = +// |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { +// let u = using.create(nt.into_inner()); +// Ok(th::reduce::m(u, thread_runner, iter, state, &map1, &reduce)) +// }; // let (num_spawned, result) = // orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); +// let mut u = using.into_inner(); // let acc = match result { -// Ok(results) => results.into_iter().filter_map(|x| x).reduce(reduce), +// Ok(results) => results +// .into_iter() +// .filter_map(|x| x) +// .reduce(|a, b| reduce(&mut u, a, b)), // }; // (num_spawned, acc) From 10294b26a6f2fe989900568306074bb7f1aa02ef Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:01:37 +0200 Subject: [PATCH 160/264] first working collect-arbitrary for map with mutable variable --- .../default_std_orchestrator.rs | 9 ++++ src/orch/implementations/rayon.rs | 18 +++++++ src/orch/implementations/scoped_threadpool.rs | 18 +++++++ src/orch/orchestrator.rs | 20 ++++++++ src/orch/par_thread_pool.rs | 29 ++++++++++- .../collect_arbitrary.rs | 51 +++++++++++++++++++ src/using/using_variants.rs | 14 ++--- 7 files changed, 151 insertions(+), 8 deletions(-) diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index 7380c97..fd0e584 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -64,6 +64,15 @@ impl ParThreadPool for StdDefaultPool { { 
s.spawn(|| work()); } + + fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, + { + s.spawn(move || work()); + } } // ORCH diff --git a/src/orch/implementations/rayon.rs b/src/orch/implementations/rayon.rs index 59cb8db..8267376 100644 --- a/src/orch/implementations/rayon.rs +++ b/src/orch/implementations/rayon.rs @@ -35,6 +35,15 @@ impl ParThreadPool for ThreadPool { fn max_num_threads(&self) -> NonZeroUsize { NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") } + + fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, + { + s.spawn(move |_| work()); + } } impl<'a> ParThreadPool for &'a rayon::ThreadPool { @@ -64,6 +73,15 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { fn max_num_threads(&self) -> NonZeroUsize { NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") } + + fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, + { + s.spawn(move |_| work()); + } } // ORCH diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/orch/implementations/scoped_threadpool.rs index 950d0e3..5974ac3 100644 --- a/src/orch/implementations/scoped_threadpool.rs +++ b/src/orch/implementations/scoped_threadpool.rs @@ -24,6 +24,15 @@ impl ParThreadPool for Pool { s.execute(work); } + fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, + { + s.execute(work); + } + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, @@ -53,6 +62,15 @@ impl<'a> ParThreadPool for &'a mut Pool { s.execute(work); } + fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, + { + s.execute(work); + } + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index ed3378a..d545f89 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -102,6 +102,26 @@ pub trait Orchestrator { req.min(ava) } + + fn run_all_using( + &mut self, + params: Params, + iter: I, + kind: ComputationKind, + thread_do: F, + ) -> NumSpawned + where + I: ConcurrentIter, + F: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) + Sync, + { + let runner = Self::new_runner(kind, params, iter.try_get_len()); + let state = runner.new_shared_state(); + let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); + let work = |nt| { + thread_do(nt, &iter, &state, runner.new_thread_runner(&state)); + }; + self.thread_pool_mut().run_using(do_spawn, work) + } } pub(crate) type SharedStateOf = <::Runner as ParallelRunner>::SharedState; diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index 7b07f4a..c83a925 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -1,4 +1,6 @@ -use crate::{generic_values::runner_results::Fallibility, orch::num_spawned::NumSpawned}; +use crate::{ + generic_values::runner_results::Fallibility, orch::num_spawned::NumSpawned, using_old::Using, +}; use alloc::vec::Vec; use core::num::NonZeroUsize; use orx_concurrent_bag::ConcurrentBag; @@ -15,6 +17,12 @@ pub trait ParThreadPool { 
'env: 'scope + 's, W: Fn() + Sync + 'scope + 'env; + fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env; + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, @@ -69,6 +77,25 @@ pub trait ParThreadPoolCompute: ParThreadPool { (nt, result) } + + // using + + fn run_using(&mut self, do_spawn: S, thread_do: F) -> NumSpawned + where + S: Fn(NumSpawned) -> bool + Sync, + F: Fn(NumSpawned) + Sync, + { + let thread_do = &thread_do; + let mut nt = NumSpawned::zero(); + self.scoped_computation(|s| { + while do_spawn(nt) { + nt.increment(); + let work = move || thread_do(nt); + Self::run_in_scope2(&s, work); + } + }); + nt + } } impl ParThreadPoolCompute for X {} diff --git a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs index e69de29..9b49e35 100644 --- a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs @@ -0,0 +1,51 @@ +use crate::Params; +use crate::generic_values::Values; +use crate::generic_values::runner_results::ParallelCollectArbitrary; +use crate::orch::Orchestrator; +use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; +use crate::runner::ComputationKind; +use crate::using::runner::thread_runner_compute as th; +#[cfg(test)] +use crate::using::using_variants::Using; +use orx_concurrent_bag::ConcurrentBag; +use orx_concurrent_iter::ConcurrentIter; +use orx_fixed_vec::IntoConcurrentPinnedVec; + +// m + +#[cfg(test)] +pub fn m( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + map1: M1, + pinned_vec: P, +) -> (NumSpawned, P) +where + U: Using + Sync, + U::Item: Send, + C: Orchestrator, + I: ConcurrentIter, + O: Send, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, + P: IntoConcurrentPinnedVec, +{ + let capacity_bound = pinned_vec.capacity_bound(); + let offset = pinned_vec.len(); + let mut bag: ConcurrentBag = pinned_vec.into(); + match iter.try_get_len() { + Some(iter_len) => bag.reserve_maximum_capacity(offset + iter_len), + None => bag.reserve_maximum_capacity(capacity_bound), + }; + let thread_work = + |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let u = using.create(nt.into_inner()); + th::collect_arbitrary::m(u, thread_runner, iter, state, &map1, &bag); + }; + let num_spawned = + orchestrator.run_all_using(params, iter, ComputationKind::Collect, thread_work); + + let values = bag.into_inner(); + (num_spawned, values) +} diff --git a/src/using/using_variants.rs b/src/using/using_variants.rs index 5489d23..0f5a961 100644 --- a/src/using/using_variants.rs +++ b/src/using/using_variants.rs @@ -7,7 +7,7 @@ pub trait Using { type Item: Send + 'static; /// Creates an instance of the variable to be used by the `thread_idx`-th thread. - fn create(&mut self, thread_idx: usize) -> Self::Item; + fn create(&self, thread_idx: usize) -> Self::Item; /// Consumes self and creates exactly one instance of the variable. 
fn into_inner(self) -> Self::Item; @@ -25,7 +25,7 @@ impl UsingClone { impl Using for UsingClone { type Item = T; - fn create(&mut self, _: usize) -> T { + fn create(&self, _: usize) -> T { self.0.clone() } @@ -38,7 +38,7 @@ impl Using for UsingClone { pub struct UsingFun where T: Send + 'static, - F: FnMut(usize) -> T, + F: Fn(usize) -> T, { fun: F, } @@ -46,7 +46,7 @@ where impl UsingFun where T: Send + 'static, - F: FnMut(usize) -> T, + F: Fn(usize) -> T, { pub(crate) fn new(fun: F) -> Self { Self { fun } @@ -56,15 +56,15 @@ where impl Using for UsingFun where T: Send + 'static, - F: FnMut(usize) -> T, + F: Fn(usize) -> T, { type Item = T; - fn create(&mut self, thread_idx: usize) -> Self::Item { + fn create(&self, thread_idx: usize) -> Self::Item { (self.fun)(thread_idx) } - fn into_inner(mut self) -> Self::Item { + fn into_inner(self) -> Self::Item { (self.fun)(0) } } From 5fcd69a23505cb9072c4223df88c2c57eeb9c3ee Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:05:23 +0200 Subject: [PATCH 161/264] unify run in scope with owned work --- .../default_std_orchestrator.rs | 11 +------ src/orch/implementations/rayon.rs | 30 ++++--------------- src/orch/implementations/scoped_threadpool.rs | 22 ++------------ src/orch/par_thread_pool.rs | 10 ++----- 4 files changed, 11 insertions(+), 62 deletions(-) diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index fd0e584..1f43f5b 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -56,16 +56,7 @@ impl ParThreadPool for StdDefaultPool { std::thread::scope(|s| f(&s)) } - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, - { - s.spawn(|| work()); - } - - fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, diff --git a/src/orch/implementations/rayon.rs b/src/orch/implementations/rayon.rs index 8267376..99f43f8 100644 --- a/src/orch/implementations/rayon.rs +++ b/src/orch/implementations/rayon.rs @@ -15,13 +15,13 @@ impl ParThreadPool for ThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, + W: Fn() + Send + 'scope + 'env, { - s.spawn(|_| work()); + s.spawn(move |_| work()); } fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) @@ -35,15 +35,6 @@ impl ParThreadPool for ThreadPool { fn max_num_threads(&self) -> NonZeroUsize { NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") } - - fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Send + 'scope + 'env, - { - s.spawn(move |_| work()); - } } impl<'a> ParThreadPool for &'a rayon::ThreadPool { @@ -53,13 +44,13 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 
'env, + W: Fn() + Send + 'scope + 'env, { - s.spawn(|_| work()); + s.spawn(move |_| work()); } fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) @@ -73,15 +64,6 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { fn max_num_threads(&self) -> NonZeroUsize { NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0") } - - fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Send + 'scope + 'env, - { - s.spawn(move |_| work()); - } } // ORCH diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/orch/implementations/scoped_threadpool.rs index 5974ac3..27311d4 100644 --- a/src/orch/implementations/scoped_threadpool.rs +++ b/src/orch/implementations/scoped_threadpool.rs @@ -15,16 +15,7 @@ impl ParThreadPool for Pool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, - { - s.execute(work); - } - - fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, @@ -53,16 +44,7 @@ impl<'a> ParThreadPool for &'a mut Pool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env, - { - s.execute(work); - } - - fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index c83a925..e1806a8 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -11,13 +11,7 @@ pub trait ParThreadPool { 'scope: 's, 'env: 'scope + 's; - fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: &'env W) - where - 'scope: 's, - 'env: 'scope + 's, - W: Fn() + Sync + 'scope + 'env; - - fn run_in_scope2<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) where 'scope: 's, 'env: 'scope + 's, @@ -91,7 +85,7 @@ pub trait ParThreadPoolCompute: ParThreadPool { while do_spawn(nt) { nt.increment(); let work = move || thread_do(nt); - Self::run_in_scope2(&s, work); + Self::run_in_scope(&s, work); } }); nt From 5cd64245985ed89dd53202ecfc19370a49a81e17 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:07:29 +0200 Subject: [PATCH 162/264] unify pool run fn for both regular and using computations --- src/orch/orchestrator.rs | 12 +++++------ src/orch/par_thread_pool.rs | 21 ++----------------- .../collect_arbitrary.rs | 2 +- .../collect_ordered.rs | 2 +- 4 files changed, 10 insertions(+), 27 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index d545f89..4616621 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -36,15 +36,15 @@ pub trait Orchestrator { ) -> NumSpawned where I: ConcurrentIter, - F: Fn(&I, &SharedStateOf, ThreadRunnerOf) + Sync, + F: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) + Sync, { let runner = Self::new_runner(kind, params, iter.try_get_len()); let state = runner.new_shared_state(); let do_spawn = |num_spawned| 
runner.do_spawn_new(num_spawned, &state, &iter); - let work = || { - thread_do(&iter, &state, runner.new_thread_runner(&state)); + let work = |num_spawned| { + thread_do(num_spawned, &iter, &state, runner.new_thread_runner(&state)); }; - self.thread_pool_mut().run(do_spawn, work) + self.thread_pool_mut().run_in_pool(do_spawn, work) } fn map_all( @@ -68,7 +68,7 @@ pub trait Orchestrator { let work = || thread_map(&iter, &state, runner.new_thread_runner(&state)); let max_num_threads = self.max_num_threads_for_computation(params, iter_len); self.thread_pool_mut() - .map_all::(do_spawn, work, max_num_threads) + .map_in_pool::(do_spawn, work, max_num_threads) } fn map_infallible( @@ -120,7 +120,7 @@ pub trait Orchestrator { let work = |nt| { thread_do(nt, &iter, &state, runner.new_thread_runner(&state)); }; - self.thread_pool_mut().run_using(do_spawn, work) + self.thread_pool_mut().run_in_pool(do_spawn, work) } } diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index e1806a8..571f612 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -28,22 +28,7 @@ pub trait ParThreadPool { // derived pub trait ParThreadPoolCompute: ParThreadPool { - fn run(&mut self, do_spawn: S, thread_do: F) -> NumSpawned - where - S: Fn(NumSpawned) -> bool + Sync, - F: Fn() + Sync, - { - let mut nt = NumSpawned::zero(); - self.scoped_computation(|s| { - while do_spawn(nt) { - nt.increment(); - Self::run_in_scope(&s, &thread_do); - } - }); - nt - } - - fn map_all( + fn map_in_pool( &mut self, do_spawn: S, thread_map: M, @@ -72,9 +57,7 @@ pub trait ParThreadPoolCompute: ParThreadPool { (nt, result) } - // using - - fn run_using(&mut self, do_spawn: S, thread_do: F) -> NumSpawned + fn run_in_pool(&mut self, do_spawn: S, thread_do: F) -> NumSpawned where S: Fn(NumSpawned) -> bool + Sync, F: Fn(NumSpawned) + Sync, diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 62039f4..22eb55d 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -33,7 +33,7 @@ where None => bag.reserve_maximum_capacity(capacity_bound), }; - let thread_work = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_work = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_arbitrary::m(thread_runner, iter, state, &map1, &bag); }; let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_work); diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index f544a32..c676563 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -26,7 +26,7 @@ where let offset = pinned_vec.len(); let o_bag: ConcurrentOrderedBag = pinned_vec.into(); - let thread_do = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_do = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_ordered::m(thread_runner, iter, state, &map1, &o_bag, offset); }; let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_do); From 22cdbae3e6591dfe0e6630ad00f706b6994156db Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:14:44 +0200 Subject: [PATCH 163/264] unify map_in_pool for using and regular computations --- src/orch/orchestrator.rs | 8 +++++--- src/orch/par_thread_pool.rs | 
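The unified `run_in_pool` above reduces to a spawn loop inside a scoped computation: keep asking the runner whether one more thread would help, and hand each spawned closure its own spawn index. A minimal, self-contained sketch of that loop, assuming a plain `usize` in place of `NumSpawned` and `std::thread::scope` in place of the pool's `scoped_computation` (the names below are illustrative, not the crate's API):

    use std::sync::atomic::{AtomicUsize, Ordering};

    // sketch: spawn while `do_spawn` approves; each thread gets its index
    fn run_in_pool<S, F>(do_spawn: S, thread_do: F) -> usize
    where
        S: Fn(usize) -> bool + Sync,
        F: Fn(usize) + Sync,
    {
        let mut num_spawned = 0;
        std::thread::scope(|s| {
            let thread_do = &thread_do;
            while do_spawn(num_spawned) {
                let idx = num_spawned;
                num_spawned += 1;
                // an owned `move` closure per thread: this is what replacing
                // the `&'env W` argument with owned work enables
                s.spawn(move || thread_do(idx));
            }
        });
        num_spawned
    }

    fn main() {
        let hits = AtomicUsize::new(0);
        let spawned = run_in_pool(
            |n| n < 4, // the runner's do-spawn decision, fixed to 4 here
            |_idx| {
                hits.fetch_add(1, Ordering::Relaxed);
            },
        );
        assert_eq!((spawned, hits.into_inner()), (4, 4));
    }

`map_in_pool` builds on the same loop: each spawned closure pushes `thread_map(nt)` into a concurrent bag, and the caller reduces the per-thread results after the scope ends.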
10 +++++++--- .../parallel_runner_compute/collect_arbitrary.rs | 2 +- src/runner/parallel_runner_compute/collect_ordered.rs | 2 +- src/runner/parallel_runner_compute/next.rs | 4 ++-- src/runner/parallel_runner_compute/next_any.rs | 4 ++-- src/runner/parallel_runner_compute/reduce.rs | 4 ++-- 7 files changed, 20 insertions(+), 14 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 4616621..66b5b01 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -57,7 +57,8 @@ pub trait Orchestrator { where F: Fallibility, I: ConcurrentIter, - M: Fn(&I, &SharedStateOf, ThreadRunnerOf) -> Result + Sync, + M: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) -> Result + + Sync, T: Send, F::Error: Send, { @@ -65,7 +66,7 @@ pub trait Orchestrator { let runner = Self::new_runner(kind, params, iter_len); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); - let work = || thread_map(&iter, &state, runner.new_thread_runner(&state)); + let work = |nt| thread_map(nt, &iter, &state, runner.new_thread_runner(&state)); let max_num_threads = self.max_num_threads_for_computation(params, iter_len); self.thread_pool_mut() .map_in_pool::(do_spawn, work, max_num_threads) @@ -80,7 +81,8 @@ pub trait Orchestrator { ) -> (NumSpawned, Result, Never>) where I: ConcurrentIter, - M: Fn(&I, &SharedStateOf, ThreadRunnerOf) -> Result + Sync, + M: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) -> Result + + Sync, T: Send, { self.map_all::(params, iter, kind, thread_map) diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index 571f612..fcaf71c 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -37,17 +37,21 @@ pub trait ParThreadPoolCompute: ParThreadPool { where F: Fallibility, S: Fn(NumSpawned) -> bool + Sync, - M: Fn() -> Result + Sync, + M: Fn(NumSpawned) -> Result + Sync, T: Send, F::Error: Send, { + let thread_map = &thread_map; let mut nt = NumSpawned::zero(); let thread_results = ConcurrentBag::with_fixed_capacity(max_num_threads.into()); - let work = || _ = thread_results.push(thread_map()); + let bag = &thread_results; self.scoped_computation(|s| { while do_spawn(nt) { nt.increment(); - Self::run_in_scope(&s, &work); + let work = move || { + bag.push(thread_map(nt)); + }; + Self::run_in_scope(&s, work); } }); diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 22eb55d..4db01d8 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -68,7 +68,7 @@ where None => bag.reserve_maximum_capacity(capacity_bound), }; - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::collect_arbitrary::x(thread_runner, iter, state, &xap1, &bag).into_result() }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index c676563..b16ee0c 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -53,7 +53,7 @@ where X1: Fn(I::Item) -> Vo + Sync, P: IntoConcurrentPinnedVec, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |_, iter: &I, state: &SharedStateOf, 
thread_runner: ThreadRunnerOf| { th::collect_ordered::x(thread_runner, iter, state, &xap1).into_result() }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_runner_compute/next.rs index 9081721..e2aaa7b 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_runner_compute/next.rs @@ -17,7 +17,7 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner| { Ok(th::next::m(thread_runner, iter, state, &map1)) }; let (num_spawned, result) = @@ -51,7 +51,7 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, th_runner| match th::next::x( + let thread_map = |_, iter: &I, state: &SharedStateOf, th_runner| match th::next::x( th_runner, iter, state, &xap1, ) { NextWithIdx::Found { idx, value } => Ok(Some(NextSuccess::Found { idx, value })), diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_runner_compute/next_any.rs index 8dedbba..f5f1514 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_runner_compute/next_any.rs @@ -17,7 +17,7 @@ where O: Send, M1: Fn(I::Item) -> O + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner| { Ok(th::next_any::m(thread_runner, iter, state, &map1)) }; let (num_spawned, result) = @@ -45,7 +45,7 @@ where Vo::Item: Send, X1: Fn(I::Item) -> Vo + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, th_runner| { + let thread_map = |_, iter: &I, state: &SharedStateOf, th_runner| { th::next_any::x(th_runner, iter, state, &xap1) }; let (num_spawned, result) = orchestrator.map_all::( diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index 15c072a..aa2c5ed 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -21,7 +21,7 @@ where Red: Fn(O, O) -> O + Sync, O: Send, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { Ok(th::reduce::m(thread_runner, iter, state, &map1, &reduce)) }; let (num_spawned, result) = @@ -54,7 +54,7 @@ where X1: Fn(I::Item) -> Vo + Sync, Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, { - let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let thread_map = |_, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { th::reduce::x(thread_runner, iter, state, &xap1, &reduce).into_result() }; let (num_spawned, result) = orchestrator.map_all::( From fa5c16088c4fbce3bfb3027b57b7b5c38f607014 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:15:28 +0200 Subject: [PATCH 164/264] clean up --- src/orch/orchestrator.rs | 20 ------------------- .../collect_arbitrary.rs | 3 +-- 2 files changed, 1 insertion(+), 22 deletions(-) diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 66b5b01..95a74a1 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -104,26 +104,6 @@ pub trait Orchestrator { req.min(ava) } - - fn run_all_using( - &mut self, - params: Params, - iter: I, - kind: ComputationKind, - thread_do: F, - ) -> NumSpawned - where - I: ConcurrentIter, - F: Fn(NumSpawned, &I, 
&SharedStateOf, ThreadRunnerOf) + Sync, - { - let runner = Self::new_runner(kind, params, iter.try_get_len()); - let state = runner.new_shared_state(); - let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); - let work = |nt| { - thread_do(nt, &iter, &state, runner.new_thread_runner(&state)); - }; - self.thread_pool_mut().run_in_pool(do_spawn, work) - } } pub(crate) type SharedStateOf = <::Runner as ParallelRunner>::SharedState; diff --git a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs index 9b49e35..87144f3 100644 --- a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs @@ -43,8 +43,7 @@ where let u = using.create(nt.into_inner()); th::collect_arbitrary::m(u, thread_runner, iter, state, &map1, &bag); }; - let num_spawned = - orchestrator.run_all_using(params, iter, ComputationKind::Collect, thread_work); + let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_work); let values = bag.into_inner(); (num_spawned, values) From b6e452610263e20c7f745187818321ea52846939 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:17:26 +0200 Subject: [PATCH 165/264] xap collect arbitrary using mutable variable --- .../collect_arbitrary.rs | 4 -- .../collect_arbitrary.rs | 52 +++++++++++++++++-- 2 files changed, 49 insertions(+), 7 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_runner_compute/collect_arbitrary.rs index 4db01d8..9da669b 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_runner_compute/collect_arbitrary.rs @@ -8,8 +8,6 @@ use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; -// m - #[cfg(test)] pub fn m( mut orchestrator: C, @@ -42,8 +40,6 @@ where (num_spawned, values) } -// x - pub fn x( mut orchestrator: C, params: Params, diff --git a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs index 87144f3..b1f1f32 100644 --- a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs @@ -5,14 +5,11 @@ use crate::orch::Orchestrator; use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; use crate::runner::ComputationKind; use crate::using::runner::thread_runner_compute as th; -#[cfg(test)] use crate::using::using_variants::Using; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; -// m - #[cfg(test)] pub fn m( using: U, @@ -48,3 +45,52 @@ where let values = bag.into_inner(); (num_spawned, values) } + +pub fn x( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + xap1: X1, + pinned_vec: P, +) -> (NumSpawned, ParallelCollectArbitrary) +where + U: Using + Sync, + U::Item: Send, + C: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + let capacity_bound = pinned_vec.capacity_bound(); + let offset = pinned_vec.len(); + + let mut bag: ConcurrentBag = pinned_vec.into(); + match iter.try_get_len() { + Some(iter_len) => bag.reserve_maximum_capacity(offset + iter_len), + None => bag.reserve_maximum_capacity(capacity_bound), + }; + + let thread_map = + |nt: NumSpawned, iter: &I, 
state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let u = using.create(nt.into_inner()); + th::collect_arbitrary::x(u, thread_runner, iter, state, &xap1, &bag).into_result() + }; + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); + + let result = match result { + Err(error) => ParallelCollectArbitrary::StoppedByError { error }, + Ok(_) => ParallelCollectArbitrary::AllOrUntilWhileCollected { + pinned_vec: bag.into_inner(), + }, + }; + + (num_spawned, result) +} From 900c25b793d13a7fd7627796902e6b82084c9fb8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:19:21 +0200 Subject: [PATCH 166/264] using collect ordered with new orchestrator --- .../collect_ordered.rs | 4 - .../collect_ordered.rs | 79 +++++++++++++++++++ 2 files changed, 79 insertions(+), 4 deletions(-) diff --git a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_runner_compute/collect_ordered.rs index b16ee0c..43460f3 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_runner_compute/collect_ordered.rs @@ -7,8 +7,6 @@ use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; -// m - pub fn m( mut orchestrator: C, params: Params, @@ -35,8 +33,6 @@ where (num_spawned, values) } -// x - pub fn x( mut orchestrator: C, params: Params, diff --git a/src/using/runner/parallel_runner_compute/collect_ordered.rs b/src/using/runner/parallel_runner_compute/collect_ordered.rs index e69de29..2acead3 100644 --- a/src/using/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/using/runner/parallel_runner_compute/collect_ordered.rs @@ -0,0 +1,79 @@ +use crate::Params; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; +use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; +use crate::runner::ComputationKind; +use crate::using::runner::thread_runner_compute as th; +use crate::using::using_variants::Using; +use orx_concurrent_iter::ConcurrentIter; +use orx_concurrent_ordered_bag::ConcurrentOrderedBag; +use orx_fixed_vec::IntoConcurrentPinnedVec; + +pub fn m( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + map1: M1, + pinned_vec: P, +) -> (NumSpawned, P) +where + U: Using + Sync, + U::Item: Send, + C: Orchestrator, + I: ConcurrentIter, + O: Send, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, + P: IntoConcurrentPinnedVec, +{ + let offset = pinned_vec.len(); + let o_bag: ConcurrentOrderedBag = pinned_vec.into(); + + let thread_do = + |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let u = using.create(nt.into_inner()); + th::collect_ordered::m(u, thread_runner, iter, state, &map1, &o_bag, offset); + }; + let num_spawned = orchestrator.run_all(params, iter, ComputationKind::Collect, thread_do); + + let values = unsafe { o_bag.into_inner().unwrap_only_if_counts_match() }; + (num_spawned, values) +} + +pub fn x( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + xap1: X1, + pinned_vec: P, +) -> (NumSpawned, ParallelCollect) +where + U: Using + Sync, + U::Item: Send, + C: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + ::Error: Send, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + let thread_map = + |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let u = 
using.create(nt.into_inner()); + th::collect_ordered::x(u, thread_runner, iter, state, &xap1).into_result() + }; + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); + + let result = match result { + Err(error) => ParallelCollect::StoppedByError { error }, + Ok(results) => ParallelCollect::reduce(results, pinned_vec), + }; + (num_spawned, result) +} From 3a0733e470705ba02324ce993bd892f568586287 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:20:52 +0200 Subject: [PATCH 167/264] orchestrator using next-any --- .../parallel_runner_compute/next_any.rs | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/src/using/runner/parallel_runner_compute/next_any.rs b/src/using/runner/parallel_runner_compute/next_any.rs index e69de29..89f3fac 100644 --- a/src/using/runner/parallel_runner_compute/next_any.rs +++ b/src/using/runner/parallel_runner_compute/next_any.rs @@ -0,0 +1,69 @@ +use crate::Params; +use crate::generic_values::Values; +use crate::generic_values::runner_results::Fallibility; +use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; +use crate::runner::ComputationKind; +use crate::using::runner::thread_runner_compute as th; +use crate::using::using_variants::Using; +use orx_concurrent_iter::ConcurrentIter; + +pub fn m( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + map1: M1, +) -> (NumSpawned, Option) +where + U: Using + Sync, + U::Item: Send, + C: Orchestrator, + I: ConcurrentIter, + O: Send, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, +{ + let thread_map = |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner| { + let u = using.create(nt.into_inner()); + Ok(th::next_any::m(u, thread_runner, iter, state, &map1)) + }; + let (num_spawned, result) = + orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); + + let next = match result { + Ok(results) => results.into_iter().filter_map(|x| x).next(), + }; + (num_spawned, next) +} + +type ResultNextAny = + Result::Item>, <::Fallibility as Fallibility>::Error>; + +pub fn x( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + xap1: X1, +) -> (NumSpawned, ResultNextAny) +where + U: Using + Sync, + U::Item: Send, + C: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +{ + let thread_map = |nt: NumSpawned, iter: &I, state: &SharedStateOf, th_runner| { + let u = using.create(nt.into_inner()); + th::next_any::x(u, th_runner, iter, state, &xap1) + }; + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); + let next = result.map(|results| results.into_iter().filter_map(|x| x).next()); + (num_spawned, next) +} From 2e77848ec21b9b0d0853b7e95e7fc96ccd001db5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:23:49 +0200 Subject: [PATCH 168/264] using next over orchestrator --- .../collect_arbitrary.rs | 2 - .../collect_ordered.rs | 2 - .../runner/parallel_runner_compute/next.rs | 80 +++++++++++++++++++ .../parallel_runner_compute/next_any.rs | 2 - 4 files changed, 80 insertions(+), 6 deletions(-) diff --git a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs index b1f1f32..4ab6245 100644 --- a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs @@ -21,7 +21,6 @@ pub fn m( ) -> (NumSpawned, P) where 
U: Using + Sync, - U::Item: Send, C: Orchestrator, I: ConcurrentIter, O: Send, @@ -56,7 +55,6 @@ pub fn x( ) -> (NumSpawned, ParallelCollectArbitrary) where U: Using + Sync, - U::Item: Send, C: Orchestrator, I: ConcurrentIter, Vo: Values, diff --git a/src/using/runner/parallel_runner_compute/collect_ordered.rs b/src/using/runner/parallel_runner_compute/collect_ordered.rs index 2acead3..016ce20 100644 --- a/src/using/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/using/runner/parallel_runner_compute/collect_ordered.rs @@ -19,7 +19,6 @@ pub fn m( ) -> (NumSpawned, P) where U: Using + Sync, - U::Item: Send, C: Orchestrator, I: ConcurrentIter, O: Send, @@ -50,7 +49,6 @@ pub fn x( ) -> (NumSpawned, ParallelCollect) where U: Using + Sync, - U::Item: Send, C: Orchestrator, I: ConcurrentIter, Vo: Values, diff --git a/src/using/runner/parallel_runner_compute/next.rs b/src/using/runner/parallel_runner_compute/next.rs index e69de29..e03bd9e 100644 --- a/src/using/runner/parallel_runner_compute/next.rs +++ b/src/using/runner/parallel_runner_compute/next.rs @@ -0,0 +1,80 @@ +use crate::Params; +use crate::generic_values::Values; +use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; +use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; +use crate::runner::ComputationKind; +use crate::using::runner::thread_runner_compute as th; +use crate::using::using_variants::Using; +use orx_concurrent_iter::ConcurrentIter; + +pub fn m( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + map1: M1, +) -> (NumSpawned, Option) +where + U: Using + Sync, + C: Orchestrator, + I: ConcurrentIter, + O: Send, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, +{ + let thread_map = |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner| { + let u = using.create(nt.into_inner()); + Ok(th::next::m(u, thread_runner, iter, state, &map1)) + }; + let (num_spawned, result) = + orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); + + let next = match result { + Ok(results) => results + .into_iter() + .filter_map(|x| x) + .min_by_key(|x| x.0) + .map(|x| x.1), + }; + (num_spawned, next) +} + +type ResultNext = Result< + Option<(usize, ::Item)>, + <::Fallibility as Fallibility>::Error, +>; + +pub fn x( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + xap1: X1, +) -> (NumSpawned, ResultNext) +where + U: Using + Sync, + C: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +{ + let thread_map = |nt: NumSpawned, iter: &I, state: &SharedStateOf, th_runner| { + let u = using.create(nt.into_inner()); + match th::next::x(u, th_runner, iter, state, &xap1) { + NextWithIdx::Found { idx, value } => Ok(Some(NextSuccess::Found { idx, value })), + NextWithIdx::NotFound => Ok(None), + NextWithIdx::StoppedByWhileCondition { idx } => { + Ok(Some(NextSuccess::StoppedByWhileCondition { idx })) + } + NextWithIdx::StoppedByError { error } => Err(error), + } + }; + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); + let next = result.map(|results| NextSuccess::reduce(results.into_iter().filter_map(|x| x))); + (num_spawned, next) +} diff --git a/src/using/runner/parallel_runner_compute/next_any.rs b/src/using/runner/parallel_runner_compute/next_any.rs index 89f3fac..5b2de15 100644 --- a/src/using/runner/parallel_runner_compute/next_any.rs +++ b/src/using/runner/parallel_runner_compute/next_any.rs @@ -16,7 +16,6 @@ pub fn m( 
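Dropping the `U::Item: Send` bounds in this patch is sound because `using.create(nt)` is now invoked inside `thread_map`, that is, on the spawned thread itself; the created value is born, used and dropped on a single thread. A self-contained sketch of the pattern with a simplified stand-in for the crate's `Using` trait (the `Rc` payload is deliberately `!Send` to make the point):

    use std::rc::Rc;
    use std::sync::Mutex;

    // simplified stand-in: the factory is shared across threads (&self),
    // while the created value is owned by exactly one thread
    trait Using: Sync {
        type Item;
        fn create(&self, thread_idx: usize) -> Self::Item;
    }

    struct PerThreadState;
    impl Using for PerThreadState {
        // Rc is !Send: this only works because `create` runs on the
        // spawned thread and the value never leaves it
        type Item = Rc<Vec<u64>>;
        fn create(&self, thread_idx: usize) -> Self::Item {
            Rc::new(vec![thread_idx as u64; 3])
        }
    }

    fn main() {
        let using = PerThreadState;
        let totals = Mutex::new(0u64);
        std::thread::scope(|s| {
            for idx in 0..4 {
                let (using, totals) = (&using, &totals);
                s.spawn(move || {
                    let state = using.create(idx); // created on this thread
                    *totals.lock().unwrap() += state.iter().sum::<u64>();
                });
            }
        });
        assert_eq!(totals.into_inner().unwrap(), 18); // 3 * (0 + 1 + 2 + 3)
    }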
) -> (NumSpawned, Option) where U: Using + Sync, - U::Item: Send, C: Orchestrator, I: ConcurrentIter, O: Send, @@ -47,7 +46,6 @@ pub fn x( ) -> (NumSpawned, ResultNextAny) where U: Using + Sync, - U::Item: Send, C: Orchestrator, I: ConcurrentIter, Vo: Values, From e591d2d84c50f0ae092d2143d381c882a9014518 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:25:57 +0200 Subject: [PATCH 169/264] using reduce with orchestrator --- src/runner/parallel_runner_compute/reduce.rs | 4 - .../runner/parallel_runner_compute/reduce.rs | 138 +++++++++--------- 2 files changed, 71 insertions(+), 71 deletions(-) diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_runner_compute/reduce.rs index aa2c5ed..e455049 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_runner_compute/reduce.rs @@ -5,8 +5,6 @@ use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use crate::runner::{ComputationKind, thread_runner_compute as th}; use orx_concurrent_iter::ConcurrentIter; -// m - pub fn m( mut orchestrator: C, params: Params, @@ -34,8 +32,6 @@ where (num_spawned, acc) } -// x - type ResultReduce = Result::Item>, <::Fallibility as Fallibility>::Error>; diff --git a/src/using/runner/parallel_runner_compute/reduce.rs b/src/using/runner/parallel_runner_compute/reduce.rs index d5c34ec..3627375 100644 --- a/src/using/runner/parallel_runner_compute/reduce.rs +++ b/src/using/runner/parallel_runner_compute/reduce.rs @@ -7,74 +7,78 @@ use crate::using::runner::thread_runner_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; -// m +pub fn m( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + map1: M1, + reduce: Red, +) -> (NumSpawned, Option) +where + U: Using + Sync, + C: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, + Red: Fn(&mut U::Item, O, O) -> O + Sync, + O: Send, +{ + let thread_map = + |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let u = using.create(nt.into_inner()); + Ok(th::reduce::m(u, thread_runner, iter, state, &map1, &reduce)) + }; + let (num_spawned, result) = + orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); -// pub fn m( -// mut using: U, -// mut orchestrator: C, -// params: Params, -// iter: I, -// map1: M1, -// reduce: Red, -// ) -> (NumSpawned, Option) -// where -// U: Using, -// C: Orchestrator, -// I: ConcurrentIter, -// M1: Fn(&mut U::Item, I::Item) -> O + Sync, -// Red: Fn(&mut U::Item, O, O) -> O + Sync, -// O: Send, -// { -// let thread_map = -// |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { -// let u = using.create(nt.into_inner()); -// Ok(th::reduce::m(u, thread_runner, iter, state, &map1, &reduce)) -// }; -// let (num_spawned, result) = -// orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); + let mut u = using.into_inner(); + let acc = match result { + Ok(results) => results + .into_iter() + .filter_map(|x| x) + .reduce(|a, b| reduce(&mut u, a, b)), + }; -// let mut u = using.into_inner(); -// let acc = match result { -// Ok(results) => results -// .into_iter() -// .filter_map(|x| x) -// .reduce(|a, b| reduce(&mut u, a, b)), -// }; + (num_spawned, acc) +} -// (num_spawned, acc) -// } +type ResultReduce = + Result::Item>, <::Fallibility as Fallibility>::Error>; -// // x - -// type ResultReduce = -// Result::Item>, <::Fallibility as Fallibility>::Error>; - -// pub fn x( -// 
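The `m` and `x` reduce functions introduced here share a two-phase shape: every thread folds its share of the input with its own `&mut U::Item`, and the caller performs the final combine using the value returned by `using.into_inner()`. A self-contained sketch of that shape, with plain scoped threads and a `Mutex<Vec<_>>` standing in for the orchestrator and its concurrent bag:

    use std::sync::Mutex;

    fn main() {
        let data: Vec<u64> = (1..=100).collect();
        let partials = Mutex::new(Vec::new());

        std::thread::scope(|s| {
            for chunk in data.chunks(25) {
                let partials = &partials;
                s.spawn(move || {
                    // phase 1: per-thread fold; `acc` plays the role of the
                    // value produced by `using.create(nt)`
                    let mut acc = 0u64;
                    for x in chunk {
                        acc += x;
                    }
                    partials.lock().unwrap().push(acc);
                });
            }
        });

        // phase 2: the final combine runs on the caller's thread, mirroring
        // `results.into_iter().filter_map(|x| x).reduce(|a, b| reduce(&mut u, a, b))`
        let total = partials.into_inner().unwrap().into_iter().reduce(|a, b| a + b);
        assert_eq!(total, Some(5050));
    }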
using: U, -// mut orchestrator: C, -// params: Params, -// iter: I, -// xap1: X1, -// reduce: Red, -// ) -> (NumSpawned, ResultReduce) -// where -// U: Using, -// C: Orchestrator, -// I: ConcurrentIter, -// Vo: Values, -// Vo::Item: Send, -// X1: Fn(I::Item) -> Vo + Sync, -// Red: Fn(Vo::Item, Vo::Item) -> Vo::Item + Sync, -// { -// let thread_map = |iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { -// th::reduce::x(thread_runner, iter, state, &xap1, &reduce).into_result() -// }; -// let (num_spawned, result) = orchestrator.map_all::( -// params, -// iter, -// ComputationKind::Collect, -// thread_map, -// ); -// let acc = result.map(|results| results.into_iter().filter_map(|x| x).reduce(reduce)); -// (num_spawned, acc) -// } +pub fn x( + using: U, + mut orchestrator: C, + params: Params, + iter: I, + xap1: X1, + reduce: Red, +) -> (NumSpawned, ResultReduce) +where + U: Using + Sync, + C: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + Red: Fn(&mut U::Item, Vo::Item, Vo::Item) -> Vo::Item + Sync, +{ + let thread_map = + |nt: NumSpawned, iter: &I, state: &SharedStateOf, thread_runner: ThreadRunnerOf| { + let u = using.create(nt.into_inner()); + th::reduce::x(u, thread_runner, iter, state, &xap1, &reduce).into_result() + }; + let (num_spawned, result) = orchestrator.map_all::( + params, + iter, + ComputationKind::Collect, + thread_map, + ); + let mut u = using.into_inner(); + let acc = result.map(|results| { + results + .into_iter() + .filter_map(|x| x) + .reduce(|a, b| reduce(&mut u, a, b)) + }); + (num_spawned, acc) +} From 771d7ff9d85d70a6609adfe265938816e5156cff Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:33:16 +0200 Subject: [PATCH 170/264] reduce and first implemented for xap using --- src/computational_variants/xap.rs | 2 +- src/using/computational_variants/u_xap.rs | 27 ++++++++++++++++++----- src/using/runner/mod.rs | 2 +- src/using/using_variants.rs | 22 +++++++++--------- 4 files changed, 35 insertions(+), 18 deletions(-) diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 91e26d4..612021a 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -1,7 +1,7 @@ use crate::ParIterResult; use crate::computational_variants::fallible_result::ParXapResult; +use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; -use crate::generic_values::{TransformableValues, Values}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index d99066b..702bb04 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -1,10 +1,11 @@ use crate::{ - ChunkSize, IterationOrder, NumThreads, ParIterUsing, Params, + ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterUsing, Params, generic_values::{TransformableValues, runner_results::Infallible}, orch::{DefaultOrchestrator, Orchestrator}, - using::using_variants::Using, + using::{runner::parallel_runner_compute as prc, using_variants::Using}, }; use orx_concurrent_iter::ConcurrentIter; +// use crate::runner::parallel_runner_compute as prc; pub struct UParXap where @@ -206,8 +207,10 @@ where fn collect_into(self, output: C) -> C where - C: crate::ParCollectInto, + C: ParCollectInto, { + // let (using, 
orchestrator, params, iter, x1) = self.destruct(); + // output.x_collect_into(using, orchestrator, params, iter, x1) todo!() } @@ -216,13 +219,27 @@ where Self::Item: Send, Reduce: Fn(&mut ::Item, Self::Item, Self::Item) -> Self::Item + Sync, { - todo!() + let (using, orchestrator, params, iter, x1) = self.destruct(); + let (_, Ok(acc)) = prc::reduce::x(using, orchestrator, params, iter, x1, reduce); + acc } fn first(self) -> Option where Self::Item: Send, { - todo!() + let (using, orchestrator, params, iter, x1) = self.destruct(); + match params.iteration_order { + IterationOrder::Ordered => { + let (_num_threads, Ok(result)) = + prc::next::x(using, orchestrator, params, iter, x1); + result.map(|x| x.1) + } + IterationOrder::Arbitrary => { + let (_num_threads, Ok(result)) = + prc::next_any::x(using, orchestrator, params, iter, x1); + result + } + } } } diff --git a/src/using/runner/mod.rs b/src/using/runner/mod.rs index 0f50140..298ff9c 100644 --- a/src/using/runner/mod.rs +++ b/src/using/runner/mod.rs @@ -1,2 +1,2 @@ -mod parallel_runner_compute; +pub(super) mod parallel_runner_compute; mod thread_runner_compute; diff --git a/src/using/using_variants.rs b/src/using/using_variants.rs index 0f5a961..b501d36 100644 --- a/src/using/using_variants.rs +++ b/src/using/using_variants.rs @@ -2,9 +2,9 @@ /// and used mutable by the defined computation. /// /// [`create`]: crate::using::Using::create -pub trait Using { +pub trait Using: Sync { /// Item to be used mutably by each threads used in parallel computation. - type Item: Send + 'static; + type Item: 'static; /// Creates an instance of the variable to be used by the `thread_idx`-th thread. fn create(&self, thread_idx: usize) -> Self::Item; @@ -14,15 +14,15 @@ pub trait Using { } /// Using variant that creates instances of each thread by cloning an initial value. -pub struct UsingClone(T); +pub struct UsingClone(T); -impl UsingClone { +impl UsingClone { pub(crate) fn new(value: T) -> Self { Self(value) } } -impl Using for UsingClone { +impl Using for UsingClone { type Item = T; fn create(&self, _: usize) -> T { @@ -37,16 +37,16 @@ impl Using for UsingClone { /// Using variant that creates instances of each thread using a closure. 
pub struct UsingFun where - T: Send + 'static, - F: Fn(usize) -> T, + T: 'static, + F: Fn(usize) -> T + Sync, { fun: F, } impl UsingFun where - T: Send + 'static, - F: Fn(usize) -> T, + T: 'static, + F: Fn(usize) -> T + Sync, { pub(crate) fn new(fun: F) -> Self { Self { fun } @@ -55,8 +55,8 @@ where impl Using for UsingFun where - T: Send + 'static, - F: Fn(usize) -> T, + T: 'static, + F: Fn(usize) -> T + Sync, { type Item = T; From 7ab7bbca8a58d61144a505f4b12a3ef40f58c34a Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:35:35 +0200 Subject: [PATCH 171/264] clean up --- src/using/collect_into/collect.rs | 52 ++++++++----------- .../collect_arbitrary.rs | 4 +- .../collect_ordered.rs | 4 +- .../runner/parallel_runner_compute/next.rs | 4 +- .../parallel_runner_compute/next_any.rs | 4 +- .../runner/parallel_runner_compute/reduce.rs | 4 +- 6 files changed, 32 insertions(+), 40 deletions(-) diff --git a/src/using/collect_into/collect.rs b/src/using/collect_into/collect.rs index 58103f4..bc30b46 100644 --- a/src/using/collect_into/collect.rs +++ b/src/using/collect_into/collect.rs @@ -1,14 +1,13 @@ -// use crate::IterationOrder; -// use crate::Params; -// use crate::generic_values::Values; -// use crate::generic_values::runner_results::Infallible; -// use crate::generic_values::runner_results::ParallelCollect; -// use crate::generic_values::runner_results::ParallelCollectArbitrary; -// use crate::orch::{NumSpawned, Orchestrator}; -// use crate::runner::parallel_runner_compute as prc; -// use crate::using::using_variants::Using; -// use orx_concurrent_iter::ConcurrentIter; -// use orx_fixed_vec::IntoConcurrentPinnedVec; +use crate::Params; +use crate::generic_values::runner_results::{ + Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, +}; +use crate::orch::{NumSpawned, Orchestrator}; +use crate::runner::parallel_runner_compute as prc; +use crate::using::using_variants::Using; +use crate::{IterationOrder, generic_values::Values}; +use orx_concurrent_iter::ConcurrentIter; +use orx_fixed_vec::IntoConcurrentPinnedVec; // pub fn map_collect_into( // using: U, @@ -22,20 +21,20 @@ // U: Using, // R: Orchestrator, // I: ConcurrentIter, -// M1: Fn(&mut U::Item, I::Item) -> O + Sync, +// M1: Fn(I::Item) -> O + Sync, // O: Send, // P: IntoConcurrentPinnedVec, // { // match (params.is_sequential(), params.iteration_order) { // (true, _) => ( // NumSpawned::zero(), -// map_collect_into_seq(using, iter, map1, pinned_vec), +// map_collect_into_seq(iter, map1, pinned_vec), // ), // #[cfg(test)] // (false, IterationOrder::Arbitrary) => { -// prc::collect_arbitrary::m(using, orchestrator, params, iter, map1, pinned_vec) +// prc::collect_arbitrary::m(orchestrator, params, iter, map1, pinned_vec) // } -// (false, _) => prc::collect_ordered::m(using, orchestrator, params, iter, map1, pinned_vec), +// (false, _) => prc::collect_ordered::m(orchestrator, params, iter, map1, pinned_vec), // } // } @@ -43,21 +42,18 @@ // where // U: Using, // I: ConcurrentIter, -// M1: Fn(&mut U::Item, I::Item) -> O + Sync, +// M1: Fn(I::Item) -> O + Sync, // O: Send, // P: IntoConcurrentPinnedVec, // { -// let mut u = using.into_inner(); -// let u = &mut u; // let iter = iter.into_seq_iter(); // for i in iter { -// pinned_vec.push(map1(u, i)); +// pinned_vec.push(map1(i)); // } // pinned_vec // } -// pub fn xap_collect_into( -// using: U, +// pub fn xap_collect_into( // orchestrator: R, // params: Params, // iter: I, @@ -65,18 +61,17 @@ // pinned_vec: P, // ) -> (NumSpawned, P) // where -// U: 
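The variants relaxed above now only require `Fn` closures and `Sync` factories, with `create` taking `&self` and no `Send` bound on the created type. A compilable sketch of both variants under those bounds; the `PhantomData<fn() -> T>` field is an assumption used here to pin the type parameter without imposing `Send`/`Sync` on `T`, not necessarily the crate's actual layout:

    use std::marker::PhantomData;

    trait Using: Sync {
        type Item: 'static;
        fn create(&self, thread_idx: usize) -> Self::Item;
        fn into_inner(self) -> Self::Item;
    }

    // clone one initial value for every thread
    struct UsingClone<T: Clone + 'static>(T);
    impl<T: Clone + Sync + 'static> Using for UsingClone<T> {
        type Item = T;
        fn create(&self, _: usize) -> T {
            self.0.clone()
        }
        fn into_inner(self) -> T {
            self.0
        }
    }

    // build a fresh value per thread from its spawn index
    struct UsingFun<T, F>(F, PhantomData<fn() -> T>);
    impl<T: 'static, F: Fn(usize) -> T + Sync> Using for UsingFun<T, F> {
        type Item = T;
        fn create(&self, thread_idx: usize) -> T {
            (self.0)(thread_idx)
        }
        fn into_inner(self) -> T {
            (self.0)(0)
        }
    }

    fn main() {
        let a = UsingClone(vec![0u8; 8]);
        let b = UsingFun(|idx| format!("thread-{idx}"), PhantomData);
        assert_eq!(a.create(3).len(), 8);
        assert_eq!(b.create(3), "thread-3");
    }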
Using, // R: Orchestrator, // I: ConcurrentIter, // Vo: Values, // Vo::Item: Send, -// X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +// X1: Fn(I::Item) -> Vo + Sync, // P: IntoConcurrentPinnedVec, // { // match (params.is_sequential(), params.iteration_order) { // (true, _) => ( // NumSpawned::zero(), -// xap_collect_into_seq(using, iter, xap1, pinned_vec), +// xap_collect_into_seq(iter, xap1, pinned_vec), // ), // (false, IterationOrder::Arbitrary) => { // let (num_threads, result) = @@ -101,20 +96,17 @@ // } // } -// fn xap_collect_into_seq(using: U, iter: I, xap1: X1, mut pinned_vec: P) -> P +// fn xap_collect_into_seq(iter: I, xap1: X1, mut pinned_vec: P) -> P // where -// U: Using, // I: ConcurrentIter, // Vo: Values, // Vo::Item: Send, -// X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, +// X1: Fn(I::Item) -> Vo + Sync, // P: IntoConcurrentPinnedVec, // { -// let mut u = using.into_inner(); -// let u = &mut u; // let iter = iter.into_seq_iter(); // for i in iter { -// let vt = xap1(u, i); +// let vt = xap1(i); // let done = vt.push_to_pinned_vec(&mut pinned_vec); // if Vo::sequential_push_to_stop(done).is_some() { // break; diff --git a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs index 4ab6245..0b67c3d 100644 --- a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/using/runner/parallel_runner_compute/collect_arbitrary.rs @@ -20,7 +20,7 @@ pub fn m( pinned_vec: P, ) -> (NumSpawned, P) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, O: Send, @@ -54,7 +54,7 @@ pub fn x( pinned_vec: P, ) -> (NumSpawned, ParallelCollectArbitrary) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, Vo: Values, diff --git a/src/using/runner/parallel_runner_compute/collect_ordered.rs b/src/using/runner/parallel_runner_compute/collect_ordered.rs index 016ce20..5c5224a 100644 --- a/src/using/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/using/runner/parallel_runner_compute/collect_ordered.rs @@ -18,7 +18,7 @@ pub fn m( pinned_vec: P, ) -> (NumSpawned, P) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, O: Send, @@ -48,7 +48,7 @@ pub fn x( pinned_vec: P, ) -> (NumSpawned, ParallelCollect) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, Vo: Values, diff --git a/src/using/runner/parallel_runner_compute/next.rs b/src/using/runner/parallel_runner_compute/next.rs index e03bd9e..5dbaf0a 100644 --- a/src/using/runner/parallel_runner_compute/next.rs +++ b/src/using/runner/parallel_runner_compute/next.rs @@ -15,7 +15,7 @@ pub fn m( map1: M1, ) -> (NumSpawned, Option) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, O: Send, @@ -51,7 +51,7 @@ pub fn x( xap1: X1, ) -> (NumSpawned, ResultNext) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, Vo: Values, diff --git a/src/using/runner/parallel_runner_compute/next_any.rs b/src/using/runner/parallel_runner_compute/next_any.rs index 5b2de15..303d268 100644 --- a/src/using/runner/parallel_runner_compute/next_any.rs +++ b/src/using/runner/parallel_runner_compute/next_any.rs @@ -15,7 +15,7 @@ pub fn m( map1: M1, ) -> (NumSpawned, Option) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, O: Send, @@ -45,7 +45,7 @@ pub fn x( xap1: X1, ) -> (NumSpawned, ResultNextAny) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, Vo: Values, diff --git 
a/src/using/runner/parallel_runner_compute/reduce.rs b/src/using/runner/parallel_runner_compute/reduce.rs index 3627375..7eb8812 100644 --- a/src/using/runner/parallel_runner_compute/reduce.rs +++ b/src/using/runner/parallel_runner_compute/reduce.rs @@ -16,7 +16,7 @@ pub fn m( reduce: Red, ) -> (NumSpawned, Option) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync, @@ -54,7 +54,7 @@ pub fn x( reduce: Red, ) -> (NumSpawned, ResultReduce) where - U: Using + Sync, + U: Using, C: Orchestrator, I: ConcurrentIter, Vo: Values, From 02fb47874a5c0691069e6000cef69b7bd79d570b Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:37:30 +0200 Subject: [PATCH 172/264] using map collect into --- src/using/collect_into/collect.rs | 87 ++++++++++++++++--------------- 1 file changed, 44 insertions(+), 43 deletions(-) diff --git a/src/using/collect_into/collect.rs b/src/using/collect_into/collect.rs index bc30b46..c4f1ffd 100644 --- a/src/using/collect_into/collect.rs +++ b/src/using/collect_into/collect.rs @@ -3,55 +3,56 @@ use crate::generic_values::runner_results::{ Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, }; use crate::orch::{NumSpawned, Orchestrator}; -use crate::runner::parallel_runner_compute as prc; +use crate::using::runner::parallel_runner_compute as prc; use crate::using::using_variants::Using; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; -// pub fn map_collect_into( -// using: U, -// orchestrator: R, -// params: Params, -// iter: I, -// map1: M1, -// pinned_vec: P, -// ) -> (NumSpawned, P) -// where -// U: Using, -// R: Orchestrator, -// I: ConcurrentIter, -// M1: Fn(I::Item) -> O + Sync, -// O: Send, -// P: IntoConcurrentPinnedVec, -// { -// match (params.is_sequential(), params.iteration_order) { -// (true, _) => ( -// NumSpawned::zero(), -// map_collect_into_seq(iter, map1, pinned_vec), -// ), -// #[cfg(test)] -// (false, IterationOrder::Arbitrary) => { -// prc::collect_arbitrary::m(orchestrator, params, iter, map1, pinned_vec) -// } -// (false, _) => prc::collect_ordered::m(orchestrator, params, iter, map1, pinned_vec), -// } -// } +pub fn map_collect_into( + using: U, + orchestrator: R, + params: Params, + iter: I, + map1: M1, + pinned_vec: P, +) -> (NumSpawned, P) +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, + O: Send, + P: IntoConcurrentPinnedVec, +{ + match (params.is_sequential(), params.iteration_order) { + (true, _) => ( + NumSpawned::zero(), + map_collect_into_seq(using, iter, map1, pinned_vec), + ), + #[cfg(test)] + (false, IterationOrder::Arbitrary) => { + prc::collect_arbitrary::m(using, orchestrator, params, iter, map1, pinned_vec) + } + (false, _) => prc::collect_ordered::m(using, orchestrator, params, iter, map1, pinned_vec), + } +} -// fn map_collect_into_seq(using: U, iter: I, map1: M1, mut pinned_vec: P) -> P -// where -// U: Using, -// I: ConcurrentIter, -// M1: Fn(I::Item) -> O + Sync, -// O: Send, -// P: IntoConcurrentPinnedVec, -// { -// let iter = iter.into_seq_iter(); -// for i in iter { -// pinned_vec.push(map1(i)); -// } -// pinned_vec -// } +fn map_collect_into_seq(using: U, iter: I, map1: M1, mut pinned_vec: P) -> P +where + U: Using, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, + O: Send, + P: IntoConcurrentPinnedVec, +{ + let mut u = using.into_inner(); + let iter = iter.into_seq_iter(); 
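`map_collect_into` picks one of three paths up front: the sequential fallback drains the iterator on the caller's thread with the single value from `using.into_inner()`, while the two parallel paths go through the orchestrator. A condensed sketch of that dispatch, using hypothetical stand-ins for `Params` and the three execution paths:

    #[derive(Clone, Copy)]
    enum IterationOrder {
        Ordered,
        Arbitrary,
    }

    struct Params {
        sequential: bool,
        order: IterationOrder,
    }

    // route to the sequential, arbitrary-order or ordered implementation
    fn collect_dispatch<T>(
        params: &Params,
        seq: impl FnOnce() -> Vec<T>,
        par_arbitrary: impl FnOnce() -> Vec<T>,
        par_ordered: impl FnOnce() -> Vec<T>,
    ) -> Vec<T> {
        match (params.sequential, params.order) {
            (true, _) => seq(), // no thread is spawned at all
            (false, IterationOrder::Arbitrary) => par_arbitrary(), // ConcurrentBag path
            (false, IterationOrder::Ordered) => par_ordered(), // ConcurrentOrderedBag path
        }
    }

    fn main() {
        let params = Params { sequential: true, order: IterationOrder::Ordered };
        let out = collect_dispatch(&params, || vec![1, 2, 3], Vec::new, Vec::new);
        assert_eq!(out, vec![1, 2, 3]);
    }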
+ for i in iter { + pinned_vec.push(map1(&mut u, i)); + } + pinned_vec +} // pub fn xap_collect_into( // orchestrator: R, From b15afff75f316e51ace963199f883c7aab7dc119 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 10:39:16 +0200 Subject: [PATCH 173/264] xap collect into for using computations --- src/using/collect_into/collect.rs | 126 +++++++++++++++--------------- 1 file changed, 65 insertions(+), 61 deletions(-) diff --git a/src/using/collect_into/collect.rs b/src/using/collect_into/collect.rs index c4f1ffd..1042eca 100644 --- a/src/using/collect_into/collect.rs +++ b/src/using/collect_into/collect.rs @@ -1,6 +1,6 @@ use crate::Params; use crate::generic_values::runner_results::{ - Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, + Infallible, ParallelCollect, ParallelCollectArbitrary, }; use crate::orch::{NumSpawned, Orchestrator}; use crate::using::runner::parallel_runner_compute as prc; @@ -54,65 +54,69 @@ where pinned_vec } -// pub fn xap_collect_into( -// orchestrator: R, -// params: Params, -// iter: I, -// xap1: X1, -// pinned_vec: P, -// ) -> (NumSpawned, P) -// where -// R: Orchestrator, -// I: ConcurrentIter, -// Vo: Values, -// Vo::Item: Send, -// X1: Fn(I::Item) -> Vo + Sync, -// P: IntoConcurrentPinnedVec, -// { -// match (params.is_sequential(), params.iteration_order) { -// (true, _) => ( -// NumSpawned::zero(), -// xap_collect_into_seq(iter, xap1, pinned_vec), -// ), -// (false, IterationOrder::Arbitrary) => { -// let (num_threads, result) = -// prc::collect_arbitrary::x(orchestrator, params, iter, xap1, pinned_vec); -// let pinned_vec = match result { -// ParallelCollectArbitrary::AllOrUntilWhileCollected { pinned_vec } => pinned_vec, -// }; -// (num_threads, pinned_vec) -// } -// (false, IterationOrder::Ordered) => { -// let (num_threads, result) = -// prc::collect_ordered::x(orchestrator, params, iter, xap1, pinned_vec); -// let pinned_vec = match result { -// ParallelCollect::AllCollected { pinned_vec } => pinned_vec, -// ParallelCollect::StoppedByWhileCondition { -// pinned_vec, -// stopped_idx: _, -// } => pinned_vec, -// }; -// (num_threads, pinned_vec) -// } -// } -// } +pub fn xap_collect_into( + using: U, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + pinned_vec: P, +) -> (NumSpawned, P) +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + match (params.is_sequential(), params.iteration_order) { + (true, _) => ( + NumSpawned::zero(), + xap_collect_into_seq(using, iter, xap1, pinned_vec), + ), + (false, IterationOrder::Arbitrary) => { + let (num_threads, result) = + prc::collect_arbitrary::x(using, orchestrator, params, iter, xap1, pinned_vec); + let pinned_vec = match result { + ParallelCollectArbitrary::AllOrUntilWhileCollected { pinned_vec } => pinned_vec, + }; + (num_threads, pinned_vec) + } + (false, IterationOrder::Ordered) => { + let (num_threads, result) = + prc::collect_ordered::x(using, orchestrator, params, iter, xap1, pinned_vec); + let pinned_vec = match result { + ParallelCollect::AllCollected { pinned_vec } => pinned_vec, + ParallelCollect::StoppedByWhileCondition { + pinned_vec, + stopped_idx: _, + } => pinned_vec, + }; + (num_threads, pinned_vec) + } + } +} -// fn xap_collect_into_seq(iter: I, xap1: X1, mut pinned_vec: P) -> P -// where -// I: ConcurrentIter, -// Vo: Values, -// Vo::Item: Send, -// X1: Fn(I::Item) -> Vo + Sync, -// P: IntoConcurrentPinnedVec, -// { -// let 
iter = iter.into_seq_iter(); -// for i in iter { -// let vt = xap1(i); -// let done = vt.push_to_pinned_vec(&mut pinned_vec); -// if Vo::sequential_push_to_stop(done).is_some() { -// break; -// } -// } +fn xap_collect_into_seq(using: U, iter: I, xap1: X1, mut pinned_vec: P) -> P +where + U: Using, + I: ConcurrentIter, + Vo: Values, + Vo::Item: Send, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + P: IntoConcurrentPinnedVec, +{ + let mut u = using.into_inner(); + let iter = iter.into_seq_iter(); + for i in iter { + let vt = xap1(&mut u, i); + let done = vt.push_to_pinned_vec(&mut pinned_vec); + if Vo::sequential_push_to_stop(done).is_some() { + break; + } + } -// pinned_vec -// } + pinned_vec +} From 1ec1fafda9fbcf513a9c0535c87c843a0ad94caf Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 11:08:49 +0200 Subject: [PATCH 174/264] collect into implementations for vec and pinned vecs --- src/using/collect_into/fixed_vec.rs | 121 +++++-------- src/using/collect_into/split_vec.rs | 142 ++++++--------- src/using/collect_into/u_par_collect_into.rs | 88 ++-------- src/using/collect_into/vec.rs | 175 +++++++------------ 4 files changed, 185 insertions(+), 341 deletions(-) diff --git a/src/using/collect_into/fixed_vec.rs b/src/using/collect_into/fixed_vec.rs index 14728b9..54b958d 100644 --- a/src/using/collect_into/fixed_vec.rs +++ b/src/using/collect_into/fixed_vec.rs @@ -1,75 +1,50 @@ -// use super::par_collect_into::ParCollectIntoCore; -// use crate::Params; -// use crate::generic_values::runner_results::{Fallibility, Infallible}; -// use crate::generic_values::{TransformableValues, Values}; -// use crate::orch::Orchestrator; -// use alloc::vec::Vec; -// use orx_concurrent_iter::ConcurrentIter; -// use orx_fixed_vec::FixedVec; -// #[cfg(test)] -// use orx_pinned_vec::PinnedVec; +use crate::Params; +use crate::generic_values::TransformableValues; +use crate::generic_values::runner_results::Infallible; +use crate::orch::Orchestrator; +use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore; +use alloc::vec::Vec; +use orx_concurrent_iter::ConcurrentIter; +use orx_fixed_vec::FixedVec; -// impl ParCollectIntoCore for FixedVec -// where -// O: Send + Sync, -// { -// type BridgePinnedVec = Self; +impl UParCollectIntoCore for FixedVec +where + O: Send + Sync, +{ + fn u_m_collect_into( + self, + using: U, + orchestrator: R, + params: Params, + iter: I, + map1: M1, + ) -> Self + where + U: crate::using::using_variants::Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, + { + let vec = Vec::from(self); + FixedVec::from(vec.u_m_collect_into(using, orchestrator, params, iter, map1)) + } -// fn empty(iter_len: Option) -> Self { -// let vec = as ParCollectIntoCore<_>>::empty(iter_len); -// vec.into() -// } - -// fn m_collect_into(self, orchestrator: R, params: Params, iter: I, map1: M1) -> Self -// where -// R: Orchestrator, -// I: ConcurrentIter, -// M1: Fn(I::Item) -> O + Sync, -// O: Send, -// { -// let vec = Vec::from(self); -// FixedVec::from(vec.m_collect_into(orchestrator, params, iter, map1)) -// } - -// fn x_collect_into( -// self, -// orchestrator: R, -// params: Params, -// iter: I, -// xap1: X1, -// ) -> Self -// where -// R: Orchestrator, -// I: ConcurrentIter, -// Vo: TransformableValues, -// X1: Fn(I::Item) -> Vo + Sync, -// { -// let vec = Vec::from(self); -// FixedVec::from(vec.x_collect_into(orchestrator, params, iter, xap1)) -// } - -// fn x_try_collect_into( -// self, -// orchestrator: R, -// params: Params, -// iter: I, -// 
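The `FixedVec` implementations above avoid duplicating any logic by round-tripping through `Vec`: convert, delegate to the `Vec` implementation, convert back. The same shape in miniature, with a toy wrapper standing in for `FixedVec` (the diffs rely on `Vec::from(FixedVec)` and `FixedVec::from(Vec)` existing; the wrapper below only illustrates the pattern):

    // toy stand-in for the FixedVec -> Vec -> FixedVec round trip
    struct Fixed<T>(Vec<T>);

    impl<T> From<Fixed<T>> for Vec<T> {
        fn from(f: Fixed<T>) -> Vec<T> {
            f.0
        }
    }
    impl<T> From<Vec<T>> for Fixed<T> {
        fn from(v: Vec<T>) -> Fixed<T> {
            Fixed(v)
        }
    }

    // the only real implementation lives on Vec
    fn collect_into_vec(mut vec: Vec<u64>, items: impl Iterator<Item = u64>) -> Vec<u64> {
        vec.extend(items);
        vec
    }

    // the wrapper delegates: convert, reuse, convert back
    fn collect_into_fixed(fixed: Fixed<u64>, items: impl Iterator<Item = u64>) -> Fixed<u64> {
        Fixed::from(collect_into_vec(Vec::from(fixed), items))
    }

    fn main() {
        let out = collect_into_fixed(Fixed(vec![1]), 2..=3);
        assert_eq!(Vec::from(out), vec![1, 2, 3]);
    }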
xap1: X1, -// ) -> Result::Error> -// where -// R: Orchestrator, -// I: ConcurrentIter, -// X1: Fn(I::Item) -> Vo + Sync, -// Vo: Values, -// { -// let vec = Vec::from(self); -// vec.x_try_collect_into(orchestrator, params, iter, xap1) -// .map(FixedVec::from) -// } - -// // test - -// #[cfg(test)] -// fn length(&self) -> usize { -// self.len() -// } -// } + fn u_x_collect_into( + self, + using: U, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Self + where + U: crate::using::using_variants::Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + { + let vec = Vec::from(self); + FixedVec::from(vec.u_x_collect_into(using, orchestrator, params, iter, xap1)) + } +} diff --git a/src/using/collect_into/split_vec.rs b/src/using/collect_into/split_vec.rs index ff902bf..47e1bf7 100644 --- a/src/using/collect_into/split_vec.rs +++ b/src/using/collect_into/split_vec.rs @@ -1,90 +1,56 @@ -// use super::par_collect_into::ParCollectIntoCore; -// use crate::Params; -// use crate::collect_into::utils::split_vec_reserve; -// use crate::computational_variants::computations::{ -// map_collect_into, xap_collect_into, xap_try_collect_into, -// }; -// use crate::generic_values::runner_results::{Fallibility, Infallible}; -// use crate::generic_values::{TransformableValues, Values}; -// use crate::orch::Orchestrator; -// use orx_concurrent_iter::ConcurrentIter; -// #[cfg(test)] -// use orx_pinned_vec::PinnedVec; -// use orx_split_vec::{GrowthWithConstantTimeAccess, PseudoDefault, SplitVec}; +use crate::Params; +use crate::collect_into::utils::split_vec_reserve; +use crate::generic_values::TransformableValues; +use crate::generic_values::runner_results::Infallible; +use crate::orch::Orchestrator; +use crate::using::collect_into::collect::{map_collect_into, xap_collect_into}; +use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore; +use orx_concurrent_iter::ConcurrentIter; +use orx_split_vec::{GrowthWithConstantTimeAccess, PseudoDefault, SplitVec}; -// impl ParCollectIntoCore for SplitVec -// where -// O: Send + Sync, -// G: GrowthWithConstantTimeAccess, -// Self: PseudoDefault, -// { -// type BridgePinnedVec = Self; +impl UParCollectIntoCore for SplitVec +where + O: Send + Sync, + G: GrowthWithConstantTimeAccess, + Self: PseudoDefault, +{ + fn u_m_collect_into( + mut self, + using: U, + orchestrator: R, + params: Params, + iter: I, + map1: M1, + ) -> Self + where + U: crate::using::using_variants::Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, + { + split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); + let (_, pinned_vec) = map_collect_into(using, orchestrator, params, iter, map1, self); + pinned_vec + } -// fn empty(iter_len: Option) -> Self { -// let mut vec = Self::pseudo_default(); -// split_vec_reserve(&mut vec, false, iter_len); -// vec -// } - -// fn m_collect_into( -// mut self, -// orchestrator: R, -// params: Params, -// iter: I, -// map1: M1, -// ) -> Self -// where -// R: Orchestrator, -// I: ConcurrentIter, -// M1: Fn(I::Item) -> O + Sync, -// O: Send, -// { -// split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); -// let (_, pinned_vec) = map_collect_into(orchestrator, params, iter, map1, self); -// pinned_vec -// } - -// fn x_collect_into( -// mut self, -// orchestrator: R, -// params: Params, -// iter: I, -// xap1: X1, -// ) -> Self -// where -// R: Orchestrator, -// I: ConcurrentIter, -// Vo: 
TransformableValues, -// X1: Fn(I::Item) -> Vo + Sync, -// { -// split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); -// let (_num_spawned, pinned_vec) = xap_collect_into(orchestrator, params, iter, xap1, self); -// pinned_vec -// } - -// fn x_try_collect_into( -// mut self, -// orchestrator: R, -// params: Params, -// iter: I, -// xap1: X1, -// ) -> Result::Error> -// where -// R: Orchestrator, -// I: ConcurrentIter, -// X1: Fn(I::Item) -> Vo + Sync, -// Vo: Values, -// Self: Sized, -// { -// split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); -// let (_num_spawned, result) = xap_try_collect_into(orchestrator, params, iter, xap1, self); -// result -// } - -// // test - -// #[cfg(test)] -// fn length(&self) -> usize { -// self.len() -// } -// } + fn u_x_collect_into( + mut self, + using: U, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Self + where + U: crate::using::using_variants::Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + { + split_vec_reserve(&mut self, params.is_sequential(), iter.try_get_len()); + let (_num_spawned, pinned_vec) = + xap_collect_into(using, orchestrator, params, iter, xap1, self); + pinned_vec + } +} diff --git a/src/using/collect_into/u_par_collect_into.rs b/src/using/collect_into/u_par_collect_into.rs index 3f8c2d4..e41b63d 100644 --- a/src/using/collect_into/u_par_collect_into.rs +++ b/src/using/collect_into/u_par_collect_into.rs @@ -1,95 +1,43 @@ use crate::Params; -use crate::generic_values::runner_results::{Fallibility, Infallible}; -use crate::generic_values::{TransformableValues, Values}; +use crate::collect_into::ParCollectIntoCore; +use crate::generic_values::TransformableValues; +use crate::generic_values::runner_results::Infallible; use crate::orch::Orchestrator; +use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; -use orx_iterable::Collection; -use orx_pinned_vec::IntoConcurrentPinnedVec; -pub trait UParCollectIntoCore: Collection { - type BridgePinnedVec: IntoConcurrentPinnedVec; - - fn empty(iter_len: Option) -> Self; - - fn m_collect_into(self, orchestrator: R, params: Params, iter: I, map1: M1) -> Self - where - R: Orchestrator, - I: ConcurrentIter, - M1: Fn(I::Item) -> O + Sync; - - fn x_collect_into( +pub trait UParCollectIntoCore: ParCollectIntoCore { + fn u_m_collect_into( self, + using: U, orchestrator: R, params: Params, iter: I, - xap1: X1, + map1: M1, ) -> Self where + U: Using, R: Orchestrator, I: ConcurrentIter, - Vo: TransformableValues, - X1: Fn(I::Item) -> Vo + Sync; + M1: Fn(&mut U::Item, I::Item) -> O + Sync; - fn x_try_collect_into( + fn u_x_collect_into( self, + using: U, orchestrator: R, params: Params, iter: I, xap1: X1, - ) -> Result::Error> + ) -> Self where + U: Using, R: Orchestrator, I: ConcurrentIter, - X1: Fn(I::Item) -> Vo + Sync, - Vo: Values, - Self: Sized; - - // test - - #[cfg(test)] - fn length(&self) -> usize; - - #[cfg(test)] - fn is_empty(&self) -> bool { - self.length() == 0 - } - - #[cfg(test)] - fn is_equal_to<'a>(&self, b: impl orx_iterable::Iterable) -> bool - where - O: PartialEq + 'a, - { - let mut b = b.iter(); - for x in self.iter() { - match b.next() { - Some(y) if x != y => return false, - None => return false, - _ => {} - } - } - - b.next().is_none() - } - - #[cfg(test)] - fn is_equal_to_ref(&self, b: impl orx_iterable::Iterable) -> bool - where - O: PartialEq, - { - let mut b = b.iter(); - for x in self.iter() { - match b.next() { - 
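The rewritten `UParCollectIntoCore` drops everything the non-using `ParCollectIntoCore` already provides (the bridge type, `empty`, the test helpers) and keeps only the two using-aware entry points as a supertrait extension. The shape of that refactor in miniature, with simplified names:

    // core trait: capabilities shared by both computation families
    trait CollectCore: Sized {
        fn empty() -> Self;
    }

    // using-aware extension: only the entry points that differ remain
    trait UsingCollect<T>: CollectCore {
        fn u_collect(self, create: impl Fn(usize) -> T, n: usize) -> Self;
    }

    impl CollectCore for Vec<u64> {
        fn empty() -> Self {
            Vec::new()
        }
    }

    impl UsingCollect<u64> for Vec<u64> {
        fn u_collect(mut self, create: impl Fn(usize) -> u64, n: usize) -> Self {
            // `create` plays the role of the per-thread `using` factory
            self.extend((0..n).map(create));
            self
        }
    }

    fn main() {
        let v = <Vec<u64> as CollectCore>::empty().u_collect(|i| i as u64 * 2, 4);
        assert_eq!(v, vec![0, 2, 4, 6]);
    }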
Some(y) if x != &y => return false, - None => return false, - _ => {} - } - } - - b.next().is_none() - } + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync; } /// Collection types into which outputs of a parallel computations can be collected into. -pub trait UParCollectInto: UParCollectIntoCore + UParCollectIntoCore {} +pub trait UParCollectInto: UParCollectIntoCore {} -impl UParCollectInto for C where C: UParCollectIntoCore + UParCollectIntoCore {} +impl UParCollectInto for C where C: UParCollectIntoCore {} diff --git a/src/using/collect_into/vec.rs b/src/using/collect_into/vec.rs index 6f3bfc8..52d70df 100644 --- a/src/using/collect_into/vec.rs +++ b/src/using/collect_into/vec.rs @@ -1,112 +1,67 @@ -// use super::par_collect_into::ParCollectIntoCore; -// use crate::Params; -// use crate::collect_into::utils::extend_vec_from_split; -// use crate::computational_variants::computations::map_collect_into; -// use crate::generic_values::runner_results::{Fallibility, Infallible}; -// use crate::generic_values::{TransformableValues, Values}; -// use crate::orch::Orchestrator; -// use alloc::vec::Vec; -// use orx_concurrent_iter::ConcurrentIter; -// use orx_fixed_vec::FixedVec; -// use orx_split_vec::SplitVec; +use crate::Params; +use crate::collect_into::utils::extend_vec_from_split; +use crate::generic_values::TransformableValues; +use crate::generic_values::runner_results::Infallible; +use crate::orch::Orchestrator; +use crate::using::collect_into::collect::map_collect_into; +use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore; +use crate::using::using_variants::Using; +use alloc::vec::Vec; +use orx_concurrent_iter::ConcurrentIter; +use orx_fixed_vec::FixedVec; +use orx_split_vec::SplitVec; -// impl ParCollectIntoCore for Vec -// where -// O: Send + Sync, -// { -// type BridgePinnedVec = FixedVec; +impl UParCollectIntoCore for Vec +where + O: Send + Sync, +{ + fn u_m_collect_into( + mut self, + using: U, + orchestrator: R, + params: Params, + iter: I, + map1: M1, + ) -> Self + where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, + { + match iter.try_get_len() { + None => { + let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + let split_vec = split_vec.u_m_collect_into(using, orchestrator, params, iter, map1); + extend_vec_from_split(self, split_vec) + } + Some(len) => { + self.reserve(len); + let fixed_vec = FixedVec::from(self); + let (_, fixed_vec) = + map_collect_into(using, orchestrator, params, iter, map1, fixed_vec); + Vec::from(fixed_vec) + } + } + } -// fn empty(iter_len: Option) -> Self { -// match iter_len { -// Some(len) => Vec::with_capacity(len), -// None => Vec::new(), -// } -// } - -// fn m_collect_into( -// mut self, -// orchestrator: R, -// params: Params, -// iter: I, -// map1: M1, -// ) -> Self -// where -// R: Orchestrator, -// I: ConcurrentIter, -// M1: Fn(I::Item) -> O + Sync, -// O: Send, -// { -// match iter.try_get_len() { -// None => { -// let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); -// let split_vec = split_vec.m_collect_into(orchestrator, params, iter, map1); -// extend_vec_from_split(self, split_vec) -// } -// Some(len) => { -// self.reserve(len); -// let fixed_vec = FixedVec::from(self); -// let (_, fixed_vec) = map_collect_into(orchestrator, params, iter, map1, fixed_vec); -// Vec::from(fixed_vec) -// } -// } -// } - -// fn x_collect_into( -// self, -// orchestrator: R, -// params: Params, -// iter: I, -// xap1: 
X1, -// ) -> Self -// where -// R: Orchestrator, -// I: ConcurrentIter, -// Vo: TransformableValues, -// X1: Fn(I::Item) -> Vo + Sync, -// { -// let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); -// let split_vec = split_vec.x_collect_into(orchestrator, params, iter, xap1); -// extend_vec_from_split(self, split_vec) -// } - -// fn x_try_collect_into( -// self, -// orchestrator: R, -// params: Params, -// iter: I, -// xap1: X1, -// ) -> Result::Error> -// where -// R: Orchestrator, -// I: ConcurrentIter, -// X1: Fn(I::Item) -> Vo + Sync, -// Vo: Values, -// Self: Sized, -// { -// let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); -// let result = split_vec.x_try_collect_into(orchestrator, params, iter, xap1); -// result.map(|split_vec| extend_vec_from_split(self, split_vec)) -// } - -// // test - -// #[cfg(test)] -// fn length(&self) -> usize { -// self.len() -// } -// } - -// // #[cfg(test)] -// // mod tsts { -// // use crate::*; -// // use alloc::vec::Vec; -// // use orx_split_vec::SplitVec; - -// // #[test] -// // fn abc() { -// // fn take>(c: C) {} - -// // take(SplitVec::new()); -// // take(Vec::new()); -// // } -// // } + fn u_x_collect_into( + self, + using: U, + orchestrator: R, + params: Params, + iter: I, + xap1: X1, + ) -> Self + where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + Vo: TransformableValues, + X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, + { + let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); + let split_vec = split_vec.u_x_collect_into(using, orchestrator, params, iter, xap1); + extend_vec_from_split(self, split_vec) + } +} From 9b4b71e57bd305dde3f3359995a9c2f4cf7504d0 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 11:16:29 +0200 Subject: [PATCH 175/264] complete UParXap implementation --- src/collect_into/par_collect_into.rs | 13 ++++++++++--- src/using/collect_into/mod.rs | 2 ++ src/using/collect_into/u_par_collect_into.rs | 5 ----- src/using/computational_variants/u_xap.rs | 5 ++--- src/using/mod.rs | 1 + src/using_old/collect_into/fixed_vec.rs | 12 ++++++------ src/using_old/collect_into/mod.rs | 2 +- src/using_old/collect_into/split_vec.rs | 8 ++++---- src/using_old/collect_into/u_par_collect_into.rs | 6 +++--- src/using_old/collect_into/vec.rs | 12 ++++++------ src/using_old/computational_variants/u_map.rs | 2 +- src/using_old/computational_variants/u_par.rs | 2 +- src/using_old/computational_variants/u_xap.rs | 2 +- src/using_old/mod.rs | 2 +- 14 files changed, 39 insertions(+), 35 deletions(-) diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index c03c319..e982366 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -2,7 +2,8 @@ use crate::Params; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; -use crate::using_old::UParCollectIntoCore; +use crate::using::UParCollectIntoCore; +use crate::using_old::UParCollectIntoCoreOld; use orx_concurrent_iter::ConcurrentIter; use orx_iterable::Collection; use orx_pinned_vec::IntoConcurrentPinnedVec; @@ -91,6 +92,12 @@ pub trait ParCollectIntoCore: Collection { } /// Collection types into which outputs of a parallel computations can be collected into. 
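An aside on the Vec implementation completed above: collection is length-aware, reserving once when the concurrent iterator reports its length and falling back to a growing buffer that is merged afterwards. A minimal standalone sketch of that strategy, using plain Vec stand-ins for the crate's FixedVec and SplitVec (function name hypothetical):

fn collect_with_len_hint<T>(
    len_hint: Option<usize>,
    items: impl Iterator<Item = T>,
    mut out: Vec<T>,
) -> Vec<T> {
    match len_hint {
        Some(len) => {
            // known length: one reservation up front, then fill in place
            out.reserve(len);
            out.extend(items);
            out
        }
        None => {
            // unknown length: grow a separate buffer, then merge it,
            // analogous to extend_vec_from_split above
            let grown: Vec<T> = items.collect();
            out.extend(grown);
            out
        }
    }
}

fn main() {
    assert_eq!(
        collect_with_len_hint(Some(3), [1, 2, 3].into_iter(), Vec::new()),
        vec![1, 2, 3]
    );
    assert_eq!(
        collect_with_len_hint(None, (0..5).filter(|x| x % 2 == 0), Vec::new()),
        vec![0, 2, 4]
    );
}

The single up-front reservation is what makes the known-length path allocation-friendly; the unknown-length path trades one extra merge for never over- or under-reserving.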
-pub trait ParCollectInto: ParCollectIntoCore + UParCollectIntoCore {} +pub trait ParCollectInto: + ParCollectIntoCore + UParCollectIntoCore + UParCollectIntoCoreOld +{ +} -impl ParCollectInto for C where C: ParCollectIntoCore + UParCollectIntoCore {} +impl ParCollectInto for C where + C: ParCollectIntoCore + UParCollectIntoCore + UParCollectIntoCoreOld +{ +} diff --git a/src/using/collect_into/mod.rs b/src/using/collect_into/mod.rs index 898b934..1a85260 100644 --- a/src/using/collect_into/mod.rs +++ b/src/using/collect_into/mod.rs @@ -3,3 +3,5 @@ mod fixed_vec; mod split_vec; mod u_par_collect_into; mod vec; + +pub use u_par_collect_into::UParCollectIntoCore; diff --git a/src/using/collect_into/u_par_collect_into.rs b/src/using/collect_into/u_par_collect_into.rs index e41b63d..f4f113a 100644 --- a/src/using/collect_into/u_par_collect_into.rs +++ b/src/using/collect_into/u_par_collect_into.rs @@ -36,8 +36,3 @@ pub trait UParCollectIntoCore: ParCollectIntoCore { Vo: TransformableValues, X1: Fn(&mut U::Item, I::Item) -> Vo + Sync; } - -/// Collection types into which outputs of a parallel computations can be collected into. -pub trait UParCollectInto: UParCollectIntoCore {} - -impl UParCollectInto for C where C: UParCollectIntoCore {} diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index 702bb04..ccd59ec 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -209,9 +209,8 @@ where where C: ParCollectInto, { - // let (using, orchestrator, params, iter, x1) = self.destruct(); - // output.x_collect_into(using, orchestrator, params, iter, x1) - todo!() + let (using, orchestrator, params, iter, x1) = self.destruct(); + output.u_x_collect_into(using, orchestrator, params, iter, x1) } fn reduce(self, reduce: Reduce) -> Option diff --git a/src/using/mod.rs b/src/using/mod.rs index e41688f..d3c088c 100644 --- a/src/using/mod.rs +++ b/src/using/mod.rs @@ -4,4 +4,5 @@ mod runner; mod u_par_iter; mod using_variants; +pub use collect_into::UParCollectIntoCore; pub use u_par_iter::ParIterUsing; diff --git a/src/using_old/collect_into/fixed_vec.rs b/src/using_old/collect_into/fixed_vec.rs index 7b3d7b6..8dbaadf 100644 --- a/src/using_old/collect_into/fixed_vec.rs +++ b/src/using_old/collect_into/fixed_vec.rs @@ -2,16 +2,16 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::runner::ParallelRunner; use crate::using_old::Using; -use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCore; +use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCoreOld; use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; -impl UParCollectIntoCore for FixedVec +impl UParCollectIntoCoreOld for FixedVec where O: Send + Sync, { - fn u_m_collect_into(self, m: UM) -> Self + fn u_m_collect_into_old(self, m: UM) -> Self where R: ParallelRunner, U: Using, @@ -19,10 +19,10 @@ where M1: Fn(&mut U::Item, I::Item) -> O + Sync, { let vec = Vec::from(self); - FixedVec::from(vec.u_m_collect_into::(m)) + FixedVec::from(vec.u_m_collect_into_old::(m)) } - fn u_x_collect_into(self, x: UX) -> Self + fn u_x_collect_into_old(self, x: UX) -> Self where R: ParallelRunner, U: Using, @@ -32,6 +32,6 @@ where M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, { let vec = Vec::from(self); - FixedVec::from(vec.u_x_collect_into::(x)) + FixedVec::from(vec.u_x_collect_into_old::(x)) } } diff --git 
a/src/using_old/collect_into/mod.rs b/src/using_old/collect_into/mod.rs index e97e9a7..d1277a2 100644 --- a/src/using_old/collect_into/mod.rs +++ b/src/using_old/collect_into/mod.rs @@ -3,4 +3,4 @@ mod split_vec; mod u_par_collect_into; mod vec; -pub use u_par_collect_into::UParCollectIntoCore; +pub use u_par_collect_into::UParCollectIntoCoreOld; diff --git a/src/using_old/collect_into/split_vec.rs b/src/using_old/collect_into/split_vec.rs index adfa43b..98a8c35 100644 --- a/src/using_old/collect_into/split_vec.rs +++ b/src/using_old/collect_into/split_vec.rs @@ -3,18 +3,18 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::runner::ParallelRunner; use crate::using_old::Using; -use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCore; +use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCoreOld; use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; use orx_split_vec::{GrowthWithConstantTimeAccess, PseudoDefault, SplitVec}; -impl UParCollectIntoCore for SplitVec +impl UParCollectIntoCoreOld for SplitVec where O: Send + Sync, G: GrowthWithConstantTimeAccess, Self: PseudoDefault, { - fn u_m_collect_into(mut self, m: UM) -> Self + fn u_m_collect_into_old(mut self, m: UM) -> Self where R: ParallelRunner, U: Using, @@ -30,7 +30,7 @@ where pinned_vec } - fn u_x_collect_into(mut self, x: UX) -> Self + fn u_x_collect_into_old(mut self, x: UX) -> Self where R: ParallelRunner, U: Using, diff --git a/src/using_old/collect_into/u_par_collect_into.rs b/src/using_old/collect_into/u_par_collect_into.rs index ec52de0..8551119 100644 --- a/src/using_old/collect_into/u_par_collect_into.rs +++ b/src/using_old/collect_into/u_par_collect_into.rs @@ -6,15 +6,15 @@ use crate::using_old::Using; use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; -pub trait UParCollectIntoCore: ParCollectIntoCore { - fn u_m_collect_into(self, m: UM) -> Self +pub trait UParCollectIntoCoreOld: ParCollectIntoCore { + fn u_m_collect_into_old(self, m: UM) -> Self where R: ParallelRunner, U: Using, I: ConcurrentIter, M1: Fn(&mut U::Item, I::Item) -> O + Sync; - fn u_x_collect_into(self, x: UX) -> Self + fn u_x_collect_into_old(self, x: UX) -> Self where R: ParallelRunner, U: Using, diff --git a/src/using_old/collect_into/vec.rs b/src/using_old/collect_into/vec.rs index 961ed99..4361283 100644 --- a/src/using_old/collect_into/vec.rs +++ b/src/using_old/collect_into/vec.rs @@ -3,17 +3,17 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Infallible; use crate::runner::ParallelRunner; use crate::using_old::Using; -use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCore; +use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCoreOld; use crate::using_old::computations::{UM, UX}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; use orx_split_vec::SplitVec; -impl UParCollectIntoCore for Vec +impl UParCollectIntoCoreOld for Vec where O: Send + Sync, { - fn u_m_collect_into(mut self, m: UM) -> Self + fn u_m_collect_into_old(mut self, m: UM) -> Self where R: ParallelRunner, U: Using, @@ -23,7 +23,7 @@ where match m.iter().try_get_len() { None => { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let split_vec = split_vec.u_m_collect_into::(m); + let split_vec = split_vec.u_m_collect_into_old::(m); extend_vec_from_split(self, split_vec) } Some(len) => { @@ -35,7 +35,7 @@ 
where } } - fn u_x_collect_into(self, x: UX) -> Self + fn u_x_collect_into_old(self, x: UX) -> Self where R: ParallelRunner, U: Using, @@ -44,7 +44,7 @@ where M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, { let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let split_vec = split_vec.u_x_collect_into::(x); + let split_vec = split_vec.u_x_collect_into_old::(x); extend_vec_from_split(self, split_vec) } } diff --git a/src/using_old/computational_variants/u_map.rs b/src/using_old/computational_variants/u_map.rs index 31c9ea2..52aad3e 100644 --- a/src/using_old/computational_variants/u_map.rs +++ b/src/using_old/computational_variants/u_map.rs @@ -163,7 +163,7 @@ where where C: ParCollectInto, { - output.u_m_collect_into::(self.um) + output.u_m_collect_into_old::(self.um) } // reduce diff --git a/src/using_old/computational_variants/u_par.rs b/src/using_old/computational_variants/u_par.rs index 70bb11c..c8c412e 100644 --- a/src/using_old/computational_variants/u_par.rs +++ b/src/using_old/computational_variants/u_par.rs @@ -155,7 +155,7 @@ where where C: ParCollectInto, { - output.u_m_collect_into::(self.u_m()) + output.u_m_collect_into_old::(self.u_m()) } // reduce diff --git a/src/using_old/computational_variants/u_xap.rs b/src/using_old/computational_variants/u_xap.rs index 3875e79..53c9467 100644 --- a/src/using_old/computational_variants/u_xap.rs +++ b/src/using_old/computational_variants/u_xap.rs @@ -203,7 +203,7 @@ where where C: ParCollectInto, { - output.u_x_collect_into::(self.ux) + output.u_x_collect_into_old::(self.ux) } // reduce diff --git a/src/using_old/mod.rs b/src/using_old/mod.rs index 22df421..ecd419d 100644 --- a/src/using_old/mod.rs +++ b/src/using_old/mod.rs @@ -6,6 +6,6 @@ mod runner; mod u_par_iter; mod using_variants; -pub(crate) use collect_into::UParCollectIntoCore; +pub(crate) use collect_into::UParCollectIntoCoreOld; pub use u_par_iter::ParIterUsingOld; pub use using_variants::{Using, UsingClone, UsingFun}; From a8651f4f69e5165c5f561aa8ab2b01dd07af379c Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 11:29:24 +0200 Subject: [PATCH 176/264] complete UParMap part-iter implementation --- src/using/computational_variants/mod.rs | 1 + src/using/computational_variants/u_map.rs | 194 ++++++++++++++++++++++ src/using/computational_variants/u_xap.rs | 3 +- 3 files changed, 197 insertions(+), 1 deletion(-) create mode 100644 src/using/computational_variants/u_map.rs diff --git a/src/using/computational_variants/mod.rs b/src/using/computational_variants/mod.rs index 92a663a..2b85d50 100644 --- a/src/using/computational_variants/mod.rs +++ b/src/using/computational_variants/mod.rs @@ -1 +1,2 @@ +mod u_map; mod u_xap; diff --git a/src/using/computational_variants/u_map.rs b/src/using/computational_variants/u_map.rs new file mode 100644 index 0000000..38e244b --- /dev/null +++ b/src/using/computational_variants/u_map.rs @@ -0,0 +1,194 @@ +use crate::ParIterUsing; +use crate::generic_values::Vector; +use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::using::computational_variants::u_xap::UParXap; +use crate::using::runner::parallel_runner_compute as prc; +use crate::using::using_variants::Using; +use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params}; +use orx_concurrent_iter::ConcurrentIter; + +/// A parallel iterator that maps inputs. 
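The struct that follows stores a single (per-thread state, item) closure, and the adapters below compose onto it while keeping the same &mut state parameter, as its map implementation shows. A standalone sketch of that composition, with simplified assumed types:

fn compose<U, T, M, O>(
    m1: impl Fn(&mut U, T) -> M,
    m2: impl Fn(&mut U, M) -> O,
) -> impl Fn(&mut U, T) -> O {
    move |u: &mut U, t: T| {
        let mid = m1(u, t); // the first stage may mutate the thread-local state
        m2(u, mid) // the second stage sees those mutations on the same borrow
    }
}

fn main() {
    let count_then_double = compose(
        |calls: &mut usize, x: i32| {
            *calls += 1;
            x + 1
        },
        |_calls: &mut usize, x: i32| x * 2,
    );
    let mut calls = 0;
    assert_eq!(count_then_double(&mut calls, 20), 42);
    assert_eq!(calls, 1);
}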
+pub struct UParMap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, +{ + using: U, + orchestrator: R, + params: Params, + iter: I, + map1: M1, +} + +impl UParMap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, +{ + pub(crate) fn new(using: U, orchestrator: R, params: Params, iter: I, map1: M1) -> Self { + Self { + using, + orchestrator, + params, + iter, + map1, + } + } + + pub(crate) fn destruct(self) -> (U, R, Params, I, M1) { + ( + self.using, + self.orchestrator, + self.params, + self.iter, + self.map1, + ) + } +} + +unsafe impl Send for UParMap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, +{ +} + +unsafe impl Sync for UParMap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, +{ +} + +impl ParIterUsing for UParMap +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, + M1: Fn(&mut U::Item, I::Item) -> O + Sync, +{ + type Item = O; + + fn con_iter(&self) -> &impl ConcurrentIter { + &self.iter + } + + fn params(&self) -> Params { + self.params + } + + fn num_threads(mut self, num_threads: impl Into) -> Self { + self.params = self.params.with_num_threads(num_threads); + self + } + + fn chunk_size(mut self, chunk_size: impl Into) -> Self { + self.params = self.params.with_chunk_size(chunk_size); + self + } + + fn iteration_order(mut self, collect: IterationOrder) -> Self { + self.params = self.params.with_collect_ordering(collect); + self + } + + fn with_runner( + self, + orchestrator: Q, + ) -> impl ParIterUsing { + let (using, _, params, iter, x1) = self.destruct(); + UParMap::new(using, orchestrator, params, iter, x1) + } + + fn map(self, map: Map) -> impl ParIterUsing + where + Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone, + { + let (using, orchestrator, params, iter, m1) = self.destruct(); + let m1 = move |u: &mut U::Item, x: I::Item| { + let v1 = m1(u, x); + map(u, v1) + }; + UParMap::new(using, orchestrator, params, iter, m1) + } + + fn filter(self, filter: Filter) -> impl ParIterUsing + where + Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, + { + let (using, orchestrator, params, iter, m1) = self.destruct(); + + let x1 = move |u: &mut U::Item, i: I::Item| { + let value = m1(u, i); + filter(u, &value).then_some(value) + }; + UParXap::new(using, orchestrator, params, iter, x1) + } + + fn flat_map( + self, + flat_map: FlatMap, + ) -> impl ParIterUsing + where + IOut: IntoIterator, + FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone, + { + let (using, orchestrator, params, iter, m1) = self.destruct(); + let x1 = move |u: &mut U::Item, i: I::Item| { + let a = m1(u, i); + Vector(flat_map(u, a)) + }; + UParXap::new(using, orchestrator, params, iter, x1) + } + + fn filter_map( + self, + filter_map: FilterMap, + ) -> impl ParIterUsing + where + FilterMap: Fn(&mut U::Item, Self::Item) -> Option + Sync + Clone, + { + let (using, orchestrator, params, iter, m1) = self.destruct(); + let x1 = move |u: &mut U::Item, i: I::Item| { + let a = m1(u, i); + filter_map(u, a) + }; + UParXap::new(using, orchestrator, params, iter, x1) + } + + fn collect_into(self, output: C) -> C + where + C: ParCollectInto, + { + let (using, orchestrator, params, iter, m1) = self.destruct(); + output.u_m_collect_into(using, orchestrator, params, iter, m1) + } + + fn reduce(self, reduce: Reduce) -> Option + where + Self::Item: Send, + Reduce: Fn(&mut U::Item, 
Self::Item, Self::Item) -> Self::Item + Sync, + { + let (using, orchestrator, params, iter, m1) = self.destruct(); + prc::reduce::m(using, orchestrator, params, iter, m1, reduce).1 + } + + fn first(self) -> Option + where + Self::Item: Send, + { + let (using, orchestrator, params, iter, m1) = self.destruct(); + match params.iteration_order { + IterationOrder::Ordered => prc::next::m(using, orchestrator, params, iter, m1).1, + IterationOrder::Arbitrary => prc::next_any::m(using, orchestrator, params, iter, m1).1, + } + } +} diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index ccd59ec..8ff6574 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -1,8 +1,9 @@ +use crate::using::runner::parallel_runner_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterUsing, Params, generic_values::{TransformableValues, runner_results::Infallible}, orch::{DefaultOrchestrator, Orchestrator}, - using::{runner::parallel_runner_compute as prc, using_variants::Using}, + using::using_variants::Using, }; use orx_concurrent_iter::ConcurrentIter; // use crate::runner::parallel_runner_compute as prc; From edc2c81272eada1760677acae6edcb62c0f07951 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 11:38:45 +0200 Subject: [PATCH 177/264] complete UPar implementation --- src/computational_variants/par.rs | 5 +- src/using/computational_variants/mod.rs | 1 + src/using/computational_variants/u_par.rs | 172 ++++++++++++++++++++++ 3 files changed, 174 insertions(+), 4 deletions(-) create mode 100644 src/using/computational_variants/u_par.rs diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 630a730..74312dc 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -12,7 +12,6 @@ use crate::{ use crate::{IntoParIter, ParIterResult}; use orx_concurrent_iter::chain::ChainKnownLenI; use orx_concurrent_iter::{ConcurrentIter, ExactSizeConcurrentIter}; -use std::marker::PhantomData; /// A parallel iterator. 
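The hunks below drop Par's PhantomData field: once the orchestrator is held as a real field, the R parameter is already anchored by the struct and the marker adds nothing. A tiny standalone illustration of the same point:

use std::marker::PhantomData;

struct ParWithMarker<R> {
    orchestrator: R,
    phantom: PhantomData<R>, // redundant: `orchestrator: R` already uses R
}

struct ParWithoutMarker<R> {
    orchestrator: R, // R is tied to the struct by the field itself
}

fn main() {
    let _with = ParWithMarker { orchestrator: (), phantom: PhantomData };
    let _without = ParWithoutMarker { orchestrator: () };
}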
pub struct Par @@ -23,7 +22,6 @@ where orchestrator: R, params: Params, iter: I, - phantom: PhantomData, } impl Par @@ -36,7 +34,6 @@ where orchestrator, iter, params, - phantom: PhantomData, } } @@ -145,7 +142,7 @@ where FlatMap: Fn(Self::Item) -> IOut + Sync, { let (orchestrator, params, iter) = self.destruct(); - let x1 = move |i: Self::Item| Vector(flat_map(i)); // TODO: inline + let x1 = move |i: Self::Item| Vector(flat_map(i)); ParXap::new(orchestrator, params, iter, x1) } diff --git a/src/using/computational_variants/mod.rs b/src/using/computational_variants/mod.rs index 2b85d50..38579ce 100644 --- a/src/using/computational_variants/mod.rs +++ b/src/using/computational_variants/mod.rs @@ -1,2 +1,3 @@ mod u_map; +mod u_par; mod u_xap; diff --git a/src/using/computational_variants/u_par.rs b/src/using/computational_variants/u_par.rs new file mode 100644 index 0000000..3dd7671 --- /dev/null +++ b/src/using/computational_variants/u_par.rs @@ -0,0 +1,172 @@ +use crate::ParIterUsing; +use crate::default_fns::u_map_self; +use crate::generic_values::Vector; +use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::using::computational_variants::u_map::UParMap; +use crate::using::computational_variants::u_xap::UParXap; +use crate::using::runner::parallel_runner_compute as prc; +use crate::using::using_variants::Using; +use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params}; +use orx_concurrent_iter::ConcurrentIter; + +/// A parallel iterator. +pub struct UPar +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, +{ + using: U, + orchestrator: R, + params: Params, + iter: I, +} + +impl UPar +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, +{ + pub(crate) fn new(using: U, orchestrator: R, params: Params, iter: I) -> Self { + Self { + using, + orchestrator, + params, + iter, + } + } + + pub(crate) fn destruct(self) -> (U, R, Params, I) { + (self.using, self.orchestrator, self.params, self.iter) + } +} + +unsafe impl Send for UPar +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, +{ +} + +unsafe impl Sync for UPar +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, +{ +} + +impl ParIterUsing for UPar +where + U: Using, + R: Orchestrator, + I: ConcurrentIter, +{ + type Item = I::Item; + + fn con_iter(&self) -> &impl ConcurrentIter { + &self.iter + } + + fn params(&self) -> Params { + self.params + } + + fn num_threads(mut self, num_threads: impl Into) -> Self { + self.params = self.params.with_num_threads(num_threads); + self + } + + fn chunk_size(mut self, chunk_size: impl Into) -> Self { + self.params = self.params.with_chunk_size(chunk_size); + self + } + + fn iteration_order(mut self, collect: IterationOrder) -> Self { + self.params = self.params.with_collect_ordering(collect); + self + } + + fn with_runner( + self, + orchestrator: Q, + ) -> impl ParIterUsing { + let (using, _, params, iter) = self.destruct(); + UPar::new(using, orchestrator, params, iter) + } + + fn map(self, map: Map) -> impl ParIterUsing + where + Map: Fn(&mut ::Item, Self::Item) -> Out + Sync + Clone, + { + let (using, orchestrator, params, iter) = self.destruct(); + UParMap::new(using, orchestrator, params, iter, map) + } + + fn filter(self, filter: Filter) -> impl ParIterUsing + where + Filter: Fn(&mut ::Item, &Self::Item) -> bool + Sync + Clone, + { + let (using, orchestrator, params, iter) = self.destruct(); + let x1 = move |u: &mut U::Item, i: Self::Item| filter(u, &i).then_some(i); + UParXap::new(using, orchestrator, params, iter, x1) + } + + fn flat_map( + 
self, + flat_map: FlatMap, + ) -> impl ParIterUsing + where + IOut: IntoIterator, + FlatMap: Fn(&mut ::Item, Self::Item) -> IOut + Sync + Clone, + { + let (using, orchestrator, params, iter) = self.destruct(); + let x1 = move |u: &mut U::Item, i: Self::Item| Vector(flat_map(u, i)); + UParXap::new(using, orchestrator, params, iter, x1) + } + + fn filter_map( + self, + filter_map: FilterMap, + ) -> impl ParIterUsing + where + FilterMap: Fn(&mut ::Item, Self::Item) -> Option + Sync + Clone, + { + let (using, orchestrator, params, iter) = self.destruct(); + UParXap::new(using, orchestrator, params, iter, filter_map) + } + + fn collect_into(self, output: C) -> C + where + C: ParCollectInto, + { + let (using, orchestrator, params, iter) = self.destruct(); + output.u_m_collect_into(using, orchestrator, params, iter, u_map_self) + } + + fn reduce(self, reduce: Reduce) -> Option + where + Self::Item: Send, + Reduce: Fn(&mut ::Item, Self::Item, Self::Item) -> Self::Item + Sync, + { + let (using, orchestrator, params, iter) = self.destruct(); + prc::reduce::m(using, orchestrator, params, iter, u_map_self, reduce).1 + } + + fn first(self) -> Option + where + Self::Item: Send, + { + let (using, orchestrator, params, iter) = self.destruct(); + match params.iteration_order { + IterationOrder::Ordered => { + prc::next::m(using, orchestrator, params, iter, u_map_self).1 + } + IterationOrder::Arbitrary => { + prc::next_any::m(using, orchestrator, params, iter, u_map_self).1 + } + } + } +} From aa9a63742b4ef90185f5a3d17b0cf160ebda6e25 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 11:48:45 +0200 Subject: [PATCH 178/264] using transformations fixed --- src/computational_variants/map.rs | 32 ++++++++++++------------- src/computational_variants/par.rs | 25 +++++++++---------- src/computational_variants/xap.rs | 24 +++++++++---------- src/par_iter.rs | 14 +++++------ src/using/computational_variants/mod.rs | 4 ++++ src/using/mod.rs | 2 ++ src/using/using_variants.rs | 8 ++++--- 7 files changed, 57 insertions(+), 52 deletions(-) diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index fc781b6..6c5a134 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -5,10 +5,8 @@ use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; -use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsingOld, Params, - using_old::{UsingClone, UsingFun, computational_variants::UParMap}, -}; +use crate::using::{UParMap, UsingClone, UsingFun}; +use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator that maps inputs. 
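UPar above reuses the map code paths for collect_into, reduce, and first by passing an identity mapping. The patch imports u_map_self from default_fns without showing its body; its assumed shape is an identity map that ignores the per-thread state:

// assumed shape of the helper, not shown in the patch itself
fn u_map_self<U, T>(_state: &mut U, item: T) -> T {
    item
}

fn main() {
    let mut state = ();
    assert_eq!(u_map_self(&mut state, 7), 7);
}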
@@ -103,28 +101,28 @@ where fn using( self, using: F, - ) -> impl ParIterUsingOld, R, Item = >::Item> + ) -> impl crate::ParIterUsing, R, Item = >::Item> where - U: Send + 'static, - F: FnMut(usize) -> U, + U: 'static, + F: Fn(usize) -> U + Sync, { let using = UsingFun::new(using); - let (orchestrator, params, iter, m1) = self.destruct(); - let m1 = move |_: &mut U, t: I::Item| m1(t); - UParMap::new(using, params, iter, m1) + let (orchestrator, params, iter, x1) = self.destruct(); + let m1 = move |_: &mut U, t: I::Item| x1(t); + UParMap::new(using, orchestrator, params, iter, m1) } fn using_clone( self, - using: U, - ) -> impl ParIterUsingOld, R, Item = >::Item> + value: U, + ) -> impl crate::ParIterUsing, R, Item = >::Item> where - U: Clone + Send + 'static, + U: Clone + 'static, { - let using = UsingClone::new(using); - let (orchestrator, params, iter, m1) = self.destruct(); - let m1 = move |_: &mut U, t: I::Item| m1(t); - UParMap::new(using, params, iter, m1) + let using = UsingClone::new(value); + let (orchestrator, params, iter, x1) = self.destruct(); + let m1 = move |_: &mut U, t: I::Item| x1(t); + UParMap::new(using, orchestrator, params, iter, m1) } // computation transformations diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 74312dc..19a2f17 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -4,10 +4,9 @@ use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; +use crate::using::{UPar, UsingClone, UsingFun}; use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsingOld, Params, - default_fns::map_self, - using_old::{UsingClone, UsingFun, computational_variants::UPar}, + ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params, default_fns::map_self, }; use crate::{IntoParIter, ParIterResult}; use orx_concurrent_iter::chain::ChainKnownLenI; @@ -97,24 +96,26 @@ where fn using( self, using: F, - ) -> impl ParIterUsingOld, R, Item = >::Item> + ) -> impl crate::ParIterUsing, R, Item = >::Item> where - U: Send + 'static, - F: FnMut(usize) -> U, + U: 'static, + F: Fn(usize) -> U + Sync, { let using = UsingFun::new(using); - UPar::new(using, self.params, self.iter) + let (orchestrator, params, iter) = self.destruct(); + UPar::new(using, orchestrator, params, iter) } fn using_clone( self, - using: U, - ) -> impl ParIterUsingOld, R, Item = >::Item> + value: U, + ) -> impl crate::ParIterUsing, R, Item = >::Item> where - U: Clone + Send + 'static, + U: Clone + 'static, { - let using = UsingClone::new(using); - UPar::new(using, self.params, self.iter) + let using = UsingClone::new(value); + let (orchestrator, params, iter) = self.destruct(); + UPar::new(using, orchestrator, params, iter) } // computation transformations diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 612021a..cda0e4c 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -5,10 +5,8 @@ use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; -use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, ParIterUsingOld, Params, - using_old::{UsingClone, UsingFun, computational_variants::UParXap}, -}; +use crate::using::{UParXap, 
UsingClone, UsingFun}; +use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator that xaps inputs. @@ -110,28 +108,28 @@ where fn using( self, using: F, - ) -> impl ParIterUsingOld, R, Item = >::Item> + ) -> impl crate::ParIterUsing, R, Item = >::Item> where - U: Send + 'static, - F: FnMut(usize) -> U, + U: 'static, + F: Fn(usize) -> U + Sync, { let using = UsingFun::new(using); let (orchestrator, params, iter, x1) = self.destruct(); let m1 = move |_: &mut U, t: I::Item| x1(t); - UParXap::new(using, params, iter, m1) + UParXap::new(using, orchestrator, params, iter, m1) } fn using_clone( self, - using: U, - ) -> impl ParIterUsingOld, R, Item = >::Item> + value: U, + ) -> impl crate::ParIterUsing, R, Item = >::Item> where - U: Clone + Send + 'static, + U: Clone + 'static, { - let using = UsingClone::new(using); + let using = UsingClone::new(value); let (orchestrator, params, iter, x1) = self.destruct(); let m1 = move |_: &mut U, t: I::Item| x1(t); - UParXap::new(using, params, iter, m1) + UParXap::new(using, orchestrator, params, iter, m1) } // computation transformations diff --git a/src/par_iter.rs b/src/par_iter.rs index d3169cf..5ead958 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -3,9 +3,9 @@ use crate::computational_variants::fallible_option::ParOption; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_option::{IntoOption, ParIterOption}; use crate::par_iter_result::IntoResult; -use crate::using_old::{UsingClone, UsingFun}; +use crate::using::{UsingClone, UsingFun}; use crate::{ - ParIterUsingOld, Params, + ParIterUsing, Params, collect_into::ParCollectInto, default_fns::{map_clone, map_copy, map_count, reduce_sum, reduce_unit}, parameters::{ChunkSize, IterationOrder, NumThreads}, @@ -475,10 +475,10 @@ where fn using( self, using: F, - ) -> impl ParIterUsingOld, R, Item = >::Item> + ) -> impl ParIterUsing, R, Item = >::Item> where - U: Send + 'static, - F: FnMut(usize) -> U; + U: 'static, + F: Fn(usize) -> U + Sync; /// Converts the [`ParIter`] into [`ParIterUsing`] which will have access to a mutable reference of the /// used variable throughout the computation. @@ -496,9 +496,9 @@ where fn using_clone( self, value: U, - ) -> impl ParIterUsingOld, R, Item = >::Item> + ) -> impl ParIterUsing, R, Item = >::Item> where - U: Clone + Send + 'static; + U: Clone + 'static; // transformations into fallible computations diff --git a/src/using/computational_variants/mod.rs b/src/using/computational_variants/mod.rs index 38579ce..00dffde 100644 --- a/src/using/computational_variants/mod.rs +++ b/src/using/computational_variants/mod.rs @@ -1,3 +1,7 @@ mod u_map; mod u_par; mod u_xap; + +pub use u_map::UParMap; +pub use u_par::UPar; +pub use u_xap::UParXap; diff --git a/src/using/mod.rs b/src/using/mod.rs index d3c088c..9701892 100644 --- a/src/using/mod.rs +++ b/src/using/mod.rs @@ -5,4 +5,6 @@ mod u_par_iter; mod using_variants; pub use collect_into::UParCollectIntoCore; +pub use computational_variants::{UPar, UParMap, UParXap}; pub use u_par_iter::ParIterUsing; +pub use using_variants::{Using, UsingClone, UsingFun}; diff --git a/src/using/using_variants.rs b/src/using/using_variants.rs index b501d36..bdb7484 100644 --- a/src/using/using_variants.rs +++ b/src/using/using_variants.rs @@ -14,15 +14,15 @@ pub trait Using: Sync { } /// Using variant that creates instances of each thread by cloning an initial value. 
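For orientation, a standalone miniature of the two Using variants this hunk revises, with hypothetical names and deliberately safe bounds; the crate itself relaxes the bound on T and instead adds the unsafe Sync impl shown below:

trait Using {
    type Item;
    // called once per spawned thread with that thread's index
    fn create(&self, thread_idx: usize) -> Self::Item;
}

struct CloneSeed<T: Clone>(T);

impl<T: Clone> Using for CloneSeed<T> {
    type Item = T;
    fn create(&self, _thread_idx: usize) -> T {
        self.0.clone()
    }
}

struct FromThreadIdx<F>(F);

impl<U, F: Fn(usize) -> U> Using for FromThreadIdx<F> {
    type Item = U;
    fn create(&self, thread_idx: usize) -> U {
        (self.0)(thread_idx)
    }
}

fn main() {
    let seeded = CloneSeed(vec![0u8; 4]);
    let indexed = FromThreadIdx(|t: usize| t * 10);
    assert_eq!(seeded.create(2).len(), 4);
    assert_eq!(indexed.create(2), 20);
}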
-pub struct UsingClone(T); +pub struct UsingClone(T); -impl UsingClone { +impl UsingClone { pub(crate) fn new(value: T) -> Self { Self(value) } } -impl Using for UsingClone { +impl Using for UsingClone { type Item = T; fn create(&self, _: usize) -> T { @@ -34,6 +34,8 @@ impl Using for UsingClone { } } +unsafe impl Sync for UsingClone {} + /// Using variant that creates instances of each thread using a closure. pub struct UsingFun where From 21bfffb6e2c26890d749d13b418de252057966ce Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 11:50:53 +0200 Subject: [PATCH 179/264] wip --- examples/using_metrics.rs | 230 +++++++++++++++++++------------------- 1 file changed, 116 insertions(+), 114 deletions(-) diff --git a/examples/using_metrics.rs b/examples/using_metrics.rs index 2f13d41..d73a7d8 100644 --- a/examples/using_metrics.rs +++ b/examples/using_metrics.rs @@ -1,114 +1,116 @@ -use orx_parallel::*; -use std::cell::UnsafeCell; - -const N: u64 = 10_000_000; -const MAX_NUM_THREADS: usize = 8; - -// just some work -fn fibonacci(n: u64) -> u64 { - let mut a = 0; - let mut b = 1; - for _ in 0..n { - let c = a + b; - a = b; - b = c; - } - a -} - -#[derive(Default, Debug)] -struct ThreadMetrics { - thread_idx: usize, - num_items_handled: usize, - handled_42: bool, - num_filtered_out: usize, -} - -struct ThreadMetricsWriter<'a> { - metrics_ref: &'a mut ThreadMetrics, -} - -struct ComputationMetrics { - thread_metrics: UnsafeCell<[ThreadMetrics; MAX_NUM_THREADS]>, -} -impl ComputationMetrics { - fn new() -> Self { - let mut thread_metrics: [ThreadMetrics; MAX_NUM_THREADS] = Default::default(); - for i in 0..MAX_NUM_THREADS { - thread_metrics[i].thread_idx = i; - } - Self { - thread_metrics: UnsafeCell::new(thread_metrics), - } - } -} - -impl ComputationMetrics { - unsafe fn create_for_thread<'a>(&mut self, thread_idx: usize) -> ThreadMetricsWriter<'a> { - // SAFETY: here we create a mutable variable to the thread_idx-th metrics - // * If we call this method multiple times with the same index, - // we create multiple mutable references to the same ThreadMetrics, - // which would lead to a race condition. - // * We must make sure that `create_for_thread` is called only once per thread. - // * If we use `create_for_thread` within the `using` call to create mutable values - // used by the threads, we are certain that the parallel computation - // will only call this method once per thread; hence, it will not - // cause the race condition. - // * On the other hand, we must ensure that we do not call this method - // externally. - let array = unsafe { &mut *self.thread_metrics.get() }; - ThreadMetricsWriter { - metrics_ref: &mut array[thread_idx], - } - } -} - -fn main() { - let mut metrics = ComputationMetrics::new(); - - let input: Vec = (0..N).collect(); - - let sum = input - .par() - // SAFETY: we do not call `create_for_thread` externally; - // it is safe if it is called only by the parallel computation. 
- .using(|t| unsafe { metrics.create_for_thread(t) }) - .map(|m: &mut ThreadMetricsWriter<'_>, i| { - // collect some useful metrics - m.metrics_ref.num_items_handled += 1; - m.metrics_ref.handled_42 |= *i == 42; - - // actual work - fibonacci((*i % 50) + 1) % 100 - }) - .filter(|m, i| { - let is_even = i % 2 == 0; - - if !is_even { - m.metrics_ref.num_filtered_out += 1; - } - - is_even - }) - .num_threads(MAX_NUM_THREADS) - .sum(); - - println!("\nINPUT-LEN = {N}"); - println!("SUM = {sum}"); - - println!("\n\n"); - - println!("COLLECTED METRICS PER THREAD"); - for metrics in metrics.thread_metrics.get_mut().iter() { - println!("* {metrics:?}"); - } - let total_by_metrics: usize = metrics - .thread_metrics - .get_mut() - .iter() - .map(|x| x.num_items_handled) - .sum(); - println!("\n-> total num_items_handled by collected metrics: {total_by_metrics:?}\n"); - - assert_eq!(N as usize, total_by_metrics); -} +// use orx_parallel::*; +// use std::cell::UnsafeCell; + +// const N: u64 = 10_000_000; +// const MAX_NUM_THREADS: usize = 8; + +// // just some work +// fn fibonacci(n: u64) -> u64 { +// let mut a = 0; +// let mut b = 1; +// for _ in 0..n { +// let c = a + b; +// a = b; +// b = c; +// } +// a +// } + +// #[derive(Default, Debug)] +// struct ThreadMetrics { +// thread_idx: usize, +// num_items_handled: usize, +// handled_42: bool, +// num_filtered_out: usize, +// } + +// struct ThreadMetricsWriter<'a> { +// metrics_ref: &'a mut ThreadMetrics, +// } + +// struct ComputationMetrics { +// thread_metrics: UnsafeCell<[ThreadMetrics; MAX_NUM_THREADS]>, +// } +// impl ComputationMetrics { +// fn new() -> Self { +// let mut thread_metrics: [ThreadMetrics; MAX_NUM_THREADS] = Default::default(); +// for i in 0..MAX_NUM_THREADS { +// thread_metrics[i].thread_idx = i; +// } +// Self { +// thread_metrics: UnsafeCell::new(thread_metrics), +// } +// } +// } + +// impl ComputationMetrics { +// unsafe fn create_for_thread<'a>(&mut self, thread_idx: usize) -> ThreadMetricsWriter<'a> { +// // SAFETY: here we create a mutable variable to the thread_idx-th metrics +// // * If we call this method multiple times with the same index, +// // we create multiple mutable references to the same ThreadMetrics, +// // which would lead to a race condition. +// // * We must make sure that `create_for_thread` is called only once per thread. +// // * If we use `create_for_thread` within the `using` call to create mutable values +// // used by the threads, we are certain that the parallel computation +// // will only call this method once per thread; hence, it will not +// // cause the race condition. +// // * On the other hand, we must ensure that we do not call this method +// // externally. +// let array = unsafe { &mut *self.thread_metrics.get() }; +// ThreadMetricsWriter { +// metrics_ref: &mut array[thread_idx], +// } +// } +// } + +// fn main() { +// let mut metrics = ComputationMetrics::new(); + +// let input: Vec = (0..N).collect(); + +// let sum = input +// .par() +// // SAFETY: we do not call `create_for_thread` externally; +// // it is safe if it is called only by the parallel computation. 
+// .using(|t| unsafe { metrics.create_for_thread(t) }) +// .map(|m: &mut ThreadMetricsWriter<'_>, i| { +// // collect some useful metrics +// m.metrics_ref.num_items_handled += 1; +// m.metrics_ref.handled_42 |= *i == 42; + +// // actual work +// fibonacci((*i % 50) + 1) % 100 +// }) +// .filter(|m, i| { +// let is_even = i % 2 == 0; + +// if !is_even { +// m.metrics_ref.num_filtered_out += 1; +// } + +// is_even +// }) +// .num_threads(MAX_NUM_THREADS) +// .sum(); + +// println!("\nINPUT-LEN = {N}"); +// println!("SUM = {sum}"); + +// println!("\n\n"); + +// println!("COLLECTED METRICS PER THREAD"); +// for metrics in metrics.thread_metrics.get_mut().iter() { +// println!("* {metrics:?}"); +// } +// let total_by_metrics: usize = metrics +// .thread_metrics +// .get_mut() +// .iter() +// .map(|x| x.num_items_handled) +// .sum(); +// println!("\n-> total num_items_handled by collected metrics: {total_by_metrics:?}\n"); + +// assert_eq!(N as usize, total_by_metrics); +// } + +fn main() {} From 3efbf14d32d073aa85f8eb1fb71ea72b11ca1dbb Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 11:52:42 +0200 Subject: [PATCH 180/264] clean up old using code --- src/collect_into/par_collect_into.rs | 11 +- src/lib.rs | 3 - src/orch/par_thread_pool.rs | 4 +- src/par_iter.rs | 34 +- src/using_old/collect_into/fixed_vec.rs | 37 -- src/using_old/collect_into/mod.rs | 6 - src/using_old/collect_into/split_vec.rs | 49 --- .../collect_into/u_par_collect_into.rs | 24 -- src/using_old/collect_into/vec.rs | 50 --- src/using_old/computational_variants/mod.rs | 7 - src/using_old/computational_variants/u_map.rs | 187 -------- src/using_old/computational_variants/u_par.rs | 179 -------- src/using_old/computational_variants/u_xap.rs | 227 ---------- src/using_old/computations/default_fns.rs | 32 -- src/using_old/computations/mod.rs | 7 - src/using_old/computations/u_map/collect.rs | 51 --- src/using_old/computations/u_map/m.rs | 58 --- src/using_old/computations/u_map/mod.rs | 10 - src/using_old/computations/u_map/next.rs | 30 -- src/using_old/computations/u_map/reduce.rs | 24 -- .../computations/u_map/tests/collect.rs | 53 --- .../computations/u_map/tests/find.rs | 56 --- src/using_old/computations/u_map/tests/mod.rs | 3 - .../computations/u_map/tests/reduce.rs | 81 ---- .../computations/u_map/transformations.rs | 23 - src/using_old/computations/u_xap/collect.rs | 75 ---- src/using_old/computations/u_xap/mod.rs | 9 - src/using_old/computations/u_xap/next.rs | 37 -- src/using_old/computations/u_xap/reduce.rs | 28 -- .../computations/u_xap/tests/collect.rs | 99 ----- .../computations/u_xap/tests/find.rs | 59 --- src/using_old/computations/u_xap/tests/mod.rs | 3 - .../computations/u_xap/tests/reduce.rs | 86 ---- src/using_old/computations/u_xap/x.rs | 61 --- src/using_old/mod.rs | 11 - src/using_old/runner/mod.rs | 2 - .../runner/parallel_runner_compute/mod.rs | 5 - .../u_collect_arbitrary.rs | 159 ------- .../u_collect_ordered.rs | 133 ------ .../runner/parallel_runner_compute/u_next.rs | 132 ------ .../parallel_runner_compute/u_next_any.rs | 114 ----- .../parallel_runner_compute/u_reduce.rs | 144 ------- .../runner/thread_runner_compute/mod.rs | 5 - .../u_collect_arbitrary.rs | 142 ------ .../u_collect_ordered.rs | 152 ------- .../runner/thread_runner_compute/u_next.rs | 163 ------- .../thread_runner_compute/u_next_any.rs | 163 ------- .../runner/thread_runner_compute/u_reduce.rs | 178 -------- src/using_old/u_par_iter.rs | 404 ------------------ src/using_old/using_variants.rs | 70 --- 50 files changed, 4 
insertions(+), 3676 deletions(-) delete mode 100644 src/using_old/collect_into/fixed_vec.rs delete mode 100644 src/using_old/collect_into/mod.rs delete mode 100644 src/using_old/collect_into/split_vec.rs delete mode 100644 src/using_old/collect_into/u_par_collect_into.rs delete mode 100644 src/using_old/collect_into/vec.rs delete mode 100644 src/using_old/computational_variants/mod.rs delete mode 100644 src/using_old/computational_variants/u_map.rs delete mode 100644 src/using_old/computational_variants/u_par.rs delete mode 100644 src/using_old/computational_variants/u_xap.rs delete mode 100644 src/using_old/computations/default_fns.rs delete mode 100644 src/using_old/computations/mod.rs delete mode 100644 src/using_old/computations/u_map/collect.rs delete mode 100644 src/using_old/computations/u_map/m.rs delete mode 100644 src/using_old/computations/u_map/mod.rs delete mode 100644 src/using_old/computations/u_map/next.rs delete mode 100644 src/using_old/computations/u_map/reduce.rs delete mode 100644 src/using_old/computations/u_map/tests/collect.rs delete mode 100644 src/using_old/computations/u_map/tests/find.rs delete mode 100644 src/using_old/computations/u_map/tests/mod.rs delete mode 100644 src/using_old/computations/u_map/tests/reduce.rs delete mode 100644 src/using_old/computations/u_map/transformations.rs delete mode 100644 src/using_old/computations/u_xap/collect.rs delete mode 100644 src/using_old/computations/u_xap/mod.rs delete mode 100644 src/using_old/computations/u_xap/next.rs delete mode 100644 src/using_old/computations/u_xap/reduce.rs delete mode 100644 src/using_old/computations/u_xap/tests/collect.rs delete mode 100644 src/using_old/computations/u_xap/tests/find.rs delete mode 100644 src/using_old/computations/u_xap/tests/mod.rs delete mode 100644 src/using_old/computations/u_xap/tests/reduce.rs delete mode 100644 src/using_old/computations/u_xap/x.rs delete mode 100644 src/using_old/mod.rs delete mode 100644 src/using_old/runner/mod.rs delete mode 100644 src/using_old/runner/parallel_runner_compute/mod.rs delete mode 100644 src/using_old/runner/parallel_runner_compute/u_collect_arbitrary.rs delete mode 100644 src/using_old/runner/parallel_runner_compute/u_collect_ordered.rs delete mode 100644 src/using_old/runner/parallel_runner_compute/u_next.rs delete mode 100644 src/using_old/runner/parallel_runner_compute/u_next_any.rs delete mode 100644 src/using_old/runner/parallel_runner_compute/u_reduce.rs delete mode 100644 src/using_old/runner/thread_runner_compute/mod.rs delete mode 100644 src/using_old/runner/thread_runner_compute/u_collect_arbitrary.rs delete mode 100644 src/using_old/runner/thread_runner_compute/u_collect_ordered.rs delete mode 100644 src/using_old/runner/thread_runner_compute/u_next.rs delete mode 100644 src/using_old/runner/thread_runner_compute/u_next_any.rs delete mode 100644 src/using_old/runner/thread_runner_compute/u_reduce.rs delete mode 100644 src/using_old/u_par_iter.rs delete mode 100644 src/using_old/using_variants.rs diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index e982366..0e7e8ab 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -3,7 +3,6 @@ use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; use crate::orch::Orchestrator; use crate::using::UParCollectIntoCore; -use crate::using_old::UParCollectIntoCoreOld; use orx_concurrent_iter::ConcurrentIter; use 
orx_iterable::Collection; use orx_pinned_vec::IntoConcurrentPinnedVec; @@ -92,12 +91,6 @@ pub trait ParCollectIntoCore: Collection { } /// Collection types into which outputs of a parallel computations can be collected into. -pub trait ParCollectInto: - ParCollectIntoCore + UParCollectIntoCore + UParCollectIntoCoreOld -{ -} +pub trait ParCollectInto: ParCollectIntoCore + UParCollectIntoCore {} -impl ParCollectInto for C where - C: ParCollectIntoCore + UParCollectIntoCore + UParCollectIntoCoreOld -{ -} +impl ParCollectInto for C where C: ParCollectIntoCore + UParCollectIntoCore {} diff --git a/src/lib.rs b/src/lib.rs index 712187a..9e5f7f3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -43,8 +43,6 @@ pub mod runner; mod special_type_sets; /// Module defining parallel iterators with mutable access to values distributed to each thread. pub mod using; -/// Module defining parallel iterators with mutable access to values distributed to each thread. -pub mod using_old; /// Module defining the GenericIterator which is a generalization over /// sequential iterator, rayon's parallel iterator and orx-parallel's @@ -72,4 +70,3 @@ pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params}; pub use runner::{DefaultRunner, ParallelRunner, ThreadRunner}; pub use special_type_sets::Sum; pub use using::ParIterUsing; -pub use using_old::ParIterUsingOld; diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index fcaf71c..8016e76 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -1,6 +1,4 @@ -use crate::{ - generic_values::runner_results::Fallibility, orch::num_spawned::NumSpawned, using_old::Using, -}; +use crate::{generic_values::runner_results::Fallibility, orch::num_spawned::NumSpawned}; use alloc::vec::Vec; use core::num::NonZeroUsize; use orx_concurrent_bag::ConcurrentBag; diff --git a/src/par_iter.rs b/src/par_iter.rs index 5ead958..f7e3d20 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -437,39 +437,7 @@ where /// /// let input: Vec = (0..N).collect(); /// - /// let sum = input - /// .par() - /// // SAFETY: we do not call `create_for_thread` externally; - /// // it is safe if it is called only by the parallel computation. 
- /// .using(|t| unsafe { metrics.create_for_thread(t) }) - /// .map(|m: &mut ThreadMetricsWriter<'_>, i| { - /// // collect some useful metrics - /// m.metrics_ref.num_items_handled += 1; - /// m.metrics_ref.handled_42 |= *i == 42; - /// - /// // actual work - /// fibonacci((*i % 20) + 1) % 100 - /// }) - /// .filter(|m, i| { - /// let is_even = i % 2 == 0; - /// - /// if !is_even { - /// m.metrics_ref.num_filtered_out += 1; - /// } - /// - /// is_even - /// }) - /// .num_threads(MAX_NUM_THREADS) - /// .sum(); - /// - /// let total_by_metrics: usize = metrics - /// .thread_metrics - /// .get_mut() - /// .iter() - /// .map(|x| x.num_items_handled) - /// .sum(); - /// - /// assert_eq!(N as usize, total_by_metrics); + /// ``` /// fn using( diff --git a/src/using_old/collect_into/fixed_vec.rs b/src/using_old/collect_into/fixed_vec.rs deleted file mode 100644 index 8dbaadf..0000000 --- a/src/using_old/collect_into/fixed_vec.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::generic_values::Values; -use crate::generic_values::runner_results::Infallible; -use crate::runner::ParallelRunner; -use crate::using_old::Using; -use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCoreOld; -use crate::using_old::computations::{UM, UX}; -use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::FixedVec; - -impl UParCollectIntoCoreOld for FixedVec -where - O: Send + Sync, -{ - fn u_m_collect_into_old(self, m: UM) -> Self - where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - { - let vec = Vec::from(self); - FixedVec::from(vec.u_m_collect_into_old::(m)) - } - - fn u_x_collect_into_old(self, x: UX) -> Self - where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send + Sync, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, - { - let vec = Vec::from(self); - FixedVec::from(vec.u_x_collect_into_old::(x)) - } -} diff --git a/src/using_old/collect_into/mod.rs b/src/using_old/collect_into/mod.rs deleted file mode 100644 index d1277a2..0000000 --- a/src/using_old/collect_into/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -mod fixed_vec; -mod split_vec; -mod u_par_collect_into; -mod vec; - -pub use u_par_collect_into::UParCollectIntoCoreOld; diff --git a/src/using_old/collect_into/split_vec.rs b/src/using_old/collect_into/split_vec.rs deleted file mode 100644 index 98a8c35..0000000 --- a/src/using_old/collect_into/split_vec.rs +++ /dev/null @@ -1,49 +0,0 @@ -use crate::collect_into::utils::split_vec_reserve; -use crate::generic_values::Values; -use crate::generic_values::runner_results::Infallible; -use crate::runner::ParallelRunner; -use crate::using_old::Using; -use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCoreOld; -use crate::using_old::computations::{UM, UX}; -use orx_concurrent_iter::ConcurrentIter; -use orx_split_vec::{GrowthWithConstantTimeAccess, PseudoDefault, SplitVec}; - -impl UParCollectIntoCoreOld for SplitVec -where - O: Send + Sync, - G: GrowthWithConstantTimeAccess, - Self: PseudoDefault, -{ - fn u_m_collect_into_old(mut self, m: UM) -> Self - where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - { - split_vec_reserve( - &mut self, - m.params().is_sequential(), - m.iter().try_get_len(), - ); - let (_num_spawned, pinned_vec) = m.collect_into::(self); - pinned_vec - } - - fn u_x_collect_into_old(mut self, x: UX) -> Self - where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vo + 
Sync, - { - split_vec_reserve( - &mut self, - x.params().is_sequential(), - x.con_iter().try_get_len(), - ); - let (_num_spawned, pinned_vec) = x.collect_into::(self); - pinned_vec - } -} diff --git a/src/using_old/collect_into/u_par_collect_into.rs b/src/using_old/collect_into/u_par_collect_into.rs deleted file mode 100644 index 8551119..0000000 --- a/src/using_old/collect_into/u_par_collect_into.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::collect_into::ParCollectIntoCore; -use crate::generic_values::Values; -use crate::generic_values::runner_results::Infallible; -use crate::runner::ParallelRunner; -use crate::using_old::Using; -use crate::using_old::computations::{UM, UX}; -use orx_concurrent_iter::ConcurrentIter; - -pub trait UParCollectIntoCoreOld: ParCollectIntoCore { - fn u_m_collect_into_old(self, m: UM) -> Self - where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync; - - fn u_x_collect_into_old(self, x: UX) -> Self - where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync; -} diff --git a/src/using_old/collect_into/vec.rs b/src/using_old/collect_into/vec.rs deleted file mode 100644 index 4361283..0000000 --- a/src/using_old/collect_into/vec.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::collect_into::utils::extend_vec_from_split; -use crate::generic_values::Values; -use crate::generic_values::runner_results::Infallible; -use crate::runner::ParallelRunner; -use crate::using_old::Using; -use crate::using_old::collect_into::u_par_collect_into::UParCollectIntoCoreOld; -use crate::using_old::computations::{UM, UX}; -use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::FixedVec; -use orx_split_vec::SplitVec; - -impl UParCollectIntoCoreOld for Vec -where - O: Send + Sync, -{ - fn u_m_collect_into_old(mut self, m: UM) -> Self - where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - { - match m.iter().try_get_len() { - None => { - let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let split_vec = split_vec.u_m_collect_into_old::(m); - extend_vec_from_split(self, split_vec) - } - Some(len) => { - self.reserve(len); - let fixed_vec = FixedVec::from(self); - let (_num_spawned, fixed_vec) = m.collect_into::(fixed_vec); - Vec::from(fixed_vec) - } - } - } - - fn u_x_collect_into_old(self, x: UX) -> Self - where - R: ParallelRunner, - U: Using, - I: ConcurrentIter, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, - { - let split_vec = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let split_vec = split_vec.u_x_collect_into_old::(x); - extend_vec_from_split(self, split_vec) - } -} diff --git a/src/using_old/computational_variants/mod.rs b/src/using_old/computational_variants/mod.rs deleted file mode 100644 index 00dffde..0000000 --- a/src/using_old/computational_variants/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod u_map; -mod u_par; -mod u_xap; - -pub use u_map::UParMap; -pub use u_par::UPar; -pub use u_xap::UParXap; diff --git a/src/using_old/computational_variants/u_map.rs b/src/using_old/computational_variants/u_map.rs deleted file mode 100644 index 52aad3e..0000000 --- a/src/using_old/computational_variants/u_map.rs +++ /dev/null @@ -1,187 +0,0 @@ -use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, - generic_values::Vector, - orch::{DefaultOrchestrator, Orchestrator}, - using_old::{ - Using, computational_variants::u_xap::UParXap, 
computations::UM, - u_par_iter::ParIterUsingOld, - }, -}; -use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; - -/// A parallel iterator that maps inputs. -pub struct UParMap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ - um: UM, - phantom: PhantomData, -} - -impl UParMap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ - pub(crate) fn new(using: U, params: Params, iter: I, m1: M1) -> Self { - Self { - um: UM::new(using, params, iter, m1), - phantom: PhantomData, - } - } - - fn destruct(self) -> (U, Params, I, M1) { - self.um.destruct() - } -} - -unsafe impl Send for UParMap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ -} - -unsafe impl Sync for UParMap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ -} - -impl ParIterUsingOld for UParMap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ - type Item = O; - - fn con_iter(&self) -> &impl ConcurrentIter { - self.um.iter() - } - - fn params(&self) -> Params { - self.um.params() - } - - // parameter transformations - - fn num_threads(mut self, num_threads: impl Into) -> Self { - self.um.num_threads(num_threads); - self - } - - fn chunk_size(mut self, chunk_size: impl Into) -> Self { - self.um.chunk_size(chunk_size); - self - } - - fn iteration_order(mut self, collect: IterationOrder) -> Self { - self.um.iteration_order(collect); - self - } - - fn with_runner(self) -> impl ParIterUsingOld { - let (using, params, iter, map) = self.destruct(); - UParMap::new(using, params, iter, map) - } - - // computation transformations - - fn map(self, map: Map) -> impl ParIterUsingOld - where - Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone, - { - let (using, params, iter, m1) = self.destruct(); - let m1 = move |u: &mut U::Item, x: I::Item| { - let v1 = m1(u, x); - map(u, v1) - }; - UParMap::new(using, params, iter, m1) - } - - fn filter(self, filter: Filter) -> impl ParIterUsingOld - where - Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, - { - let (using, params, iter, m1) = self.destruct(); - - let x1 = move |u: &mut U::Item, i: I::Item| { - let value = m1(u, i); - filter(u, &value).then_some(value) - }; - - UParXap::new(using, params, iter, x1) - } - - fn flat_map( - self, - flat_map: FlatMap, - ) -> impl ParIterUsingOld - where - IOut: IntoIterator, - FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone, - { - let (using, params, iter, m1) = self.destruct(); - let x1 = move |u: &mut U::Item, i: I::Item| { - let a = m1(u, i); - Vector(flat_map(u, a)) - }; - UParXap::new(using, params, iter, x1) - } - - fn filter_map( - self, - filter_map: FilterMap, - ) -> impl ParIterUsingOld - where - FilterMap: Fn(&mut U::Item, Self::Item) -> Option + Sync + Clone, - { - let (using, params, iter, m1) = self.destruct(); - let x1 = move |u: &mut U::Item, i: I::Item| { - let a = m1(u, i); - filter_map(u, a) - }; - UParXap::new(using, params, iter, x1) - } - - // collect - - fn collect_into(self, output: C) -> C - where - C: ParCollectInto, - { - output.u_m_collect_into_old::(self.um) - } - - // reduce - - fn reduce(self, reduce: Reduce) -> Option - where - Self::Item: Send, - Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync, - { - self.um.reduce::(reduce).1 - } - - // early exit - - fn first(self) -> Option - 
where - Self::Item: Send, - { - self.um.next::().1 - } -} diff --git a/src/using_old/computational_variants/u_par.rs b/src/using_old/computational_variants/u_par.rs deleted file mode 100644 index c8c412e..0000000 --- a/src/using_old/computational_variants/u_par.rs +++ /dev/null @@ -1,179 +0,0 @@ -use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, - generic_values::Vector, - orch::{DefaultOrchestrator, Orchestrator}, - using_old::{ - Using, - computational_variants::{u_map::UParMap, u_xap::UParXap}, - computations::{UM, u_map_self}, - u_par_iter::ParIterUsingOld, - }, -}; -use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; - -/// A parallel iterator. -pub struct UPar -where - U: Using, - R: Orchestrator, - I: ConcurrentIter, -{ - using: U, - iter: I, - params: Params, - phantom: PhantomData, -} - -impl UPar -where - U: Using, - R: Orchestrator, - I: ConcurrentIter, -{ - pub(crate) fn new(using: U, params: Params, iter: I) -> Self { - Self { - using, - iter, - params, - phantom: PhantomData, - } - } - - fn destruct(self) -> (U, Params, I) { - (self.using, self.params, self.iter) - } - - #[allow(clippy::type_complexity)] - fn u_m(self) -> UM I::Item> { - let (using, params, iter) = self.destruct(); - UM::new(using, params, iter, u_map_self) - } -} - -unsafe impl Send for UPar -where - U: Using, - R: Orchestrator, - I: ConcurrentIter, -{ -} - -unsafe impl Sync for UPar -where - U: Using, - R: Orchestrator, - I: ConcurrentIter, -{ -} - -impl ParIterUsingOld for UPar -where - U: Using, - R: Orchestrator, - I: ConcurrentIter, -{ - type Item = I::Item; - - fn con_iter(&self) -> &impl ConcurrentIter { - &self.iter - } - - fn params(&self) -> Params { - self.params - } - - // params transformations - - fn num_threads(mut self, num_threads: impl Into) -> Self { - self.params = self.params.with_num_threads(num_threads); - self - } - - fn chunk_size(mut self, chunk_size: impl Into) -> Self { - self.params = self.params.with_chunk_size(chunk_size); - self - } - - fn iteration_order(mut self, collect: IterationOrder) -> Self { - self.params = self.params.with_collect_ordering(collect); - self - } - - fn with_runner(self) -> impl ParIterUsingOld { - UPar::new(self.using, self.params, self.iter) - } - - // computational transformations - - fn map(self, map: Map) -> impl ParIterUsingOld - where - Map: Fn(&mut ::Item, Self::Item) -> Out + Sync + Clone, - { - let (using, params, iter) = self.destruct(); - let map = move |u: &mut U::Item, x: Self::Item| map(u, x); - UParMap::new(using, params, iter, map) - } - - fn filter(self, filter: Filter) -> impl ParIterUsingOld - where - Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, - { - let (using, params, iter) = self.destruct(); - let x1 = move |u: &mut U::Item, i: Self::Item| filter(u, &i).then_some(i); - UParXap::new(using, params, iter, x1) - } - - fn flat_map( - self, - flat_map: FlatMap, - ) -> impl ParIterUsingOld - where - IOut: IntoIterator, - FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone, - { - let (using, params, iter) = self.destruct(); - let x1 = move |u: &mut U::Item, i: Self::Item| Vector(flat_map(u, i)); - UParXap::new(using, params, iter, x1) - } - - fn filter_map( - self, - filter_map: FilterMap, - ) -> impl ParIterUsingOld - where - FilterMap: Fn(&mut ::Item, Self::Item) -> Option + Sync + Clone, - { - let (using, params, iter) = self.destruct(); - let x1 = move |u: &mut U::Item, x: Self::Item| filter_map(u, x); - UParXap::new(using, params, iter, x1) - } - - // collect - 
- fn collect_into(self, output: C) -> C - where - C: ParCollectInto, - { - output.u_m_collect_into_old::(self.u_m()) - } - - // reduce - - fn reduce(self, reduce: Reduce) -> Option - where - Self::Item: Send, - Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync, - { - self.u_m().reduce::(reduce).1 - } - - // early exit - - fn first(self) -> Option - where - Self::Item: Send, - { - self.u_m().next::().1 - } -} diff --git a/src/using_old/computational_variants/u_xap.rs b/src/using_old/computational_variants/u_xap.rs deleted file mode 100644 index 53c9467..0000000 --- a/src/using_old/computational_variants/u_xap.rs +++ /dev/null @@ -1,227 +0,0 @@ -use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, - generic_values::{TransformableValues, runner_results::Infallible}, - orch::{DefaultOrchestrator, Orchestrator}, - using_old::{Using, computations::UX, u_par_iter::ParIterUsingOld}, -}; -use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; - -/// A parallel iterator that xaps inputs. -/// -/// *xap* is a generalization of one-to-one map, filter-map and flat-map operations. -pub struct UParXap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - Vo: TransformableValues, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ - ux: UX, - phantom: PhantomData, -} - -impl UParXap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - Vo: TransformableValues, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ - pub(crate) fn new(using: U, params: Params, iter: I, x1: M1) -> Self { - Self { - ux: UX::new(using, params, iter, x1), - phantom: PhantomData, - } - } - - fn destruct(self) -> (U, Params, I, M1) { - self.ux.destruct() - } -} - -unsafe impl Send for UParXap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - Vo: TransformableValues, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ -} - -unsafe impl Sync for UParXap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - Vo: TransformableValues, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ -} - -impl ParIterUsingOld for UParXap -where - R: Orchestrator, - U: Using, - I: ConcurrentIter, - Vo: TransformableValues, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ - type Item = Vo::Item; - - fn con_iter(&self) -> &impl ConcurrentIter { - self.ux.con_iter() - } - - fn params(&self) -> Params { - self.ux.params() - } - - // params transformations - - fn num_threads(mut self, num_threads: impl Into) -> Self { - self.ux.num_threads(num_threads); - self - } - - fn chunk_size(mut self, chunk_size: impl Into) -> Self { - self.ux.chunk_size(chunk_size); - self - } - - fn iteration_order(mut self, collect: IterationOrder) -> Self { - self.ux.iteration_order(collect); - self - } - - fn with_runner(self) -> impl ParIterUsingOld { - let (using, params, iter, map1) = self.destruct(); - UParXap::new(using, params, iter, map1) - } - - // computation transformations - - fn map(self, map: Map) -> impl ParIterUsingOld - where - Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone, - { - let (using, params, iter, x1) = self.destruct(); - - let x1 = move |u: &mut U::Item, i: I::Item| { - let vo = x1(u, i); - // SAFETY: all threads are guaranteed to have its own Using::Item value that is not shared with other threads. - // This guarantees that there will be no race conditions. - // TODO: the reason to have this unsafe block is the complication in lifetimes, which must be possible to fix; however with a large refactoring. 
- let u = unsafe { - &mut *{ - let p: *mut U::Item = u; - p - } - }; - vo.u_map(u, map.clone()) - }; - - UParXap::new(using, params, iter, x1) - } - - fn filter(self, filter: Filter) -> impl ParIterUsingOld - where - Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, - { - let (using, params, iter, x1) = self.destruct(); - let x1 = move |u: &mut U::Item, i: I::Item| { - let vo = x1(u, i); - // SAFETY: all threads are guaranteed to have its own Using::Item value that is not shared with other threads. - // This guarantees that there will be no race conditions. - // TODO: the reason to have this unsafe block is the complication in lifetimes, which must be possible to fix; however with a large refactoring. - let u = unsafe { - &mut *{ - let p: *mut U::Item = u; - p - } - }; - vo.u_filter(u, filter.clone()) - }; - UParXap::new(using, params, iter, x1) - } - - fn flat_map( - self, - flat_map: FlatMap, - ) -> impl ParIterUsingOld - where - IOut: IntoIterator, - FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone, - { - let (using, params, iter, x1) = self.destruct(); - let x1 = move |u: &mut U::Item, i: I::Item| { - let vo = x1(u, i); - // SAFETY: all threads are guaranteed to have its own Using::Item value that is not shared with other threads. - // This guarantees that there will be no race conditions. - // TODO: the reason to have this unsafe block is the complication in lifetimes, which must be possible to fix; however with a large refactoring. - let u = unsafe { - &mut *{ - let p: *mut U::Item = u; - p - } - }; - vo.u_flat_map(u, flat_map.clone()) - }; - UParXap::new(using, params, iter, x1) - } - - fn filter_map( - self, - filter_map: FilterMap, - ) -> impl ParIterUsingOld - where - FilterMap: Fn(&mut U::Item, Self::Item) -> Option + Sync + Clone, - { - let (using, params, iter, x1) = self.destruct(); - let x1 = move |u: &mut U::Item, i: I::Item| { - let vo = x1(u, i); - // SAFETY: all threads are guaranteed to have its own Using::Item value that is not shared with other threads. - // This guarantees that there will be no race conditions. - // TODO: the reason to have this unsafe block is the complication in lifetimes, which must be possible to fix; however with a large refactoring. 
- let u = unsafe { - &mut *{ - let p: *mut U::Item = u; - p - } - }; - vo.u_filter_map(u, filter_map.clone()) - }; - UParXap::new(using, params, iter, x1) - } - - // collect - - fn collect_into(self, output: C) -> C - where - C: ParCollectInto, - { - output.u_x_collect_into_old::(self.ux) - } - - // reduce - - fn reduce(self, reduce: Reduce) -> Option - where - Self::Item: Send, - Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync, - { - self.ux.reduce::(reduce).1 - } - - // early exit - - fn first(self) -> Option - where - Self::Item: Send, - { - self.ux.next::().1 - } -} diff --git a/src/using_old/computations/default_fns.rs b/src/using_old/computations/default_fns.rs deleted file mode 100644 index 84d9739..0000000 --- a/src/using_old/computations/default_fns.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::ops::Add; - -#[inline(always)] -pub fn u_map_self(_: &mut U, input: T) -> T { - input -} - -#[inline(always)] -pub fn u_map_copy(_: &mut U, x: &T) -> T { - *x -} - -#[inline(always)] -pub fn u_map_clone(_: &mut U, x: &T) -> T { - x.clone() -} - -#[inline(always)] -pub fn u_map_count(_: &mut U, _: T) -> usize { - 1 -} - -#[inline(always)] -pub fn u_reduce_sum(_: &mut U, a: T, b: T) -> T -where - T: Add, -{ - a + b -} - -#[inline(always)] -pub fn u_reduce_unit(_: &mut U, _: (), _: ()) {} diff --git a/src/using_old/computations/mod.rs b/src/using_old/computations/mod.rs deleted file mode 100644 index 879c051..0000000 --- a/src/using_old/computations/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod default_fns; -mod u_map; -mod u_xap; - -pub(super) use default_fns::*; -pub(crate) use u_map::UM; -pub(crate) use u_xap::UX; diff --git a/src/using_old/computations/u_map/collect.rs b/src/using_old/computations/u_map/collect.rs deleted file mode 100644 index bcb33bb..0000000 --- a/src/using_old/computations/u_map/collect.rs +++ /dev/null @@ -1,51 +0,0 @@ -use super::m::UM; -#[cfg(test)] -use crate::IterationOrder; -use crate::orch::NumSpawned; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using_old::Using; -#[cfg(test)] -use crate::using_old::runner::parallel_runner_compute::u_collect_arbitrary; -use crate::using_old::runner::parallel_runner_compute::u_collect_ordered; -use orx_concurrent_iter::ConcurrentIter; -use orx_pinned_vec::IntoConcurrentPinnedVec; - -impl UM -where - U: Using, - I: ConcurrentIter, - O: Send, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ - pub fn collect_into(self, pinned_vec: P) -> (NumSpawned, P) - where - R: ParallelRunner, - P: IntoConcurrentPinnedVec, - { - let len = self.iter().try_get_len(); - let p = self.params(); - match (p.is_sequential(), p.iteration_order) { - (true, _) => (NumSpawned::zero(), self.sequential(pinned_vec)), - #[cfg(test)] - (false, IterationOrder::Arbitrary) => { - u_collect_arbitrary::u_m(R::collection(p, len), self, pinned_vec) - } - (false, _) => u_collect_ordered::u_m(R::collection(p, len), self, pinned_vec), - } - } - - fn sequential
<P>
(self, mut pinned_vec: P) -> P - where - P: IntoConcurrentPinnedVec, - { - let (using, _, iter, map1) = self.destruct(); - let mut u = using.into_inner(); - - let iter = iter.into_seq_iter(); - for i in iter { - pinned_vec.push(map1(&mut u, i)); - } - - pinned_vec - } -} diff --git a/src/using_old/computations/u_map/m.rs b/src/using_old/computations/u_map/m.rs deleted file mode 100644 index b0c3608..0000000 --- a/src/using_old/computations/u_map/m.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::{ChunkSize, IterationOrder, NumThreads, Params, using_old::Using}; -use orx_concurrent_iter::ConcurrentIter; - -pub struct UM -where - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O, -{ - using: U, - params: Params, - iter: I, - map1: M1, -} - -impl UM -where - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O, -{ - pub fn new(using: U, params: Params, iter: I, map1: M1) -> Self { - Self { - using, - params, - iter, - map1, - } - } - - pub fn destruct(self) -> (U, Params, I, M1) { - (self.using, self.params, self.iter, self.map1) - } - - pub fn params(&self) -> Params { - self.params - } - - pub fn len_and_params(&self) -> (Option, Params) { - (self.iter.try_get_len(), self.params) - } - - pub fn num_threads(&mut self, num_threads: impl Into) { - self.params = self.params().with_num_threads(num_threads); - } - - pub fn chunk_size(&mut self, chunk_size: impl Into) { - self.params = self.params.with_chunk_size(chunk_size); - } - - pub fn iteration_order(&mut self, collect: IterationOrder) { - self.params = self.params.with_collect_ordering(collect); - } - - pub(crate) fn iter(&self) -> &I { - &self.iter - } -} diff --git a/src/using_old/computations/u_map/mod.rs b/src/using_old/computations/u_map/mod.rs deleted file mode 100644 index dcd4f85..0000000 --- a/src/using_old/computations/u_map/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -#[cfg(test)] -mod tests; - -mod collect; -mod m; -mod next; -mod reduce; -mod transformations; - -pub use m::UM; diff --git a/src/using_old/computations/u_map/next.rs b/src/using_old/computations/u_map/next.rs deleted file mode 100644 index c11d596..0000000 --- a/src/using_old/computations/u_map/next.rs +++ /dev/null @@ -1,30 +0,0 @@ -use super::m::UM; -use crate::orch::NumSpawned; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using_old::Using; -use crate::using_old::runner::parallel_runner_compute::{u_next, u_next_any}; -use orx_concurrent_iter::ConcurrentIter; - -impl UM -where - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - O: Send, -{ - pub fn next(self) -> (NumSpawned, Option) - where - R: ParallelRunner, - { - let (len, p) = self.len_and_params(); - u_next::u_m(R::early_return(p, len), self) - } - - pub fn next_any(self) -> (NumSpawned, Option) - where - R: ParallelRunner, - { - let (len, p) = self.len_and_params(); - u_next_any::u_m(R::early_return(p, len), self) - } -} diff --git a/src/using_old/computations/u_map/reduce.rs b/src/using_old/computations/u_map/reduce.rs deleted file mode 100644 index c062170..0000000 --- a/src/using_old/computations/u_map/reduce.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::orch::NumSpawned; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using_old::Using; -use crate::using_old::computations::UM; -use crate::using_old::runner::parallel_runner_compute::u_reduce; -use orx_concurrent_iter::ConcurrentIter; - -impl UM -where - U: Using, - I: ConcurrentIter, - O: Send, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ - pub fn 
reduce(self, reduce: X) -> (NumSpawned, Option) - where - R: ParallelRunner, - X: Fn(&mut U::Item, O, O) -> O + Sync, - { - let len = self.iter().try_get_len(); - let p = self.params(); - u_reduce::u_m(R::reduce(p, len), self, reduce) - } -} diff --git a/src/using_old/computations/u_map/tests/collect.rs b/src/using_old/computations/u_map/tests/collect.rs deleted file mode 100644 index e1ebf38..0000000 --- a/src/using_old/computations/u_map/tests/collect.rs +++ /dev/null @@ -1,53 +0,0 @@ -use crate::{ - IterationOrder, Params, runner::DefaultRunner, using_old::UsingClone, - using_old::computations::UM, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use orx_pinned_vec::PinnedVec; -use orx_split_vec::SplitVec; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] -const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64], - [IterationOrder::Ordered, IterationOrder::Arbitrary]) -] -fn u_m_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { - let offset = 33; - - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let map = |u: &mut usize, x: String| { - *u += 1; - format!("{}!", x) - }; - - let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let mut expected = Vec::new(); - - let mut u = 0; - for i in 0..offset { - let value = map(&mut u, i.to_string()); - output.push(value.clone()); - expected.push(value); - } - expected.extend(input.clone().into_iter().map(|x| map(&mut u, x))); - - let params = Params::new(nt, chunk, ordering); - let iter = input.into_con_iter(); - let m = UM::new(UsingClone::new(0), params, iter, map); - - let (_, mut output) = m.collect_into::(output); - - if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { - expected.sort(); - output.sort(); - } - - assert_eq!(expected, output.to_vec()); -} diff --git a/src/using_old/computations/u_map/tests/find.rs b/src/using_old/computations/u_map/tests/find.rs deleted file mode 100644 index d1e60eb..0000000 --- a/src/using_old/computations/u_map/tests/find.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::{ - DefaultRunner, Params, - using_old::{UsingClone, UsingFun, computations::UM}, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] -const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn u_m_find(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - - let expected = input.clone().into_iter().next(); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let map = |u: &mut usize, x: String| { - *u += 1; - x - }; - let m = UM::new(UsingClone::new(0), params, iter, map); - - let output = m.next::().1; - assert_eq!(expected, output); -} - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn u_m_map_find(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let map = |u: &mut usize, x: String| { - *u += 1; - format!("{}!", x) - }; - - let mut u = 0; - let expected = input.clone().into_iter().map(|x| map(&mut u, x)).next(); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let m = UM::new(UsingFun::new(|idx| idx), params, iter, map); - let output = m.next::().1; - - assert_eq!(expected, output); -} diff --git 
a/src/using_old/computations/u_map/tests/mod.rs b/src/using_old/computations/u_map/tests/mod.rs deleted file mode 100644 index 5493e3c..0000000 --- a/src/using_old/computations/u_map/tests/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod collect; -mod find; -mod reduce; diff --git a/src/using_old/computations/u_map/tests/reduce.rs b/src/using_old/computations/u_map/tests/reduce.rs deleted file mode 100644 index f9e1189..0000000 --- a/src/using_old/computations/u_map/tests/reduce.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - Params, - runner::DefaultRunner, - using_old::computations::UM, - using_old::{UsingClone, UsingFun}, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] -const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn m_reduce(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let reduce = |u: &mut usize, x: String, y: String| { - *u += 1; - match x < y { - true => y, - false => x, - } - }; - - let mut u = 0; - let expected = input - .clone() - .into_iter() - .reduce(|a, b| reduce(&mut u, a, b)); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let map = |u: &mut usize, x: String| { - *u += 1; - x - }; - let m = UM::new(UsingClone::new(0), params, iter, map); - let (_, output) = m.reduce::(reduce); - - assert_eq!(expected, output); -} - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn m_map_reduce(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let map = |u: &mut usize, x: String| { - *u += 1; - format!("{}!", x) - }; - let reduce = |u: &mut usize, x: String, y: String| { - *u += 1; - match x < y { - true => y, - false => x, - } - }; - - let mut u = 0; - let mut u2 = 0; - let expected = input - .clone() - .into_iter() - .map(|x| map(&mut u, x)) - .reduce(|a, b| reduce(&mut u2, a, b)); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let m = UM::new(UsingFun::new(|_| 42), params, iter, map); - let (_, output) = m.reduce::(reduce); - - assert_eq!(expected, output); -} diff --git a/src/using_old/computations/u_map/transformations.rs b/src/using_old/computations/u_map/transformations.rs deleted file mode 100644 index 29932eb..0000000 --- a/src/using_old/computations/u_map/transformations.rs +++ /dev/null @@ -1,23 +0,0 @@ -use crate::using_old::Using; -use crate::using_old::computations::UM; -use orx_concurrent_iter::ConcurrentIter; - -impl UM -where - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O, -{ - #[allow(clippy::type_complexity)] - pub fn map(self, map: M2) -> UM Q> - where - M2: Fn(&mut U::Item, O) -> Q, - { - let (using, params, iter, map1) = self.destruct(); - let map2 = move |u: &mut U::Item, t: ::Item| { - let v1 = map1(u, t); - map(u, v1) - }; - UM::new(using, params, iter, map2) - } -} diff --git a/src/using_old/computations/u_xap/collect.rs b/src/using_old/computations/u_xap/collect.rs deleted file mode 100644 index 5e917b3..0000000 --- a/src/using_old/computations/u_xap/collect.rs +++ /dev/null @@ -1,75 +0,0 @@ -use crate::generic_values::runner_results::{ - Infallible, ParallelCollect, ParallelCollectArbitrary, -}; -use crate::orch::NumSpawned; -use crate::using_old::Using; -use crate::using_old::computations::UX; -use 
crate::using_old::runner::parallel_runner_compute::{u_collect_arbitrary, u_collect_ordered}; -use crate::{ - IterationOrder, - generic_values::Values, - runner::{ParallelRunner, ParallelRunnerCompute}, -}; -use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; - -impl UX -where - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ - pub fn collect_into(self, pinned_vec: P) -> (NumSpawned, P) - where - R: ParallelRunner, - P: IntoConcurrentPinnedVec, - Vo: Values, - { - let (len, p) = self.len_and_params(); - - match (p.is_sequential(), p.iteration_order) { - (true, _) => (NumSpawned::zero(), self.sequential(pinned_vec)), - (false, IterationOrder::Arbitrary) => { - let (num_threads, result) = - u_collect_arbitrary::u_x(R::collection(p, len), self, pinned_vec); - let pinned_vec = match result { - ParallelCollectArbitrary::AllOrUntilWhileCollected { pinned_vec } => pinned_vec, - }; - (num_threads, pinned_vec) - } - (false, IterationOrder::Ordered) => { - let (num_threads, result) = - u_collect_ordered::u_x(R::collection(p, len), self, pinned_vec); - let pinned_vec = match result { - ParallelCollect::AllCollected { pinned_vec } => pinned_vec, - ParallelCollect::StoppedByWhileCondition { - pinned_vec, - stopped_idx: _, - } => pinned_vec, - }; - (num_threads, pinned_vec) - } - } - } - - fn sequential
<P>
(self, mut pinned_vec: P) -> P - where - P: IntoConcurrentPinnedVec, - { - let (using, _, iter, xap1) = self.destruct(); - let mut u = using.into_inner(); - - let iter = iter.into_seq_iter(); - for i in iter { - let vt = xap1(&mut u, i); - let done = vt.push_to_pinned_vec(&mut pinned_vec); - if Vo::sequential_push_to_stop(done).is_some() { - break; - } - } - - pinned_vec - } -} diff --git a/src/using_old/computations/u_xap/mod.rs b/src/using_old/computations/u_xap/mod.rs deleted file mode 100644 index 9342c2e..0000000 --- a/src/using_old/computations/u_xap/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -#[cfg(test)] -mod tests; - -mod collect; -mod next; -mod reduce; -mod x; - -pub use x::UX; diff --git a/src/using_old/computations/u_xap/next.rs b/src/using_old/computations/u_xap/next.rs deleted file mode 100644 index 3181dbb..0000000 --- a/src/using_old/computations/u_xap/next.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::generic_values::Values; -use crate::generic_values::runner_results::Infallible; -use crate::orch::NumSpawned; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using_old::Using; -use crate::using_old::computations::UX; -use crate::using_old::runner::parallel_runner_compute::{u_next, u_next_any}; -use orx_concurrent_iter::ConcurrentIter; - -impl UX -where - U: Using, - I: ConcurrentIter, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, - Vo::Item: Send, -{ - pub fn next(self) -> (NumSpawned, Option) - where - R: ParallelRunner, - Vo: Values, - { - let (len, p) = self.len_and_params(); - let (num_threads, Ok(result)) = u_next::u_x(R::early_return(p, len), self); - (num_threads, result.map(|x| x.1)) - } - - pub fn next_any(self) -> (NumSpawned, Option) - where - R: ParallelRunner, - Vo: Values, - { - let (len, p) = self.len_and_params(); - let (num_threads, Ok(next)) = u_next_any::u_x(R::early_return(p, len), self); - (num_threads, next) - } -} diff --git a/src/using_old/computations/u_xap/reduce.rs b/src/using_old/computations/u_xap/reduce.rs deleted file mode 100644 index ebfe653..0000000 --- a/src/using_old/computations/u_xap/reduce.rs +++ /dev/null @@ -1,28 +0,0 @@ -use crate::generic_values::Values; -use crate::generic_values::runner_results::Infallible; -use crate::orch::NumSpawned; -use crate::runner::{ParallelRunner, ParallelRunnerCompute}; -use crate::using_old::Using; -use crate::using_old::computations::UX; -use crate::using_old::runner::parallel_runner_compute::u_reduce; -use orx_concurrent_iter::ConcurrentIter; - -impl UX -where - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ - pub fn reduce(self, reduce: Red) -> (NumSpawned, Option) - where - R: ParallelRunner, - Red: Fn(&mut U::Item, Vo::Item, Vo::Item) -> Vo::Item + Sync, - Vo: Values, - { - let (len, p) = self.len_and_params(); - let (num_threads, Ok(acc)) = u_reduce::u_x(R::reduce(p, len), self, reduce); - (num_threads, acc) - } -} diff --git a/src/using_old/computations/u_xap/tests/collect.rs b/src/using_old/computations/u_xap/tests/collect.rs deleted file mode 100644 index 3086012..0000000 --- a/src/using_old/computations/u_xap/tests/collect.rs +++ /dev/null @@ -1,99 +0,0 @@ -use crate::{ - IterationOrder, Params, - generic_values::Vector, - runner::DefaultRunner, - using_old::{UsingClone, computations::UX}, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use orx_pinned_vec::PinnedVec; -use orx_split_vec::SplitVec; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] 
-const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64], - [IterationOrder::Ordered, IterationOrder::Arbitrary]) -] -fn u_x_flat_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { - let offset = 33; - - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let fmap = |x: String| x.chars().map(|x| x.to_string()).collect::>(); - let xmap = |u: &mut usize, x: String| { - *u += 1; - Vector(fmap(x)) - }; - - let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let mut expected = Vec::new(); - - for i in 0..offset { - let i = i.to_string(); - for x in fmap(i) { - output.push(x.clone()); - expected.push(x); - } - } - expected.extend(input.clone().into_iter().flat_map(&fmap)); - - let params = Params::new(nt, chunk, ordering); - let iter = input.into_con_iter(); - let x = UX::new(UsingClone::new(0), params, iter, xmap); - - let (_, mut output) = x.collect_into::(output); - - if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { - expected.sort(); - output.sort(); - } - - assert_eq!(expected, output.to_vec()); -} - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64], - [IterationOrder::Ordered, IterationOrder::Arbitrary]) -] -fn u_x_filter_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { - let offset = 33; - - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let fmap = |x: String| (!x.starts_with('3')).then_some(format!("{}!", x)); - let xmap = |u: &mut usize, x: String| { - *u += 1; - Vector(fmap(x)) - }; - - let mut output = SplitVec::with_doubling_growth_and_max_concurrent_capacity(); - let mut expected = Vec::new(); - - for i in 0..offset { - let i = i.to_string(); - if let Some(x) = fmap(i) { - output.push(x.clone()); - expected.push(x); - } - } - expected.extend(input.clone().into_iter().flat_map(&fmap)); - - let params = Params::new(nt, chunk, ordering); - let iter = input.into_con_iter(); - let x = UX::new(UsingClone::new(0), params, iter, xmap); - - let (_, mut output) = x.collect_into::(output); - - if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { - expected.sort(); - output.sort(); - } - - assert_eq!(expected, output.to_vec()); -} diff --git a/src/using_old/computations/u_xap/tests/find.rs b/src/using_old/computations/u_xap/tests/find.rs deleted file mode 100644 index c72acbb..0000000 --- a/src/using_old/computations/u_xap/tests/find.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::{ - DefaultRunner, Params, - generic_values::Vector, - using_old::{UsingClone, computations::UX}, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] -const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn u_x_flat_map_find(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let fmap = |x: String| x.chars().map(|x| x.to_string()).collect::>(); - let xmap = |u: &mut usize, x: String| { - *u += 1; - Vector(fmap(x)) - }; - - let expected = input.clone().into_iter().flat_map(fmap).next(); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let x = UX::new(UsingClone::new(0), params, iter, xmap); - - let output = x.next::().1; - assert_eq!(expected, output); -} - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn u_x_filter_map_find(n: 
usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let fmap = |x: String| (!x.starts_with('3')).then_some(format!("{}!", x)); - let xmap = |u: &mut usize, x: String| { - *u += 1; - Vector(fmap(x)) - }; - - let expected = input.clone().into_iter().filter_map(fmap).next(); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let x = UX::new(UsingClone::new(0), params, iter, xmap); - - let output = x.next::().1; - - assert_eq!(expected, output); -} diff --git a/src/using_old/computations/u_xap/tests/mod.rs b/src/using_old/computations/u_xap/tests/mod.rs deleted file mode 100644 index 5493e3c..0000000 --- a/src/using_old/computations/u_xap/tests/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod collect; -mod find; -mod reduce; diff --git a/src/using_old/computations/u_xap/tests/reduce.rs b/src/using_old/computations/u_xap/tests/reduce.rs deleted file mode 100644 index 80f96ba..0000000 --- a/src/using_old/computations/u_xap/tests/reduce.rs +++ /dev/null @@ -1,86 +0,0 @@ -use crate::{ - Params, - generic_values::Vector, - runner::DefaultRunner, - using_old::{UsingClone, computations::UX}, -}; -use orx_concurrent_iter::IntoConcurrentIter; -use test_case::test_matrix; - -#[cfg(miri)] -const N: [usize; 2] = [37, 125]; -#[cfg(not(miri))] -const N: [usize; 2] = [1025, 4735]; - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn u_x_flat_map_reduce(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let fmap = |x: String| x.chars().map(|x| x.to_string()).collect::>(); - let xmap = |u: &mut usize, x: String| { - *u += 1; - Vector(fmap(x)) - }; - - let reduce = |u: &mut usize, x: String, y: String| { - *u += 1; - match x < y { - true => y, - false => x, - } - }; - - let mut u = 0; - let expected = input - .clone() - .into_iter() - .flat_map(fmap) - .reduce(|a, b| reduce(&mut u, a, b)); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let x = UX::new(UsingClone::new(0), params, iter, xmap); - - let (_, output) = x.reduce::(reduce); - - assert_eq!(expected, output); -} - -#[test_matrix( - [0, 1, N[0], N[1]], - [1, 4], - [1, 64]) -] -fn u_x_filter_map_reduce(n: usize, nt: usize, chunk: usize) { - let input: Vec<_> = (0..n).map(|x| x.to_string()).collect(); - let fmap = |x: String| (!x.starts_with('3')).then_some(format!("{}!", x)); - let xmap = |u: &mut usize, x: String| { - *u += 1; - Vector(fmap(x)) - }; - let reduce = |u: &mut usize, x: String, y: String| { - *u += 1; - match x < y { - true => y, - false => x, - } - }; - - let mut u = 0; - let expected = input - .clone() - .into_iter() - .filter_map(fmap) - .reduce(|a, b| reduce(&mut u, a, b)); - - let params = Params::new(nt, chunk, Default::default()); - let iter = input.into_con_iter(); - let x = UX::new(UsingClone::new(0), params, iter, xmap); - - let (_, output) = x.reduce::(reduce); - - assert_eq!(expected, output); -} diff --git a/src/using_old/computations/u_xap/x.rs b/src/using_old/computations/u_xap/x.rs deleted file mode 100644 index 2e60ccb..0000000 --- a/src/using_old/computations/u_xap/x.rs +++ /dev/null @@ -1,61 +0,0 @@ -use crate::using_old::Using; -use crate::{ChunkSize, IterationOrder, NumThreads, Params, generic_values::Values}; -use orx_concurrent_iter::ConcurrentIter; - -pub struct UX -where - U: Using, - I: ConcurrentIter, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vo, -{ - using: U, - params: Params, - iter: I, - xap1: M1, -} - 
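The `UX` state above pairs a per-thread mutable value with a single `xap1` closure; as the deleted `UParXap` docs put it, *xap* generalizes one-to-one map, filter-map and flat-map. A minimal single-threaded sketch of that unification, using a plain `IntoIterator` return type where the crate uses its `Values`/`Vector` machinery (all names below are illustrative, not the crate's API):

```rust
// Sketch: one "xap" closure `Fn(&mut U, I) -> impl IntoIterator` subsumes
// map (one output), filter_map (zero or one) and flat_map (many).
fn xap_all<I, V, F>(input: Vec<I>, u: &mut usize, mut xap1: F) -> Vec<V::Item>
where
    V: IntoIterator,
    F: FnMut(&mut usize, I) -> V,
{
    let mut out = Vec::new();
    for i in input {
        out.extend(xap1(u, i)); // 0, 1 or many outputs per input
    }
    out
}

fn main() {
    let mut u = 0usize;
    // map: exactly one output per input (Some(_) is a one-element iterator)
    assert_eq!(xap_all(vec![1, 2, 3], &mut u, |u, x| { *u += 1; Some(x * 10) }), [10, 20, 30]);
    // filter_map: zero or one output per input
    assert_eq!(xap_all(vec![1, 2, 3], &mut u, |u, x| { *u += 1; (x % 2 == 1).then_some(x) }), [1, 3]);
    // flat_map: many outputs per input
    assert_eq!(xap_all(vec![2, 3], &mut u, |u, x| { *u += 1; 0..x }), [0, 1, 0, 1, 2]);
    // the mutable value was threaded through every call, as in UX
    assert_eq!(u, 8);
}
```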
-impl UX -where - U: Using, - I: ConcurrentIter, - Vo: Values, - M1: Fn(&mut U::Item, I::Item) -> Vo, -{ - pub fn new(using: U, params: Params, iter: I, xap1: M1) -> Self { - Self { - using, - params, - iter, - xap1, - } - } - - pub fn destruct(self) -> (U, Params, I, M1) { - (self.using, self.params, self.iter, self.xap1) - } - - pub fn params(&self) -> Params { - self.params - } - - pub fn len_and_params(&self) -> (Option, Params) { - (self.iter.try_get_len(), self.params) - } - - pub fn num_threads(&mut self, num_threads: impl Into) { - self.params = self.params().with_num_threads(num_threads); - } - - pub fn chunk_size(&mut self, chunk_size: impl Into) { - self.params = self.params.with_chunk_size(chunk_size); - } - - pub fn iteration_order(&mut self, collect: IterationOrder) { - self.params = self.params.with_collect_ordering(collect); - } - - pub(crate) fn con_iter(&self) -> &I { - &self.iter - } -} diff --git a/src/using_old/mod.rs b/src/using_old/mod.rs deleted file mode 100644 index ecd419d..0000000 --- a/src/using_old/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -mod collect_into; -/// Module containing variants of parallel iterators using a mutable variable. -pub mod computational_variants; -mod computations; -mod runner; -mod u_par_iter; -mod using_variants; - -pub(crate) use collect_into::UParCollectIntoCoreOld; -pub use u_par_iter::ParIterUsingOld; -pub use using_variants::{Using, UsingClone, UsingFun}; diff --git a/src/using_old/runner/mod.rs b/src/using_old/runner/mod.rs deleted file mode 100644 index 2fd84c7..0000000 --- a/src/using_old/runner/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub(crate) mod parallel_runner_compute; -mod thread_runner_compute; diff --git a/src/using_old/runner/parallel_runner_compute/mod.rs b/src/using_old/runner/parallel_runner_compute/mod.rs deleted file mode 100644 index 25f59bc..0000000 --- a/src/using_old/runner/parallel_runner_compute/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub(crate) mod u_collect_arbitrary; -pub(crate) mod u_collect_ordered; -pub(crate) mod u_next; -pub(crate) mod u_next_any; -pub(crate) mod u_reduce; diff --git a/src/using_old/runner/parallel_runner_compute/u_collect_arbitrary.rs b/src/using_old/runner/parallel_runner_compute/u_collect_arbitrary.rs deleted file mode 100644 index 9f97322..0000000 --- a/src/using_old/runner/parallel_runner_compute/u_collect_arbitrary.rs +++ /dev/null @@ -1,159 +0,0 @@ -use super::super::thread_runner_compute as th; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{ParallelCollectArbitrary, ThreadCollectArbitrary}; -use crate::orch::NumSpawned; -use crate::runner::ParallelRunnerCompute; -use crate::using_old::Using; -#[cfg(test)] -use crate::using_old::computations::UM; -use crate::using_old::computations::UX; -use orx_concurrent_bag::ConcurrentBag; -use orx_concurrent_iter::ConcurrentIter; -use orx_fixed_vec::IntoConcurrentPinnedVec; - -// m - -#[cfg(test)] -pub fn u_m(runner: C, m: UM, pinned_vec: P) -> (NumSpawned, P) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - P: IntoConcurrentPinnedVec, - O: Send, -{ - let capacity_bound = pinned_vec.capacity_bound(); - let offset = pinned_vec.len(); - let (mut using, _, iter, map1) = m.destruct(); - - let mut bag: ConcurrentBag = pinned_vec.into(); - match iter.try_get_len() { - Some(iter_len) => bag.reserve_maximum_capacity(offset + iter_len), - None => bag.reserve_maximum_capacity(capacity_bound), - }; - - // compute - - let state = runner.new_shared_state(); - 
let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - std::thread::scope(|s| { - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - s.spawn(|| { - th::u_collect_arbitrary::u_m( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &map1, - &bag, - ); - }); - } - }); - let values = bag.into_inner(); - (num_spawned, values) -} - -// x - -pub fn u_x( - runner: C, - x: UX, - pinned_vec: P, -) -> (NumSpawned, ParallelCollectArbitrary) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, - P: IntoConcurrentPinnedVec, -{ - let capacity_bound = pinned_vec.capacity_bound(); - let offset = pinned_vec.len(); - let (mut using, _, iter, xap1) = x.destruct(); - - let mut bag: ConcurrentBag = pinned_vec.into(); - match iter.try_get_len() { - Some(iter_len) => bag.reserve_maximum_capacity(offset + iter_len), - None => bag.reserve_maximum_capacity(capacity_bound), - }; - - // compute - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result: ThreadCollectArbitrary = std::thread::scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - handles.push(s.spawn(|| { - th::u_collect_arbitrary::u_x( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &xap1, - &bag, - ) - })); - } - - let mut early_exit_result = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match &result { - ThreadCollectArbitrary::AllCollected => {} - ThreadCollectArbitrary::StoppedByError { error: _ } => { - early_exit_result = Some(result); - break; - } - ThreadCollectArbitrary::StoppedByWhileCondition => { - early_exit_result = Some(result); - } - } - } - } - - early_exit_result.unwrap_or(ThreadCollectArbitrary::AllCollected) - }); - - ( - num_spawned, - match result { - ThreadCollectArbitrary::AllCollected => { - ParallelCollectArbitrary::AllOrUntilWhileCollected { - pinned_vec: bag.into_inner(), - } - } - ThreadCollectArbitrary::StoppedByWhileCondition => { - ParallelCollectArbitrary::AllOrUntilWhileCollected { - pinned_vec: bag.into_inner(), - } - } - ThreadCollectArbitrary::StoppedByError { error } => { - ParallelCollectArbitrary::StoppedByError { error } - } - }, - ) -} diff --git a/src/using_old/runner/parallel_runner_compute/u_collect_ordered.rs b/src/using_old/runner/parallel_runner_compute/u_collect_ordered.rs deleted file mode 100644 index 9421610..0000000 --- a/src/using_old/runner/parallel_runner_compute/u_collect_ordered.rs +++ /dev/null @@ -1,133 +0,0 @@ -use super::super::thread_runner_compute as th; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, ParallelCollect, ThreadCollect}; -use crate::orch::NumSpawned; -use crate::runner::ParallelRunnerCompute; -use crate::using_old::Using; -use crate::using_old::computations::{UM, UX}; -use orx_concurrent_iter::ConcurrentIter; -use orx_concurrent_ordered_bag::ConcurrentOrderedBag; -use orx_fixed_vec::IntoConcurrentPinnedVec; - -// 
m - -pub fn u_m(runner: C, m: UM, pinned_vec: P) -> (NumSpawned, P) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - O: Send, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - P: IntoConcurrentPinnedVec, -{ - let offset = pinned_vec.len(); - let (mut using, _, iter, map1) = m.destruct(); - - let o_bag: ConcurrentOrderedBag = pinned_vec.into(); - - // compute - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - std::thread::scope(|s| { - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - s.spawn(|| { - th::u_collect_ordered::u_m( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &map1, - &o_bag, - offset, - ); - }); - } - }); - - let values = unsafe { o_bag.into_inner().unwrap_only_if_counts_match() }; - (num_spawned, values) -} - -// x - -pub fn u_x( - runner: C, - x: UX, - pinned_vec: P, -) -> (NumSpawned, ParallelCollect) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, - P: IntoConcurrentPinnedVec, -{ - let (mut using, _, iter, xap1) = x.destruct(); - - // compute - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result: Result>, ::Error> = - std::thread::scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - handles.push(s.spawn(|| { - th::u_collect_ordered::u_x( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &xap1, - ) - })); - } - - let mut results = Vec::with_capacity(handles.len()); - - let mut error = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match result.into_result() { - Ok(result) => results.push(result), - Err(e) => { - error = Some(e); - break; - } - } - } - } - - match error { - Some(error) => Err(error), - None => Ok(results), - } - }); - - let result = match result { - Err(error) => ParallelCollect::StoppedByError { error }, - Ok(results) => ParallelCollect::reduce(results, pinned_vec), - }; - - (num_spawned, result) -} diff --git a/src/using_old/runner/parallel_runner_compute/u_next.rs b/src/using_old/runner/parallel_runner_compute/u_next.rs deleted file mode 100644 index 10a2d8f..0000000 --- a/src/using_old/runner/parallel_runner_compute/u_next.rs +++ /dev/null @@ -1,132 +0,0 @@ -use super::super::thread_runner_compute as th; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::NumSpawned; -use crate::runner::ParallelRunnerCompute; -use crate::using_old::Using; -use crate::using_old::computations::{UM, UX}; -use orx_concurrent_iter::ConcurrentIter; - -pub fn u_m(runner: C, m: UM) -> (NumSpawned, Option) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - O: Send, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ - let (mut using, _, iter, map1) = m.destruct(); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - 
let results = std::thread::scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - handles.push(s.spawn(|| { - th::u_next::u_m( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &map1, - ) - })) - } - - let mut results: Vec<(usize, O)> = Vec::with_capacity(handles.len()); - for x in handles { - if let Some(x) = x.join().expect("failed to join the thread") { - results.push(x); - } - } - results - }); - - let acc = results.into_iter().min_by_key(|x| x.0).map(|x| x.1); - - (num_spawned, acc) -} - -type ResultNext = Result< - Option<(usize, ::Item)>, - <::Fallibility as Fallibility>::Error, ->; - -pub fn u_x(runner: C, x: UX) -> (NumSpawned, ResultNext) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ - let (mut using, _, iter, xap1) = x.destruct(); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result: Result>, _> = std::thread::scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - handles.push(s.spawn(|| { - th::u_next::u_x( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &xap1, - ) - })) - } - - let mut results = Vec::with_capacity(handles.len()); - - let mut error = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match result { - NextWithIdx::Found { idx, value } => { - results.push(NextSuccess::Found { idx, value }) - } - NextWithIdx::NotFound => {} - NextWithIdx::StoppedByWhileCondition { idx } => { - results.push(NextSuccess::StoppedByWhileCondition { idx }); - } - NextWithIdx::StoppedByError { error: e } => { - error = Some(e); - break; - } - } - } - } - - match error { - Some(error) => Err(error), - None => Ok(results), - } - }); - - let next = result.map(NextSuccess::reduce); - - (num_spawned, next) -} diff --git a/src/using_old/runner/parallel_runner_compute/u_next_any.rs b/src/using_old/runner/parallel_runner_compute/u_next_any.rs deleted file mode 100644 index 12e2077..0000000 --- a/src/using_old/runner/parallel_runner_compute/u_next_any.rs +++ /dev/null @@ -1,114 +0,0 @@ -use super::super::thread_runner_compute as th; -use crate::generic_values::runner_results::Fallibility; -use crate::orch::NumSpawned; -use crate::using_old::Using; -use crate::using_old::computations::{UM, UX}; -use crate::{generic_values::Values, runner::ParallelRunnerCompute}; -use orx_concurrent_iter::ConcurrentIter; - -pub fn u_m(runner: C, m: UM) -> (NumSpawned, Option) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - O: Send, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, -{ - let (mut using, _, iter, xap1) = m.destruct(); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result = std::thread::scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - 
num_spawned.increment(); - handles.push(s.spawn(|| { - th::u_next_any::u_m( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &xap1, - ) - })); - } - - // do not wait to join other threads - handles - .into_iter() - .find_map(|x| x.join().expect("failed to join the thread")) - }); - - (num_spawned, result) -} - -type ResultNextAny = - Result::Item>, <::Fallibility as Fallibility>::Error>; - -pub fn u_x(runner: C, x: UX) -> (NumSpawned, ResultNextAny) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(&mut U::Item, I::Item) -> Vo + Sync, -{ - let (mut using, _, iter, xap1) = x.destruct(); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result = std::thread::scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - handles.push(s.spawn(|| { - th::u_next_any::u_x( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &xap1, - ) - })); - } - - let mut result = Ok(None); - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - match handle.join().expect("failed to join the thread") { - Ok(Some(x)) => { - result = Ok(Some(x)); - break; - } - Err(error) => { - result = Err(error); - break; - } - Ok(None) => {} - } - } - } - - result - }); - - (num_spawned, result) -} diff --git a/src/using_old/runner/parallel_runner_compute/u_reduce.rs b/src/using_old/runner/parallel_runner_compute/u_reduce.rs deleted file mode 100644 index 28b67c9..0000000 --- a/src/using_old/runner/parallel_runner_compute/u_reduce.rs +++ /dev/null @@ -1,144 +0,0 @@ -use super::super::thread_runner_compute as th; -use crate::generic_values::Values; -use crate::generic_values::runner_results::{Fallibility, Reduce}; -use crate::orch::NumSpawned; -use crate::runner::ParallelRunnerCompute; -use crate::using_old::Using; -use crate::using_old::computations::{UM, UX}; -use orx_concurrent_iter::ConcurrentIter; - -// m - -pub fn u_m( - runner: C, - m: UM, - reduce: Red, -) -> (NumSpawned, Option) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - O: Send, - M1: Fn(&mut U::Item, I::Item) -> O + Sync, - Red: Fn(&mut U::Item, O, O) -> O + Sync, -{ - let (mut using, _, iter, map1) = m.destruct(); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let results = std::thread::scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - handles.push(s.spawn(|| { - th::u_reduce::u_m( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &map1, - &reduce, - ) - })); - } - - let mut results = Vec::with_capacity(handles.len()); - for x in handles { - if let Some(x) = x.join().expect("failed to join the thread") { - results.push(x); - } - } - results - }); - - let mut u = using.into_inner(); - let acc = results.into_iter().reduce(|a, b| reduce(&mut u, a, b)); - - (num_spawned, acc) -} - -// x - -type ResultReduce = - Result::Item>, <::Fallibility as Fallibility>::Error>; - -pub fn u_x( - runner: C, - x: UX, - reduce: Red, -) -> 
(NumSpawned, ResultReduce) -where - C: ParallelRunnerCompute, - U: Using, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - M1: Fn(&mut U::Item, I::Item) -> Vo + Sync, - Red: Fn(&mut U::Item, Vo::Item, Vo::Item) -> Vo::Item + Sync, -{ - let (mut using, _, iter, xap1) = x.destruct(); - - let state = runner.new_shared_state(); - let shared_state = &state; - - let mut num_spawned = NumSpawned::zero(); - let result: Result, _> = std::thread::scope(|s| { - let mut handles = vec![]; - - while runner.do_spawn_new(num_spawned, shared_state, &iter) { - let u = using.create(num_spawned.into_inner()); - num_spawned.increment(); - handles.push(s.spawn(|| { - th::u_reduce::u_x( - runner.new_thread_runner(shared_state), - u, - &iter, - shared_state, - &xap1, - &reduce, - ) - })); - } - - let mut results = Vec::with_capacity(handles.len()); - - let mut error = None; - while !handles.is_empty() { - let mut finished_idx = None; - for (h, handle) in handles.iter().enumerate() { - if handle.is_finished() { - finished_idx = Some(h); - break; - } - } - - if let Some(h) = finished_idx { - let handle = handles.remove(h); - let result = handle.join().expect("failed to join the thread"); - match result { - Reduce::Done { acc: Some(acc) } => results.push(acc), - Reduce::StoppedByWhileCondition { acc: Some(acc) } => results.push(acc), - Reduce::StoppedByError { error: e } => { - error = Some(e); - break; - } - _ => {} - } - } - } - - match error { - Some(error) => Err(error), - None => Ok(results), - } - }); - - let mut u = using.into_inner(); - let acc = result.map(|results| results.into_iter().reduce(|a, b| reduce(&mut u, a, b))); - - (num_spawned, acc) -} diff --git a/src/using_old/runner/thread_runner_compute/mod.rs b/src/using_old/runner/thread_runner_compute/mod.rs deleted file mode 100644 index 25f59bc..0000000 --- a/src/using_old/runner/thread_runner_compute/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub(crate) mod u_collect_arbitrary; -pub(crate) mod u_collect_ordered; -pub(crate) mod u_next; -pub(crate) mod u_next_any; -pub(crate) mod u_reduce; diff --git a/src/using_old/runner/thread_runner_compute/u_collect_arbitrary.rs b/src/using_old/runner/thread_runner_compute/u_collect_arbitrary.rs deleted file mode 100644 index b25fb0b..0000000 --- a/src/using_old/runner/thread_runner_compute/u_collect_arbitrary.rs +++ /dev/null @@ -1,142 +0,0 @@ -use crate::generic_values::Values; -use crate::generic_values::runner_results::Stop; -use crate::{ThreadRunner, generic_values::runner_results::ThreadCollectArbitrary}; -use orx_concurrent_bag::ConcurrentBag; -use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; -use orx_fixed_vec::IntoConcurrentPinnedVec; - -// m - -#[cfg(test)] -pub fn u_m( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - map1: &M1, - bag: &ConcurrentBag, -) where - C: ThreadRunner, - I: ConcurrentIter, - M1: Fn(&mut U, I::Item) -> O, - P: IntoConcurrentPinnedVec, - O: Send, -{ - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller(); - - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some(value) => _ = bag.push(map1(&mut u, value)), - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull() { - Some(chunk) => _ = bag.extend(chunk.map(|value| map1(&mut u, value))), - None => break, - } - } - } - - runner.complete_chunk(shared_state, 
chunk_size); - } - - runner.complete_task(shared_state); -} - -// x - -pub fn u_x( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - xap1: &X1, - bag: &ConcurrentBag, -) -> ThreadCollectArbitrary -where - C: ThreadRunner, - I: ConcurrentIter, - Vo: Values, - X1: Fn(&mut U, I::Item) -> Vo, - P: IntoConcurrentPinnedVec, - Vo::Item: Send, -{ - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller(); - - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some(value) => { - // TODO: possible to try to get len and bag.extend(values_vt.values()) when available, same holds for chunk below - let vo = xap1(&mut u, value); - let done = vo.push_to_bag(bag); - - if let Some(stop) = Vo::arbitrary_push_to_stop(done) { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - match stop { - Stop::DueToWhile => { - return ThreadCollectArbitrary::StoppedByWhileCondition; - } - Stop::DueToError { error } => { - return ThreadCollectArbitrary::StoppedByError { error }; - } - } - } - } - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull() { - Some(chunk) => { - for value in chunk { - let vo = xap1(&mut u, value); - let done = vo.push_to_bag(bag); - - if let Some(stop) = Vo::arbitrary_push_to_stop(done) { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - match stop { - Stop::DueToWhile => { - return ThreadCollectArbitrary::StoppedByWhileCondition; - } - Stop::DueToError { error } => { - return ThreadCollectArbitrary::StoppedByError { error }; - } - } - } - } - } - None => break, - } - } - } - - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); - - ThreadCollectArbitrary::AllCollected -} diff --git a/src/using_old/runner/thread_runner_compute/u_collect_ordered.rs b/src/using_old/runner/thread_runner_compute/u_collect_ordered.rs deleted file mode 100644 index ce998a3..0000000 --- a/src/using_old/runner/thread_runner_compute/u_collect_ordered.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::{ - ThreadRunner, - generic_values::{ - Values, - runner_results::{StopWithIdx, ThreadCollect}, - }, -}; -use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; -use orx_concurrent_ordered_bag::ConcurrentOrderedBag; -use orx_fixed_vec::IntoConcurrentPinnedVec; - -// m - -pub fn u_m( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - map1: &M1, - o_bag: &ConcurrentOrderedBag, - offset: usize, -) where - C: ThreadRunner, - I: ConcurrentIter, - M1: Fn(&mut U, I::Item) -> O, - P: IntoConcurrentPinnedVec, - O: Send, -{ - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller_with_idx(); - - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some((idx, value)) => unsafe { o_bag.set_value(offset + idx, map1(&mut u, value)) }, - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull_with_idx() { - Some((begin_idx, chunk)) => { - let values = chunk.map(|value| map1(&mut u, value)); - unsafe { o_bag.set_values(offset + begin_idx, values) }; - } - None => break, - } - } - } 
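        // Protocol of the loop above: the thread runner picks a chunk size each
        // round; a size of 0 or 1 pulls a single indexed item, while larger sizes
        // pull whole chunks. Writing through `offset + idx` keeps outputs in input
        // order across threads, and each round is reported back to the runner
        // below so it can adapt the next chunk size to the observed progress.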
- - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); -} - -// x - -pub fn u_x( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - xap1: &X1, -) -> ThreadCollect -where - C: ThreadRunner, - I: ConcurrentIter, - Vo: Values, - X1: Fn(&mut U, I::Item) -> Vo, -{ - let mut collected = Vec::new(); - let out_vec = &mut collected; - - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller_with_idx(); - - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some((idx, i)) => { - let vo = xap1(&mut u, i); - let done = vo.push_to_vec_with_idx(idx, out_vec); - if let Some(stop) = Vo::ordered_push_to_stop(done) { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - match stop { - StopWithIdx::DueToWhile { idx } => { - return ThreadCollect::StoppedByWhileCondition { - vec: collected, - stopped_idx: idx, - }; - } - StopWithIdx::DueToError { idx: _, error } => { - return ThreadCollect::StoppedByError { error }; - } - } - } - } - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull_with_idx() { - Some((chunk_begin_idx, chunk)) => { - for i in chunk { - let vo = xap1(&mut u, i); - let done = vo.push_to_vec_with_idx(chunk_begin_idx, out_vec); - if let Some(stop) = Vo::ordered_push_to_stop(done) { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - match stop { - StopWithIdx::DueToWhile { idx } => { - return ThreadCollect::StoppedByWhileCondition { - vec: collected, - stopped_idx: idx, - }; - } - StopWithIdx::DueToError { idx: _, error } => { - return ThreadCollect::StoppedByError { error }; - } - } - } - } - } - None => break, - } - } - } - - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); - - ThreadCollect::AllCollected { vec: collected } -} diff --git a/src/using_old/runner/thread_runner_compute/u_next.rs b/src/using_old/runner/thread_runner_compute/u_next.rs deleted file mode 100644 index c19bfae..0000000 --- a/src/using_old/runner/thread_runner_compute/u_next.rs +++ /dev/null @@ -1,163 +0,0 @@ -use crate::{ - ThreadRunner, - generic_values::{ - Values, - runner_results::{Next, NextWithIdx}, - }, -}; -use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; - -pub fn u_m( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - map1: &M1, -) -> Option<(usize, O)> -where - C: ThreadRunner, - I: ConcurrentIter, - M1: Fn(&mut U, I::Item) -> O, -{ - let u = &mut u; - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller_with_idx(); - - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some((idx, i)) => { - let first = map1(u, i); - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return Some((idx, first)); - } - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull_with_idx() { - Some((idx, mut chunk)) => { - if let Some(i) = chunk.next() { - let first = map1(u, i); - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - 
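                    // Once a first item is found, `iter.skip_to_end()` above drains
                    // the shared iterator so sibling threads observe an exhausted
                    // input and wind down; the runner bookkeeping is then closed
                    // out before returning the found item together with its index.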
runner.complete_task(shared_state); - return Some((idx, first)); - } - } - None => break, - } - } - } - - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); - None -} - -pub fn u_x( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - xap1: &X1, -) -> NextWithIdx -where - C: ThreadRunner, - I: ConcurrentIter, - Vo: Values, - X1: Fn(&mut U, I::Item) -> Vo, -{ - let u = &mut u; - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller_with_idx(); - - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some((idx, i)) => { - let vt = xap1(u, i); - match vt.next() { - Next::Done { value } => { - if let Some(value) = value { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return NextWithIdx::Found { idx, value }; - } - } - Next::StoppedByError { error } => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return NextWithIdx::StoppedByError { error }; - } - Next::StoppedByWhileCondition => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return NextWithIdx::StoppedByWhileCondition { idx }; - } - } - } - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull_with_idx() { - Some((idx, chunk)) => { - for i in chunk { - let vt = xap1(u, i); - match vt.next() { - Next::Done { value } => { - if let Some(value) = value { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return NextWithIdx::Found { idx, value }; - } - } - Next::StoppedByError { error } => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return NextWithIdx::StoppedByError { error }; - } - Next::StoppedByWhileCondition => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return NextWithIdx::StoppedByWhileCondition { idx }; - } - } - } - } - None => break, - } - } - } - - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); - NextWithIdx::NotFound -} diff --git a/src/using_old/runner/thread_runner_compute/u_next_any.rs b/src/using_old/runner/thread_runner_compute/u_next_any.rs deleted file mode 100644 index c90b7f8..0000000 --- a/src/using_old/runner/thread_runner_compute/u_next_any.rs +++ /dev/null @@ -1,163 +0,0 @@ -use crate::{ - ThreadRunner, - generic_values::Values, - generic_values::runner_results::{Fallibility, Next}, -}; -use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; - -pub fn u_m( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - map1: &M1, -) -> Option -where - C: ThreadRunner, - I: ConcurrentIter, - O: Send, - M1: Fn(&mut U, I::Item) -> O, -{ - let u = &mut u; - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller(); - - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some(i) => { - let first = map1(u, i); - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return 
Some(first); - } - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull() { - Some(mut chunk) => { - if let Some(i) = chunk.next() { - let first = map1(u, i); - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return Some(first); - } - } - None => break, - } - } - } - - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); - None -} - -pub fn u_x( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - xap1: &X1, -) -> Result, ::Error> -where - C: ThreadRunner, - I: ConcurrentIter, - Vo: Values, - Vo::Item: Send, - X1: Fn(&mut U, I::Item) -> Vo, -{ - let u = &mut u; - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller(); - - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some(i) => { - let vt = xap1(u, i); - match vt.next() { - Next::Done { value } => { - if let Some(value) = value { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return Ok(Some(value)); - } - } - Next::StoppedByError { error } => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return Err(error); - } - Next::StoppedByWhileCondition => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return Ok(None); - } - } - } - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull() { - Some(chunk) => { - for i in chunk { - let vt = xap1(u, i); - match vt.next() { - Next::Done { value } => { - if let Some(value) = value { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return Ok(Some(value)); - } - } - Next::StoppedByError { error } => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return Err(error); - } - Next::StoppedByWhileCondition => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - return Ok(None); - } - } - } - } - None => break, - } - } - } - - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); - Ok(None) -} diff --git a/src/using_old/runner/thread_runner_compute/u_reduce.rs b/src/using_old/runner/thread_runner_compute/u_reduce.rs deleted file mode 100644 index bb13891..0000000 --- a/src/using_old/runner/thread_runner_compute/u_reduce.rs +++ /dev/null @@ -1,178 +0,0 @@ -use crate::{ - ThreadRunner, - generic_values::{ - Values, - runner_results::{Reduce, StopReduce}, - }, -}; -use orx_concurrent_iter::{ChunkPuller, ConcurrentIter}; - -// m - -pub fn u_m( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - map1: &M1, - reduce: &Red, -) -> Option -where - C: ThreadRunner, - I: ConcurrentIter, - M1: Fn(&mut U, I::Item) -> O, - Red: Fn(&mut U, O, O) -> O, -{ - let u = &mut u; - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller(); - - let mut acc = None; - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match 
item_puller.next() { - Some(i) => { - let y = map1(u, i); - acc = match acc { - Some(x) => Some(reduce(u, x, y)), - None => Some(y), - }; - } - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull() { - Some(mut chunk) => { - acc = match acc { - Some(mut acc) => { - for a in chunk { - let a = map1(u, a); - acc = reduce(u, acc, a); - } - Some(acc) - } - None => match chunk.next() { - Some(a) => { - let mut acc = map1(u, a); - for a in chunk { - let a = map1(u, a); - acc = reduce(u, acc, a); - } - Some(acc) - } - None => None, - }, - }; - } - None => break, - } - } - } - - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); - acc -} - -// x - -pub fn u_x( - mut runner: C, - mut u: U, - iter: &I, - shared_state: &C::SharedState, - xap1: &X1, - reduce: &Red, -) -> Reduce -where - C: ThreadRunner, - I: ConcurrentIter, - Vo: Values, - X1: Fn(&mut U, I::Item) -> Vo, - Red: Fn(&mut U, Vo::Item, Vo::Item) -> Vo::Item, -{ - let u = &mut u; - - let mut chunk_puller = iter.chunk_puller(0); - let mut item_puller = iter.item_puller(); - - let mut acc = None; - loop { - let chunk_size = runner.next_chunk_size(shared_state, iter); - - runner.begin_chunk(chunk_size); - - match chunk_size { - 0 | 1 => match item_puller.next() { - Some(i) => { - let vo = xap1(u, i); - let reduce = vo.u_acc_reduce(u, acc, reduce); - acc = match Vo::reduce_to_stop(reduce) { - Ok(acc) => acc, - Err(stop) => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - match stop { - StopReduce::DueToWhile { acc } => { - return Reduce::StoppedByWhileCondition { acc }; - } - StopReduce::DueToError { error } => { - return Reduce::StoppedByError { error }; - } - } - } - }; - } - None => break, - }, - c => { - if c > chunk_puller.chunk_size() { - chunk_puller = iter.chunk_puller(c); - } - - match chunk_puller.pull() { - Some(chunk) => { - for i in chunk { - let vo = xap1(u, i); - let reduce = vo.u_acc_reduce(u, acc, reduce); - acc = match Vo::reduce_to_stop(reduce) { - Ok(acc) => acc, - Err(stop) => { - iter.skip_to_end(); - runner.complete_chunk(shared_state, chunk_size); - runner.complete_task(shared_state); - match stop { - StopReduce::DueToWhile { acc } => { - return Reduce::StoppedByWhileCondition { acc }; - } - StopReduce::DueToError { error } => { - return Reduce::StoppedByError { error }; - } - } - } - }; - } - } - None => break, - } - } - } - - runner.complete_chunk(shared_state, chunk_size); - } - - runner.complete_task(shared_state); - - Reduce::Done { acc } -} diff --git a/src/using_old/u_par_iter.rs b/src/using_old/u_par_iter.rs deleted file mode 100644 index f64d42c..0000000 --- a/src/using_old/u_par_iter.rs +++ /dev/null @@ -1,404 +0,0 @@ -use crate::{ - ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum, - orch::{DefaultOrchestrator, Orchestrator}, - using_old::{ - Using, - computations::{u_map_clone, u_map_copy, u_map_count, u_reduce_sum, u_reduce_unit}, - }, -}; -use core::cmp::Ordering; -use orx_concurrent_iter::ConcurrentIter; - -/// Parallel iterator which allows mutable access to a variable of type `U` within its iterator methods. -/// -/// Note that one variable will be created per thread used by the parallel computation. -pub trait ParIterUsingOld: Sized + Send + Sync -where - R: Orchestrator, - U: Using, -{ - /// Element type of the parallel iterator. 
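    // Design note on the (since removed) trait above: it mirrors `ParIter`, but
    // every closure additionally receives `&mut U::Item`, the per-thread value
    // produced by `Using::create`; this is what lets each thread accumulate into
    // its own state without synchronization.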
- type Item; - - /// Returns a reference to the input concurrent iterator. - fn con_iter(&self) -> &impl ConcurrentIter; - - /// Parameters of the parallel iterator. - /// - /// See [crate::ParIter::params] for details. - fn params(&self) -> Params; - - // params transformations - - /// Sets the number of threads to be used in the parallel execution. - /// Integers can be used as the argument with the following mapping: - /// - /// * `0` -> `NumThreads::Auto` - /// * `1` -> `NumThreads::sequential()` - /// * `n > 0` -> `NumThreads::Max(n)` - /// - /// /// Parameters of the parallel iterator. - /// - /// See [crate::ParIter::num_threads] for details. - fn num_threads(self, num_threads: impl Into) -> Self; - - /// Sets the number of elements to be pulled from the concurrent iterator during the - /// parallel execution. When integers are used as argument, the following mapping applies: - /// - /// * `0` -> `ChunkSize::Auto` - /// * `n > 0` -> `ChunkSize::Exact(n)` - /// - /// Please use the default enum constructor for creating `ChunkSize::Min` variant. - /// - /// See [crate::ParIter::chunk_size] for details. - fn chunk_size(self, chunk_size: impl Into) -> Self; - - /// Sets the iteration order of the parallel computation. - /// - /// See [crate::ParIter::iteration_order] for details. - fn iteration_order(self, collect: IterationOrder) -> Self; - - /// Rather than the [`DefaultOrchestrator`], uses the parallel runner `Q` which implements [`Orchestrator`]. - /// - /// See [crate::ParIter::with_runner] for details. - fn with_runner(self) -> impl ParIterUsingOld; - - // computation transformations - - /// Takes a closure `map` and creates a parallel iterator which calls that closure on each element. - /// - /// Unlike [crate::ParIter::map], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn map(self, map: Map) -> impl ParIterUsingOld - where - Map: Fn(&mut U::Item, Self::Item) -> Out + Sync + Clone; - - /// Creates an iterator which uses a closure `filter` to determine if an element should be yielded. - /// - /// Unlike [crate::ParIter::filter], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn filter(self, filter: Filter) -> impl ParIterUsingOld - where - Filter: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone; - - /// Creates an iterator that works like map, but flattens nested structure. - /// - /// Unlike [crate::ParIter::flat_map], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn flat_map( - self, - flat_map: FlatMap, - ) -> impl ParIterUsingOld - where - IOut: IntoIterator, - FlatMap: Fn(&mut U::Item, Self::Item) -> IOut + Sync + Clone; - - /// Creates an iterator that both filters and maps. - /// - /// The returned iterator yields only the values for which the supplied closure `filter_map` returns `Some(value)`. 
- /// - /// `filter_map` can be used to make chains of `filter` and `map` more concise. - /// The example below shows how a `map().filter().map()` can be shortened to a single call to `filter_map`. - /// - /// Unlike [crate::ParIter::filter_map], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn filter_map( - self, - filter_map: FilterMap, - ) -> impl ParIterUsingOld - where - FilterMap: Fn(&mut U::Item, Self::Item) -> Option + Sync + Clone; - - /// Does something with each element of an iterator, passing the value on. - /// - /// Unlike [crate::ParIter::inspect], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn inspect( - self, - operation: Operation, - ) -> impl ParIterUsingOld - where - Operation: Fn(&mut U::Item, &Self::Item) + Sync + Clone, - { - let map = move |u: &mut U::Item, x: Self::Item| { - operation(u, &x); - x - }; - self.map(map) - } - - // special item transformations - - /// Creates an iterator which copies all of its elements. - /// - /// Unlike [crate::ParIter::copied], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn copied<'a, T>(self) -> impl ParIterUsingOld - where - T: 'a + Copy, - Self: ParIterUsingOld, - { - self.map(u_map_copy) - } - - /// Creates an iterator which clones all of its elements. - /// - /// Unlike [crate::ParIter::cloned], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn cloned<'a, T>(self) -> impl ParIterUsingOld - where - T: 'a + Clone, - Self: ParIterUsingOld, - { - self.map(u_map_clone) - } - - /// Creates an iterator that flattens nested structure. - /// - /// Unlike [crate::ParIter::flatten], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn flatten(self) -> impl ParIterUsingOld::Item> - where - Self::Item: IntoIterator, - { - let map = |_: &mut U::Item, e: Self::Item| e.into_iter(); - self.flat_map(map) - } - - // collect - - /// Collects all the items from an iterator into a collection. - /// - /// Unlike [crate::ParIter::collect_into], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn collect_into(self, output: C) -> C - where - C: ParCollectInto; - - /// Transforms an iterator into a collection. 
- /// - /// Unlike [crate::ParIter::collect], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn collect(self) -> C - where - C: ParCollectInto, - { - let output = C::empty(self.con_iter().try_get_len()); - self.collect_into(output) - } - - // reduce - - /// Reduces the elements to a single one, by repeatedly applying a reducing operation. - /// - /// See the details here: [crate::ParIter::reduce]. - fn reduce(self, reduce: Reduce) -> Option - where - Self::Item: Send, - Reduce: Fn(&mut U::Item, Self::Item, Self::Item) -> Self::Item + Sync; - - /// Tests if every element of the iterator matches a predicate. - /// - /// Unlike [crate::ParIter::all], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn all(self, predicate: Predicate) -> bool - where - Self::Item: Send, - Predicate: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, - { - let violates = |u: &mut U::Item, x: &Self::Item| !predicate(u, x); - self.find(violates).is_none() - } - - /// Tests if any element of the iterator matches a predicate. - /// - /// Unlike [crate::ParIter::any], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn any(self, predicate: Predicate) -> bool - where - Self::Item: Send, - Predicate: Fn(&mut U::Item, &Self::Item) -> bool + Sync + Clone, - { - self.find(predicate).is_some() - } - - /// Consumes the iterator, counting the number of iterations and returning it. - /// - /// See the details here: [crate::ParIter::count]. - fn count(self) -> usize { - self.map(u_map_count).reduce(u_reduce_sum).unwrap_or(0) - } - - /// Calls a closure on each element of an iterator. - /// - /// Unlike [crate::ParIter::for_each], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn for_each(self, operation: Operation) - where - Operation: Fn(&mut U::Item, Self::Item) + Sync, - { - let map = |u: &mut U::Item, x| operation(u, x); - let _ = self.map(map).reduce(u_reduce_unit); - } - - /// Returns the maximum element of an iterator. - /// - /// See the details here: [crate::ParIter::max]. - fn max(self) -> Option - where - Self::Item: Ord + Send, - { - self.reduce(|_, a, b| Ord::max(a, b)) - } - - /// Returns the element that gives the maximum value with respect to the specified `compare` function. - /// - /// See the details here: [crate::ParIter::max_by]. 
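    // The min/max family below all reduce with a two-argument comparison that
    // keeps the left operand on ties, mirroring the behavior of the non-using
    // counterparts on `ParIter`.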
- fn max_by(self, compare: Compare) -> Option - where - Self::Item: Send, - Compare: Fn(&Self::Item, &Self::Item) -> Ordering + Sync, - { - let reduce = |_: &mut U::Item, x, y| match compare(&x, &y) { - Ordering::Greater | Ordering::Equal => x, - Ordering::Less => y, - }; - self.reduce(reduce) - } - - /// Returns the element that gives the maximum value from the specified function. - /// - /// See the details here: [crate::ParIter::max_by_key]. - fn max_by_key(self, key: GetKey) -> Option - where - Self::Item: Send, - Key: Ord, - GetKey: Fn(&Self::Item) -> Key + Sync, - { - let reduce = |_: &mut U::Item, x, y| match key(&x).cmp(&key(&y)) { - Ordering::Greater | Ordering::Equal => x, - Ordering::Less => y, - }; - self.reduce(reduce) - } - - /// Returns the minimum element of an iterator. - /// - /// See the details here: [crate::ParIter::min]. - fn min(self) -> Option - where - Self::Item: Ord + Send, - { - self.reduce(|_, a, b| Ord::min(a, b)) - } - - /// Returns the element that gives the minimum value with respect to the specified `compare` function. - /// - /// See the details here: [crate::ParIter::min_by]. - fn min_by(self, compare: Compare) -> Option - where - Self::Item: Send, - Compare: Fn(&Self::Item, &Self::Item) -> Ordering + Sync, - { - let reduce = |_: &mut U::Item, x, y| match compare(&x, &y) { - Ordering::Less | Ordering::Equal => x, - Ordering::Greater => y, - }; - self.reduce(reduce) - } - - /// Returns the element that gives the minimum value from the specified function. - /// - /// See the details here: [crate::ParIter::min_by_key]. - fn min_by_key(self, get_key: GetKey) -> Option - where - Self::Item: Send, - Key: Ord, - GetKey: Fn(&Self::Item) -> Key + Sync, - { - let reduce = |_: &mut U::Item, x, y| match get_key(&x).cmp(&get_key(&y)) { - Ordering::Less | Ordering::Equal => x, - Ordering::Greater => y, - }; - self.reduce(reduce) - } - - /// Sums the elements of an iterator. - /// - /// See the details here: [crate::ParIter::sum]. - fn sum(self) -> Out - where - Self::Item: Sum, - Out: Send, - { - self.map(Self::Item::u_map) - .reduce(Self::Item::u_reduce) - .unwrap_or(Self::Item::zero()) - } - - // early exit - - /// Returns the first (or any) element of the iterator; returns None if it is empty. - /// - /// * first element is returned if default iteration order `IterationOrder::Ordered` is used, - /// * any element is returned if `IterationOrder::Arbitrary` is set. - /// - /// See the details here: [crate::ParIter::first]. - fn first(self) -> Option - where - Self::Item: Send; - - /// Searches for an element of an iterator that satisfies a `predicate`. - /// - /// Unlike [crate::ParIter::find], the closure allows access to mutable reference of the used variable. - /// - /// Please see [`crate::ParIter::using`] transformation for details and examples. - /// - /// Further documentation can be found here: [`using.md`](https://github.com/orxfun/orx-parallel/blob/main/docs/using.md). - fn find(self, predicate: Predicate) -> Option - where - Self::Item: Send, - Predicate: Fn(&mut U::Item, &Self::Item) -> bool + Sync, - { - self.filter(&predicate).first() - } -} diff --git a/src/using_old/using_variants.rs b/src/using_old/using_variants.rs deleted file mode 100644 index 5489d23..0000000 --- a/src/using_old/using_variants.rs +++ /dev/null @@ -1,70 +0,0 @@ -/// A type that can [`create`] a value per thread, which will then be send to the thread, -/// and used mutable by the defined computation. 
-/// -/// [`create`]: crate::using::Using::create -pub trait Using { - /// Item to be used mutably by each threads used in parallel computation. - type Item: Send + 'static; - - /// Creates an instance of the variable to be used by the `thread_idx`-th thread. - fn create(&mut self, thread_idx: usize) -> Self::Item; - - /// Consumes self and creates exactly one instance of the variable. - fn into_inner(self) -> Self::Item; -} - -/// Using variant that creates instances of each thread by cloning an initial value. -pub struct UsingClone(T); - -impl UsingClone { - pub(crate) fn new(value: T) -> Self { - Self(value) - } -} - -impl Using for UsingClone { - type Item = T; - - fn create(&mut self, _: usize) -> T { - self.0.clone() - } - - fn into_inner(self) -> Self::Item { - self.0 - } -} - -/// Using variant that creates instances of each thread using a closure. -pub struct UsingFun -where - T: Send + 'static, - F: FnMut(usize) -> T, -{ - fun: F, -} - -impl UsingFun -where - T: Send + 'static, - F: FnMut(usize) -> T, -{ - pub(crate) fn new(fun: F) -> Self { - Self { fun } - } -} - -impl Using for UsingFun -where - T: Send + 'static, - F: FnMut(usize) -> T, -{ - type Item = T; - - fn create(&mut self, thread_idx: usize) -> Self::Item { - (self.fun)(thread_idx) - } - - fn into_inner(mut self) -> Self::Item { - (self.fun)(0) - } -} From 43635ee5bf0d5a5bf7d398ae0aa5171651a077aa Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 11:53:10 +0200 Subject: [PATCH 181/264] wip fn sync issue --- examples/using_metrics.rs | 230 +++++++++++++++++++------------------- src/par_iter.rs | 34 +++++- 2 files changed, 147 insertions(+), 117 deletions(-) diff --git a/examples/using_metrics.rs b/examples/using_metrics.rs index d73a7d8..2f13d41 100644 --- a/examples/using_metrics.rs +++ b/examples/using_metrics.rs @@ -1,116 +1,114 @@ -// use orx_parallel::*; -// use std::cell::UnsafeCell; - -// const N: u64 = 10_000_000; -// const MAX_NUM_THREADS: usize = 8; - -// // just some work -// fn fibonacci(n: u64) -> u64 { -// let mut a = 0; -// let mut b = 1; -// for _ in 0..n { -// let c = a + b; -// a = b; -// b = c; -// } -// a -// } - -// #[derive(Default, Debug)] -// struct ThreadMetrics { -// thread_idx: usize, -// num_items_handled: usize, -// handled_42: bool, -// num_filtered_out: usize, -// } - -// struct ThreadMetricsWriter<'a> { -// metrics_ref: &'a mut ThreadMetrics, -// } - -// struct ComputationMetrics { -// thread_metrics: UnsafeCell<[ThreadMetrics; MAX_NUM_THREADS]>, -// } -// impl ComputationMetrics { -// fn new() -> Self { -// let mut thread_metrics: [ThreadMetrics; MAX_NUM_THREADS] = Default::default(); -// for i in 0..MAX_NUM_THREADS { -// thread_metrics[i].thread_idx = i; -// } -// Self { -// thread_metrics: UnsafeCell::new(thread_metrics), -// } -// } -// } - -// impl ComputationMetrics { -// unsafe fn create_for_thread<'a>(&mut self, thread_idx: usize) -> ThreadMetricsWriter<'a> { -// // SAFETY: here we create a mutable variable to the thread_idx-th metrics -// // * If we call this method multiple times with the same index, -// // we create multiple mutable references to the same ThreadMetrics, -// // which would lead to a race condition. -// // * We must make sure that `create_for_thread` is called only once per thread. -// // * If we use `create_for_thread` within the `using` call to create mutable values -// // used by the threads, we are certain that the parallel computation -// // will only call this method once per thread; hence, it will not -// // cause the race condition. 
-// // * On the other hand, we must ensure that we do not call this method -// // externally. -// let array = unsafe { &mut *self.thread_metrics.get() }; -// ThreadMetricsWriter { -// metrics_ref: &mut array[thread_idx], -// } -// } -// } - -// fn main() { -// let mut metrics = ComputationMetrics::new(); - -// let input: Vec = (0..N).collect(); - -// let sum = input -// .par() -// // SAFETY: we do not call `create_for_thread` externally; -// // it is safe if it is called only by the parallel computation. -// .using(|t| unsafe { metrics.create_for_thread(t) }) -// .map(|m: &mut ThreadMetricsWriter<'_>, i| { -// // collect some useful metrics -// m.metrics_ref.num_items_handled += 1; -// m.metrics_ref.handled_42 |= *i == 42; - -// // actual work -// fibonacci((*i % 50) + 1) % 100 -// }) -// .filter(|m, i| { -// let is_even = i % 2 == 0; - -// if !is_even { -// m.metrics_ref.num_filtered_out += 1; -// } - -// is_even -// }) -// .num_threads(MAX_NUM_THREADS) -// .sum(); - -// println!("\nINPUT-LEN = {N}"); -// println!("SUM = {sum}"); - -// println!("\n\n"); - -// println!("COLLECTED METRICS PER THREAD"); -// for metrics in metrics.thread_metrics.get_mut().iter() { -// println!("* {metrics:?}"); -// } -// let total_by_metrics: usize = metrics -// .thread_metrics -// .get_mut() -// .iter() -// .map(|x| x.num_items_handled) -// .sum(); -// println!("\n-> total num_items_handled by collected metrics: {total_by_metrics:?}\n"); - -// assert_eq!(N as usize, total_by_metrics); -// } - -fn main() {} +use orx_parallel::*; +use std::cell::UnsafeCell; + +const N: u64 = 10_000_000; +const MAX_NUM_THREADS: usize = 8; + +// just some work +fn fibonacci(n: u64) -> u64 { + let mut a = 0; + let mut b = 1; + for _ in 0..n { + let c = a + b; + a = b; + b = c; + } + a +} + +#[derive(Default, Debug)] +struct ThreadMetrics { + thread_idx: usize, + num_items_handled: usize, + handled_42: bool, + num_filtered_out: usize, +} + +struct ThreadMetricsWriter<'a> { + metrics_ref: &'a mut ThreadMetrics, +} + +struct ComputationMetrics { + thread_metrics: UnsafeCell<[ThreadMetrics; MAX_NUM_THREADS]>, +} +impl ComputationMetrics { + fn new() -> Self { + let mut thread_metrics: [ThreadMetrics; MAX_NUM_THREADS] = Default::default(); + for i in 0..MAX_NUM_THREADS { + thread_metrics[i].thread_idx = i; + } + Self { + thread_metrics: UnsafeCell::new(thread_metrics), + } + } +} + +impl ComputationMetrics { + unsafe fn create_for_thread<'a>(&mut self, thread_idx: usize) -> ThreadMetricsWriter<'a> { + // SAFETY: here we create a mutable variable to the thread_idx-th metrics + // * If we call this method multiple times with the same index, + // we create multiple mutable references to the same ThreadMetrics, + // which would lead to a race condition. + // * We must make sure that `create_for_thread` is called only once per thread. + // * If we use `create_for_thread` within the `using` call to create mutable values + // used by the threads, we are certain that the parallel computation + // will only call this method once per thread; hence, it will not + // cause the race condition. + // * On the other hand, we must ensure that we do not call this method + // externally. 
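        // (A safe alternative, at the cost of synchronization, would be to wrap
        // each ThreadMetrics in a Mutex rather than relying on the once-per-thread
        // contract; the UnsafeCell used below avoids that locking cost.)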
+ let array = unsafe { &mut *self.thread_metrics.get() }; + ThreadMetricsWriter { + metrics_ref: &mut array[thread_idx], + } + } +} + +fn main() { + let mut metrics = ComputationMetrics::new(); + + let input: Vec = (0..N).collect(); + + let sum = input + .par() + // SAFETY: we do not call `create_for_thread` externally; + // it is safe if it is called only by the parallel computation. + .using(|t| unsafe { metrics.create_for_thread(t) }) + .map(|m: &mut ThreadMetricsWriter<'_>, i| { + // collect some useful metrics + m.metrics_ref.num_items_handled += 1; + m.metrics_ref.handled_42 |= *i == 42; + + // actual work + fibonacci((*i % 50) + 1) % 100 + }) + .filter(|m, i| { + let is_even = i % 2 == 0; + + if !is_even { + m.metrics_ref.num_filtered_out += 1; + } + + is_even + }) + .num_threads(MAX_NUM_THREADS) + .sum(); + + println!("\nINPUT-LEN = {N}"); + println!("SUM = {sum}"); + + println!("\n\n"); + + println!("COLLECTED METRICS PER THREAD"); + for metrics in metrics.thread_metrics.get_mut().iter() { + println!("* {metrics:?}"); + } + let total_by_metrics: usize = metrics + .thread_metrics + .get_mut() + .iter() + .map(|x| x.num_items_handled) + .sum(); + println!("\n-> total num_items_handled by collected metrics: {total_by_metrics:?}\n"); + + assert_eq!(N as usize, total_by_metrics); +} diff --git a/src/par_iter.rs b/src/par_iter.rs index f7e3d20..5ead958 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -437,7 +437,39 @@ where /// /// let input: Vec = (0..N).collect(); /// - + /// let sum = input + /// .par() + /// // SAFETY: we do not call `create_for_thread` externally; + /// // it is safe if it is called only by the parallel computation. + /// .using(|t| unsafe { metrics.create_for_thread(t) }) + /// .map(|m: &mut ThreadMetricsWriter<'_>, i| { + /// // collect some useful metrics + /// m.metrics_ref.num_items_handled += 1; + /// m.metrics_ref.handled_42 |= *i == 42; + /// + /// // actual work + /// fibonacci((*i % 20) + 1) % 100 + /// }) + /// .filter(|m, i| { + /// let is_even = i % 2 == 0; + /// + /// if !is_even { + /// m.metrics_ref.num_filtered_out += 1; + /// } + /// + /// is_even + /// }) + /// .num_threads(MAX_NUM_THREADS) + /// .sum(); + /// + /// let total_by_metrics: usize = metrics + /// .thread_metrics + /// .get_mut() + /// .iter() + /// .map(|x| x.num_items_handled) + /// .sum(); + /// + /// assert_eq!(N as usize, total_by_metrics); /// ``` /// fn using( From b42cedf676a728207457577ce3db4d61e915f547 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 12:01:39 +0200 Subject: [PATCH 182/264] fix tests --- examples/using_metrics.rs | 6 +++++- src/computational_variants/map.rs | 6 +++--- src/computational_variants/par.rs | 6 +++--- src/computational_variants/xap.rs | 6 +++--- src/par_iter.rs | 3 +++ 5 files changed, 17 insertions(+), 10 deletions(-) diff --git a/examples/using_metrics.rs b/examples/using_metrics.rs index 2f13d41..8601ea2 100644 --- a/examples/using_metrics.rs +++ b/examples/using_metrics.rs @@ -43,8 +43,10 @@ impl ComputationMetrics { } } +unsafe impl Sync for ComputationMetrics {} + impl ComputationMetrics { - unsafe fn create_for_thread<'a>(&mut self, thread_idx: usize) -> ThreadMetricsWriter<'a> { + unsafe fn create_for_thread<'a>(&self, thread_idx: usize) -> ThreadMetricsWriter<'a> { // SAFETY: here we create a mutable variable to the thread_idx-th metrics // * If we call this method multiple times with the same index, // we create multiple mutable references to the same ThreadMetrics, @@ -72,6 +74,8 @@ fn main() { .par() // SAFETY: we do 
not call `create_for_thread` externally; // it is safe if it is called only by the parallel computation. + // Since we unsafely implement Sync for ComputationMetrics, + // we must ensure that ComputationMetrics is not used elsewhere. .using(|t| unsafe { metrics.create_for_thread(t) }) .map(|m: &mut ThreadMetricsWriter<'_>, i| { // collect some useful metrics diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 6c5a134..e1ac3af 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -1,5 +1,4 @@ use super::xap::ParXap; -use crate::ParIterResult; use crate::computational_variants::fallible_result::ParMapResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; @@ -7,6 +6,7 @@ use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; use crate::using::{UParMap, UsingClone, UsingFun}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; +use crate::{ParIterResult, ParIterUsing}; use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator that maps inputs. @@ -101,7 +101,7 @@ where fn using( self, using: F, - ) -> impl crate::ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsing, R, Item = >::Item> where U: 'static, F: Fn(usize) -> U + Sync, @@ -115,7 +115,7 @@ where fn using_clone( self, value: U, - ) -> impl crate::ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsing, R, Item = >::Item> where U: Clone + 'static, { diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 19a2f17..bfa838f 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -8,7 +8,7 @@ use crate::using::{UPar, UsingClone, UsingFun}; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params, default_fns::map_self, }; -use crate::{IntoParIter, ParIterResult}; +use crate::{IntoParIter, ParIterResult, ParIterUsing}; use orx_concurrent_iter::chain::ChainKnownLenI; use orx_concurrent_iter::{ConcurrentIter, ExactSizeConcurrentIter}; @@ -96,7 +96,7 @@ where fn using( self, using: F, - ) -> impl crate::ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsing, R, Item = >::Item> where U: 'static, F: Fn(usize) -> U + Sync, @@ -109,7 +109,7 @@ where fn using_clone( self, value: U, - ) -> impl crate::ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsing, R, Item = >::Item> where U: Clone + 'static, { diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index cda0e4c..b811307 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -1,4 +1,3 @@ -use crate::ParIterResult; use crate::computational_variants::fallible_result::ParXapResult; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; @@ -7,6 +6,7 @@ use crate::par_iter_result::IntoResult; use crate::runner::parallel_runner_compute as prc; use crate::using::{UParXap, UsingClone, UsingFun}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; +use crate::{ParIterResult, ParIterUsing}; use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator that xaps inputs. 
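(For orientation while reading these hunks: a caller-side sketch of the `using_clone`
entry point being re-exported here; illustrative only and not part of the patch. The
closure signatures follow the `ParIterUsing` trait bounds shown in the hunks above.)

use orx_parallel::*;

fn demo(input: Vec<u64>) -> u64 {
    input
        .par()
        // one scratch vector is created per thread by cloning this seed value
        .using_clone(Vec::new())
        .map(|scratch: &mut Vec<u64>, x: &u64| {
            scratch.push(*x); // per-thread mutable state, no locking required
            x * x
        })
        .sum()
}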
@@ -108,7 +108,7 @@ where fn using( self, using: F, - ) -> impl crate::ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsing, R, Item = >::Item> where U: 'static, F: Fn(usize) -> U + Sync, @@ -122,7 +122,7 @@ where fn using_clone( self, value: U, - ) -> impl crate::ParIterUsing, R, Item = >::Item> + ) -> impl ParIterUsing, R, Item = >::Item> where U: Clone + 'static, { diff --git a/src/par_iter.rs b/src/par_iter.rs index 5ead958..b9f3a96 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -401,6 +401,7 @@ where /// struct ComputationMetrics { /// thread_metrics: UnsafeCell<[ThreadMetrics; MAX_NUM_THREADS]>, /// } + /// unsafe impl Sync for ComputationMetrics {} /// impl ComputationMetrics { /// fn new() -> Self { /// let mut thread_metrics: [ThreadMetrics; MAX_NUM_THREADS] = Default::default(); @@ -441,6 +442,8 @@ where /// .par() /// // SAFETY: we do not call `create_for_thread` externally; /// // it is safe if it is called only by the parallel computation. + /// // Since we unsafely implement Sync for ComputationMetrics, + /// // we must ensure that ComputationMetrics is not used elsewhere. /// .using(|t| unsafe { metrics.create_for_thread(t) }) /// .map(|m: &mut ThreadMetricsWriter<'_>, i| { /// // collect some useful metrics From c16229b5c767a3a03350d968b1c329d99cb35d9a Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 12:07:52 +0200 Subject: [PATCH 183/264] fix thread index (0-based) in using computations --- src/orch/par_thread_pool.rs | 6 ++++-- src/par_iter.rs | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index 8016e76..15d539a 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -45,9 +45,10 @@ pub trait ParThreadPoolCompute: ParThreadPool { let bag = &thread_results; self.scoped_computation(|s| { while do_spawn(nt) { + let num_spawned = nt; nt.increment(); let work = move || { - bag.push(thread_map(nt)); + bag.push(thread_map(num_spawned)); }; Self::run_in_scope(&s, work); } @@ -68,8 +69,9 @@ pub trait ParThreadPoolCompute: ParThreadPool { let mut nt = NumSpawned::zero(); self.scoped_computation(|s| { while do_spawn(nt) { + let num_spawned = nt; nt.increment(); - let work = move || thread_do(nt); + let work = move || thread_do(num_spawned); Self::run_in_scope(&s, work); } }); diff --git a/src/par_iter.rs b/src/par_iter.rs index b9f3a96..3f0642c 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -415,7 +415,7 @@ where /// } /// /// impl ComputationMetrics { - /// unsafe fn create_for_thread<'a>(&mut self, thread_idx: usize) -> ThreadMetricsWriter<'a> { + /// unsafe fn create_for_thread<'a>(&self, thread_idx: usize) -> ThreadMetricsWriter<'a> { /// // SAFETY: here we create a mutable variable to the thread_idx-th metrics /// // * If we call this method multiple times with the same index, /// // we create multiple mutable references to the same ThreadMetrics, @@ -465,6 +465,8 @@ where /// .num_threads(MAX_NUM_THREADS) /// .sum(); /// + /// assert_eq!(sum, 9100); + /// /// let total_by_metrics: usize = metrics /// .thread_metrics /// .get_mut() From 88a68a082be3ba769c3a811a995eab43e6dc4bbc Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:10:20 +0200 Subject: [PATCH 184/264] dependency update, std to core --- Cargo.toml | 3 ++- src/computational_variants/fallible_option.rs | 2 +- src/computational_variants/fallible_result/map_result.rs | 2 +- src/computational_variants/fallible_result/par_result.rs | 2 +- 
src/computational_variants/fallible_result/xap_result.rs | 2 +- src/computational_variants/tests/mod.rs | 8 ++++---- src/default_fns.rs | 2 +- src/generic_iterator/reduce.rs | 2 +- src/generic_values/runner_results/collect_arbitrary.rs | 4 ++-- src/generic_values/runner_results/collect_ordered.rs | 4 ++-- src/generic_values/runner_results/fallibility.rs | 2 +- src/generic_values/runner_results/reduce.rs | 2 +- src/lib.rs | 2 +- src/orch/implementations/default_std_orchestrator.rs | 2 +- src/orch/implementations/rayon.rs | 2 +- src/orch/implementations/scoped_threadpool.rs | 2 +- src/parallel_drainable.rs | 2 +- src/parameters/chunk_size.rs | 2 +- src/parameters/num_threads.rs | 2 +- src/runner/fixed_chunk_runner/parallel_runner.rs | 2 +- src/special_type_sets/sum.rs | 2 +- tests/parallel_drainable.rs | 2 +- 22 files changed, 28 insertions(+), 27 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ea3dae6..7db88b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,7 @@ name = "find_iter_into_par" harness = false [features] -default = ["std", "scoped_threadpool", "rayon"] +default = ["std"] +# default = ["std", "scoped_threadpool", "rayon"] std = [] generic_iterator = ["rayon"] diff --git a/src/computational_variants/fallible_option.rs b/src/computational_variants/fallible_option.rs index 429ef33..c86e45c 100644 --- a/src/computational_variants/fallible_option.rs +++ b/src/computational_variants/fallible_option.rs @@ -3,7 +3,7 @@ use crate::{ orch::{DefaultOrchestrator, Orchestrator}, par_iter_option::{ParIterOption, ResultIntoOption}, }; -use std::marker::PhantomData; +use core::marker::PhantomData; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with None. diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 3bbb440..71c4697 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -3,8 +3,8 @@ use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::runner::parallel_runner_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; +use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with an error. diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 0305af7..123bd72 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -3,8 +3,8 @@ use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::runner::parallel_runner_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; +use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with an error. 
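(The import rewrite in this patch is mechanical; the pattern repeated across these
files is the following sketch, not itself part of the diff.)

// before: ties the module to the standard library
use std::marker::PhantomData;

// after: the same type from `core`, compiling under the `#![no_std]`
// attribute that this patch enables in lib.rs
use core::marker::PhantomData;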
diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 919fd7f..ccda24c 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -5,8 +5,8 @@ use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::runner::parallel_runner_compute as prc; use crate::{IterationOrder, ParCollectInto, Params}; +use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; -use std::marker::PhantomData; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with an error. diff --git a/src/computational_variants/tests/mod.rs b/src/computational_variants/tests/mod.rs index 54f1bfc..ad37051 100644 --- a/src/computational_variants/tests/mod.rs +++ b/src/computational_variants/tests/mod.rs @@ -3,12 +3,12 @@ mod count; mod flatten; mod for_each; mod inspect; -mod iter_consuming; -mod iter_ref; +// mod iter_consuming; +// mod iter_ref; mod map; mod min_max; -mod range; -mod slice; +// mod range; +// mod slice; mod sum; mod vectors; mod xap; diff --git a/src/default_fns.rs b/src/default_fns.rs index daa9d11..523904d 100644 --- a/src/default_fns.rs +++ b/src/default_fns.rs @@ -1,4 +1,4 @@ -use std::ops::Add; +use core::ops::Add; #[inline(always)] pub fn map_self(input: T) -> T { diff --git a/src/generic_iterator/reduce.rs b/src/generic_iterator/reduce.rs index cfd27e8..73809ce 100644 --- a/src/generic_iterator/reduce.rs +++ b/src/generic_iterator/reduce.rs @@ -159,7 +159,7 @@ where /// [`sum`]: crate::ParIter::sum pub fn sum(self) -> T where - T: crate::special_type_sets::Sum + std::iter::Sum, + T: crate::special_type_sets::Sum + core::iter::Sum, { match self { GenericIterator::Sequential(x) => x.sum(), diff --git a/src/generic_values/runner_results/collect_arbitrary.rs b/src/generic_values/runner_results/collect_arbitrary.rs index 3ff1edd..ea6cf6b 100644 --- a/src/generic_values/runner_results/collect_arbitrary.rs +++ b/src/generic_values/runner_results/collect_arbitrary.rs @@ -26,7 +26,7 @@ impl ThreadCollectArbitrary { } impl core::fmt::Debug for ThreadCollectArbitrary { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::AllCollected => write!(f, "AllCollected"), Self::StoppedByWhileCondition => write!(f, "StoppedByWhileCondition"), @@ -54,7 +54,7 @@ where V: Values, P: IntoConcurrentPinnedVec, { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::AllOrUntilWhileCollected { pinned_vec } => f .debug_struct("AllCollected") diff --git a/src/generic_values/runner_results/collect_ordered.rs b/src/generic_values/runner_results/collect_ordered.rs index 6b3e671..2d67750 100644 --- a/src/generic_values/runner_results/collect_ordered.rs +++ b/src/generic_values/runner_results/collect_ordered.rs @@ -29,7 +29,7 @@ where } impl Debug for ThreadCollect { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::AllCollected { vec } => f .debug_struct("AllCollected") @@ -76,7 +76,7 @@ where V: Values, P: IntoConcurrentPinnedVec, { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut 
core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::AllCollected { pinned_vec } => f .debug_struct("AllCollected") diff --git a/src/generic_values/runner_results/fallibility.rs b/src/generic_values/runner_results/fallibility.rs index 71cd353..6a02580 100644 --- a/src/generic_values/runner_results/fallibility.rs +++ b/src/generic_values/runner_results/fallibility.rs @@ -5,7 +5,7 @@ use crate::generic_values::{ }, }; use alloc::vec::Vec; -use std::marker::PhantomData; +use core::marker::PhantomData; pub trait Fallibility: Sized { type Error: Send; diff --git a/src/generic_values/runner_results/reduce.rs b/src/generic_values/runner_results/reduce.rs index ffd5730..9704230 100644 --- a/src/generic_values/runner_results/reduce.rs +++ b/src/generic_values/runner_results/reduce.rs @@ -23,7 +23,7 @@ impl Reduce { } impl core::fmt::Debug for Reduce { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::Done { acc: _ } => f.debug_struct("Done").finish(), Self::StoppedByWhileCondition { acc: _ } => { diff --git a/src/lib.rs b/src/lib.rs index 9e5f7f3..f9cfed1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,7 +10,7 @@ clippy::missing_panics_doc, clippy::todo )] -// #![no_std] +#![no_std] extern crate alloc; diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index 1f43f5b..fd27b53 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -1,6 +1,6 @@ use crate::{DefaultRunner, orch::Orchestrator}; use crate::{env::MAX_NUM_THREADS_ENV_VARIABLE, orch::ParThreadPool}; -use std::num::NonZeroUsize; +use core::num::NonZeroUsize; // POOL diff --git a/src/orch/implementations/rayon.rs b/src/orch/implementations/rayon.rs index 99f43f8..e0c406d 100644 --- a/src/orch/implementations/rayon.rs +++ b/src/orch/implementations/rayon.rs @@ -2,9 +2,9 @@ use crate::{ DefaultRunner, ParallelRunner, orch::{Orchestrator, ParThreadPool}, }; +use core::{marker::PhantomData, num::NonZeroUsize}; use orx_self_or::SoR; use rayon::ThreadPool; -use std::{marker::PhantomData, num::NonZeroUsize}; // POOL diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/orch/implementations/scoped_threadpool.rs index 27311d4..50cba62 100644 --- a/src/orch/implementations/scoped_threadpool.rs +++ b/src/orch/implementations/scoped_threadpool.rs @@ -2,9 +2,9 @@ use crate::{ DefaultRunner, ParallelRunner, orch::{Orchestrator, ParThreadPool}, }; +use core::{marker::PhantomData, num::NonZeroUsize}; use orx_self_or::SoM; use scoped_threadpool::Pool; -use std::{marker::PhantomData, num::NonZeroUsize}; // POOL diff --git a/src/parallel_drainable.rs b/src/parallel_drainable.rs index 1f0ffe5..70f8f1d 100644 --- a/src/parallel_drainable.rs +++ b/src/parallel_drainable.rs @@ -1,6 +1,6 @@ use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; +use core::ops::RangeBounds; use orx_concurrent_iter::ConcurrentDrainableOverSlice; -use std::ops::RangeBounds; /// A type which can create a parallel draining iterator over any of its sub-slices. 
/// diff --git a/src/parameters/chunk_size.rs b/src/parameters/chunk_size.rs index 66b4d4a..49fdd06 100644 --- a/src/parameters/chunk_size.rs +++ b/src/parameters/chunk_size.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use core::num::NonZeroUsize; /// `ChunkSize` represents the batch size of elements each thread will pull from the main iterator once it becomes idle again. /// It is possible to define a minimum or exact chunk size. diff --git a/src/parameters/num_threads.rs b/src/parameters/num_threads.rs index d620088..4716d2d 100644 --- a/src/parameters/num_threads.rs +++ b/src/parameters/num_threads.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use core::num::NonZeroUsize; /// `NumThreads` represents the degree of parallelization. It is possible to define an upper bound on the number of threads to be used for the parallel computation. /// When set to **1**, the computation will be executed sequentially without any overhead. diff --git a/src/runner/fixed_chunk_runner/parallel_runner.rs b/src/runner/fixed_chunk_runner/parallel_runner.rs index 7cb5879..99b835e 100644 --- a/src/runner/fixed_chunk_runner/parallel_runner.rs +++ b/src/runner/fixed_chunk_runner/parallel_runner.rs @@ -7,8 +7,8 @@ use crate::{ parameters::Params, runner::{computation_kind::ComputationKind, parallel_runner::ParallelRunner}, }; +use core::sync::atomic::{AtomicUsize, Ordering}; use orx_concurrent_iter::ConcurrentIter; -use std::sync::atomic::{AtomicUsize, Ordering}; const LAG_PERIODICITY: usize = 4; diff --git a/src/special_type_sets/sum.rs b/src/special_type_sets/sum.rs index 73fbd63..6a62752 100644 --- a/src/special_type_sets/sum.rs +++ b/src/special_type_sets/sum.rs @@ -1,4 +1,4 @@ -use std::ops::Add; +use core::ops::Add; /// Number that can be summed over. pub trait Sum { diff --git a/tests/parallel_drainable.rs b/tests/parallel_drainable.rs index 2112ed6..c9e1014 100644 --- a/tests/parallel_drainable.rs +++ b/tests/parallel_drainable.rs @@ -1,5 +1,5 @@ +use core::ops::Range; use orx_parallel::*; -use std::ops::Range; use test_case::test_matrix; #[derive(Clone, Debug)] From e808cbd3087b050cc20a64bb431886b08dea824b Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:11:18 +0200 Subject: [PATCH 185/264] upgrade pinned vec dependencies --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7db88b5..24fcfea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,8 +13,8 @@ categories = ["concurrency", "algorithms"] [dependencies] orx-pseudo-default = { version = "2.1.0", default-features = false } orx-pinned-vec = { version = "3.17.0", default-features = false } -orx-fixed-vec = { version = "3.18.0", default-features = false } -orx-split-vec = { version = "3.18.0", default-features = false } +orx-fixed-vec = { version = "3.19.0", default-features = false } +orx-split-vec = { version = "3.19.0", default-features = false } orx-pinned-concurrent-col = { version = "2.14.0", default-features = false } orx-concurrent-bag = { version = "3.0.0", default-features = false } orx-concurrent-ordered-bag = { version = "3.0.0", default-features = false } From e0b955b1d5c562c1957f33282650d53c95956fe8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:12:21 +0200 Subject: [PATCH 186/264] re-enabled parallelization tests on pinned vectors --- tests/trait_bounds.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/trait_bounds.rs b/tests/trait_bounds.rs index e1d0caf..5ac87c4 100644 --- a/tests/trait_bounds.rs +++ 
b/tests/trait_bounds.rs @@ -13,8 +13,8 @@ fn trait_bounds_parallelizable() { fun(&vec![1, 2, 3]); fun(&VecDeque::::new()); fun(0..9); - // fun(&FixedVec::::new(3)); - // fun(&SplitVec::::new()); + fun(&FixedVec::::new(3)); + fun(&SplitVec::::new()); } #[test] @@ -26,8 +26,8 @@ fn trait_bounds_parallelizable_collection() { fun(vec![1, 2, 3]); fun(VecDeque::::new()); - // fun(FixedVec::::new(3)); - // fun(SplitVec::::new()); + fun(FixedVec::::new(3)); + fun(SplitVec::::new()); } #[test] @@ -40,14 +40,14 @@ fn trait_bounds_into_par_iter() { // owned fun(vec![1, 2, 3]); fun(VecDeque::::new()); - // fun(FixedVec::::new(3)); - // fun(SplitVec::::new()); + fun(FixedVec::::new(3)); + fun(SplitVec::::new()); // ref fun(vec![1, 2, 3].as_slice()); fun(&vec![1, 2, 3]); fun(&VecDeque::::new()); fun(0..9); - // fun(&FixedVec::::new(3)); - // fun(&SplitVec::::new()); + fun(&FixedVec::::new(3)); + fun(&SplitVec::::new()); } From 3a26e1008381a007e1139846b80366595081cc04 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:18:32 +0200 Subject: [PATCH 187/264] upgrade criterion dep --- Cargo.toml | 4 ++-- benches/chain3_collect_map.rs | 3 ++- benches/chain4_collect_map.rs | 3 ++- benches/chain_collect_map.rs | 3 ++- benches/collect_filter.rs | 3 ++- benches/collect_filtermap.rs | 3 ++- benches/collect_flatmap.rs | 3 ++- benches/collect_iter_into_par.rs | 3 ++- benches/collect_long_chain.rs | 3 ++- benches/collect_map.rs | 3 ++- benches/collect_map_filter.rs | 3 ++- benches/collect_map_filter_hash_set.rs | 3 ++- benches/count_filtermap.rs | 3 ++- benches/count_flatmap.rs | 3 ++- benches/count_map.rs | 3 ++- benches/count_map_filter.rs | 3 ++- benches/drain_vec_collect_map_filter.rs | 3 ++- benches/find.rs | 3 ++- benches/find_any.rs | 3 ++- benches/find_flatmap.rs | 3 ++- benches/find_iter_into_par.rs | 3 ++- benches/find_map_filter.rs | 3 ++- benches/mut_for_each_slice.rs | 3 ++- benches/reduce.rs | 3 ++- benches/reduce_iter_into_par.rs | 3 ++- benches/reduce_long_chain.rs | 3 ++- benches/reduce_map.rs | 3 ++- benches/reduce_map_filter.rs | 3 ++- benches/result_collect_map.rs | 3 ++- benches/result_reduce_map.rs | 3 ++- benches/sum.rs | 3 ++- benches/sum_filtermap.rs | 3 ++- benches/sum_flatmap.rs | 3 ++- benches/sum_map_filter.rs | 3 ++- benches/vec_deque_collect_map_filter.rs | 3 ++- benches/vec_deque_collect_map_filter_owned.rs | 3 ++- 36 files changed, 72 insertions(+), 37 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 24fcfea..9e7a67b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,9 +30,9 @@ orx-self-or = { version = "1.2.0" } [dev-dependencies] chrono = "0.4.39" clap = { version = "4.5.36", features = ["derive"] } -criterion = "0.5.1" +criterion = "0.7.0" orx-concurrent-option = { version = "1.5.0", default-features = false } -orx-concurrent-vec = "3.6.0" +orx-concurrent-vec = "3.8.0" rand = "0.9" rand_chacha = "0.9" rayon = "1.10.0" diff --git a/benches/chain3_collect_map.rs b/benches/chain3_collect_map.rs index 2d1acf2..1c46573 100644 --- a/benches/chain3_collect_map.rs +++ b/benches/chain3_collect_map.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/chain4_collect_map.rs b/benches/chain4_collect_map.rs index 6cdce5e..af4bb5c 
100644 --- a/benches/chain4_collect_map.rs +++ b/benches/chain4_collect_map.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/chain_collect_map.rs b/benches/chain_collect_map.rs index 2e7d9d9..b119604 100644 --- a/benches/chain_collect_map.rs +++ b/benches/chain_collect_map.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/collect_filter.rs b/benches/collect_filter.rs index 28ad18e..6f76d03 100644 --- a/benches/collect_filter.rs +++ b/benches/collect_filter.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/collect_filtermap.rs b/benches/collect_filtermap.rs index 2d5620c..7bacf98 100644 --- a/benches/collect_filtermap.rs +++ b/benches/collect_filtermap.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/collect_flatmap.rs b/benches/collect_flatmap.rs index b741073..4a38b5e 100644 --- a/benches/collect_flatmap.rs +++ b/benches/collect_flatmap.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/collect_iter_into_par.rs b/benches/collect_iter_into_par.rs index 9c86f9a..b47498c 100644 --- a/benches/collect_iter_into_par.rs +++ b/benches/collect_iter_into_par.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::ParallelBridge; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/collect_long_chain.rs b/benches/collect_long_chain.rs index 0b50311..7607fc6 100644 --- a/benches/collect_long_chain.rs +++ b/benches/collect_long_chain.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, 
Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const SEED: u64 = 5426; const FIB_UPPER_BOUND: u32 = 29; diff --git a/benches/collect_map.rs b/benches/collect_map.rs index 26ca089..a54a89e 100644 --- a/benches/collect_map.rs +++ b/benches/collect_map.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/collect_map_filter.rs b/benches/collect_map_filter.rs index d8b908e..7f8d4c1 100644 --- a/benches/collect_map_filter.rs +++ b/benches/collect_map_filter.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/collect_map_filter_hash_set.rs b/benches/collect_map_filter_hash_set.rs index 4f30c36..5e3472f 100644 --- a/benches/collect_map_filter_hash_set.rs +++ b/benches/collect_map_filter_hash_set.rs @@ -1,9 +1,10 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; use std::collections::HashSet; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/count_filtermap.rs b/benches/count_filtermap.rs index 17d6e94..a73e4d9 100644 --- a/benches/count_filtermap.rs +++ b/benches/count_filtermap.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/count_flatmap.rs b/benches/count_flatmap.rs index 32a66eb..8a54fb3 100644 --- a/benches/count_flatmap.rs +++ b/benches/count_flatmap.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = true; diff --git a/benches/count_map.rs b/benches/count_map.rs index 8181386..78d2da4 100644 --- a/benches/count_map.rs +++ b/benches/count_map.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/count_map_filter.rs 
b/benches/count_map_filter.rs index 28607ce..6e00a37 100644 --- a/benches/count_map_filter.rs +++ b/benches/count_map_filter.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = true; diff --git a/benches/drain_vec_collect_map_filter.rs b/benches/drain_vec_collect_map_filter.rs index 4570325..d47ff31 100644 --- a/benches/drain_vec_collect_map_filter.rs +++ b/benches/drain_vec_collect_map_filter.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/find.rs b/benches/find.rs index 03097b7..53d87c1 100644 --- a/benches/find.rs +++ b/benches/find.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/find_any.rs b/benches/find_any.rs index 03e0dd2..16773b2 100644 --- a/benches/find_any.rs +++ b/benches/find_any.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/find_flatmap.rs b/benches/find_flatmap.rs index a556f59..8923ab2 100644 --- a/benches/find_flatmap.rs +++ b/benches/find_flatmap.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/find_iter_into_par.rs b/benches/find_iter_into_par.rs index 221efde..e5d43f8 100644 --- a/benches/find_iter_into_par.rs +++ b/benches/find_iter_into_par.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::ParallelBridge; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/find_map_filter.rs b/benches/find_map_filter.rs index 892b34e..1af9888 100644 --- a/benches/find_map_filter.rs +++ b/benches/find_map_filter.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = 
false; diff --git a/benches/mut_for_each_slice.rs b/benches/mut_for_each_slice.rs index c151d7b..30e8549 100644 --- a/benches/mut_for_each_slice.rs +++ b/benches/mut_for_each_slice.rs @@ -1,4 +1,5 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; +use std::hint::black_box; #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] struct Data { diff --git a/benches/reduce.rs b/benches/reduce.rs index 281bd8a..d3e8ae5 100644 --- a/benches/reduce.rs +++ b/benches/reduce.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/reduce_iter_into_par.rs b/benches/reduce_iter_into_par.rs index 7ec709a..7d00d3a 100644 --- a/benches/reduce_iter_into_par.rs +++ b/benches/reduce_iter_into_par.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::ParallelBridge; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/reduce_long_chain.rs b/benches/reduce_long_chain.rs index fa23968..2c8415b 100644 --- a/benches/reduce_long_chain.rs +++ b/benches/reduce_long_chain.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const SEED: u64 = 5426; const FIB_UPPER_BOUND: u32 = 29; diff --git a/benches/reduce_map.rs b/benches/reduce_map.rs index e66312d..9eff300 100644 --- a/benches/reduce_map.rs +++ b/benches/reduce_map.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/reduce_map_filter.rs b/benches/reduce_map_filter.rs index 532d693..287a716 100644 --- a/benches/reduce_map_filter.rs +++ b/benches/reduce_map_filter.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/result_collect_map.rs b/benches/result_collect_map.rs index 040d0ce..833c6eb 100644 --- a/benches/result_collect_map.rs +++ b/benches/result_collect_map.rs @@ -1,6 +1,7 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use rand::prelude::*; use rand_chacha::ChaCha8Rng; +use std::hint::black_box; use std::num::ParseIntError; type ERR = ParseIntError; diff --git 
a/benches/result_reduce_map.rs b/benches/result_reduce_map.rs index 6a1c771..6dd82e9 100644 --- a/benches/result_reduce_map.rs +++ b/benches/result_reduce_map.rs @@ -1,7 +1,8 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_concurrent_option::{ConcurrentOption, IntoOption}; use rand::prelude::*; use rand_chacha::ChaCha8Rng; +use std::hint::black_box; use std::num::ParseIntError; type ERR = ParseIntError; diff --git a/benches/sum.rs b/benches/sum.rs index 0ed6fb3..db45575 100644 --- a/benches/sum.rs +++ b/benches/sum.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const SEED: u64 = 9562; const FIB_UPPER_BOUND: u32 = 201; diff --git a/benches/sum_filtermap.rs b/benches/sum_filtermap.rs index ec77fd3..f4fb35e 100644 --- a/benches/sum_filtermap.rs +++ b/benches/sum_filtermap.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/sum_flatmap.rs b/benches/sum_flatmap.rs index 469abca..0e3f747 100644 --- a/benches/sum_flatmap.rs +++ b/benches/sum_flatmap.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/sum_map_filter.rs b/benches/sum_map_filter.rs index 1d2cf7a..cccda74 100644 --- a/benches/sum_map_filter.rs +++ b/benches/sum_map_filter.rs @@ -1,8 +1,9 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; +use std::hint::black_box; const SEED: u64 = 5426; const FIB_UPPER_BOUND: u32 = 201; diff --git a/benches/vec_deque_collect_map_filter.rs b/benches/vec_deque_collect_map_filter.rs index accb874..9ff9b51 100644 --- a/benches/vec_deque_collect_map_filter.rs +++ b/benches/vec_deque_collect_map_filter.rs @@ -1,10 +1,11 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; use std::collections::VecDeque; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; diff --git a/benches/vec_deque_collect_map_filter_owned.rs b/benches/vec_deque_collect_map_filter_owned.rs index 1423533..42e496c 100644 --- a/benches/vec_deque_collect_map_filter_owned.rs +++ b/benches/vec_deque_collect_map_filter_owned.rs @@ -1,10 +1,11 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use 
criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use orx_parallel::*; use orx_split_vec::SplitVec; use rand::prelude::*; use rand_chacha::ChaCha8Rng; use rayon::iter::IntoParallelIterator; use std::collections::VecDeque; +use std::hint::black_box; const TEST_LARGE_OUTPUT: bool = false; From 14ba446fd525a6ce4f1b96c3ed76b4009c37b934 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:23:50 +0200 Subject: [PATCH 188/264] upgrade dependencies --- Cargo.toml | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9e7a67b..7f7ce0c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,31 +11,30 @@ keywords = ["parallel", "concurrency", "performance", "thread", "iterator"] categories = ["concurrency", "algorithms"] [dependencies] -orx-pseudo-default = { version = "2.1.0", default-features = false } orx-pinned-vec = { version = "3.17.0", default-features = false } orx-fixed-vec = { version = "3.19.0", default-features = false } orx-split-vec = { version = "3.19.0", default-features = false } -orx-pinned-concurrent-col = { version = "2.14.0", default-features = false } -orx-concurrent-bag = { version = "3.0.0", default-features = false } -orx-concurrent-ordered-bag = { version = "3.0.0", default-features = false } +orx-concurrent-iter = { version = "3.1.0", default-features = false } +orx-concurrent-bag = { version = "3.1.0", default-features = false } +orx-concurrent-ordered-bag = { version = "3.1.0", default-features = false } orx-iterable = { version = "1.3.0", default-features = false } +orx-pinned-concurrent-col = { version = "2.15.0", default-features = false } orx-priority-queue = { version = "1.7.0", default-features = false } -orx-concurrent-iter = { version = "3.1.0", default-features = false } -rayon = { version = "1.10.0", optional = true } +orx-pseudo-default = { version = "2.1.0", default-features = false } +orx-self-or = { version = "1.2.0" } # optional thread pool dependencies -threadpool = { version = "1.8.1", optional = true } +rayon = { version = "1.11.0", optional = true } scoped_threadpool = { version = "0.1.9", optional = true } -orx-self-or = { version = "1.2.0" } [dev-dependencies] -chrono = "0.4.39" -clap = { version = "4.5.36", features = ["derive"] } +chrono = "0.4.42" +clap = { version = "4.5.47", features = ["derive"] } criterion = "0.7.0" orx-concurrent-option = { version = "1.5.0", default-features = false } orx-concurrent-vec = "3.8.0" -rand = "0.9" +rand = "0.9.2" rand_chacha = "0.9" -rayon = "1.10.0" +rayon = "1.11.0" test-case = "3.3.1" [[bench]] From fd692fb3b24734c2e02cc6c643c6658215388463 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:27:47 +0200 Subject: [PATCH 189/264] clean up --- src/runner/fixed_chunk_runner/num_threads.rs | 2 +- .../fixed_chunk_runner/parallel_runner.rs | 12 ++++++------ src/runner/mod.rs | 1 - src/runner/parallel_runner.rs | 8 +++++++- src/runner/parallel_runner_compute/compute.rs | 17 ----------------- src/runner/parallel_runner_compute/mod.rs | 4 ---- 6 files changed, 14 insertions(+), 30 deletions(-) delete mode 100644 src/runner/parallel_runner_compute/compute.rs diff --git a/src/runner/fixed_chunk_runner/num_threads.rs b/src/runner/fixed_chunk_runner/num_threads.rs index dcfd45f..d90f4cd 100644 --- a/src/runner/fixed_chunk_runner/num_threads.rs +++ b/src/runner/fixed_chunk_runner/num_threads.rs @@ -2,7 +2,7 @@ use crate::{env::MAX_NUM_THREADS_ENV_VARIABLE, parameters::NumThreads}; const MAX_UNSET_NUM_THREADS: usize = 8; -pub 
fn maximum_num_threads(input_len: Option, num_threads: NumThreads) -> usize { +pub fn maximum_num_threads2(input_len: Option, num_threads: NumThreads) -> usize { let max_num_threads = max_num_threads_by_env_variable().unwrap_or(usize::MAX); match num_threads { NumThreads::Auto => from_auto_num_threads(input_len), diff --git a/src/runner/fixed_chunk_runner/parallel_runner.rs b/src/runner/fixed_chunk_runner/parallel_runner.rs index 99b835e..3a42b13 100644 --- a/src/runner/fixed_chunk_runner/parallel_runner.rs +++ b/src/runner/fixed_chunk_runner/parallel_runner.rs @@ -1,11 +1,11 @@ -use super::{ - chunk_size::ResolvedChunkSize, num_threads::maximum_num_threads, - thread_runner::FixedChunkThreadRunner, -}; +use super::{chunk_size::ResolvedChunkSize, thread_runner::FixedChunkThreadRunner}; use crate::{ orch::NumSpawned, parameters::Params, - runner::{computation_kind::ComputationKind, parallel_runner::ParallelRunner}, + runner::{ + computation_kind::ComputationKind, fixed_chunk_runner::num_threads::maximum_num_threads2, + parallel_runner::ParallelRunner, + }, }; use core::sync::atomic::{AtomicUsize, Ordering}; use orx_concurrent_iter::ConcurrentIter; @@ -79,7 +79,7 @@ impl ParallelRunner for FixedChunkRunner { type ThreadRunner = FixedChunkThreadRunner; fn new(kind: ComputationKind, params: Params, initial_len: Option) -> Self { - let max_num_threads = maximum_num_threads(initial_len, params.num_threads); + let max_num_threads = maximum_num_threads2(initial_len, params.num_threads); let resolved_chunk_size = ResolvedChunkSize::new(kind, initial_len, max_num_threads, params.chunk_size); diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 0d7b8fe..5097764 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -7,7 +7,6 @@ mod thread_runner_compute; pub use computation_kind::ComputationKind; pub use parallel_runner::ParallelRunner; -pub(crate) use parallel_runner_compute::ParallelRunnerCompute; pub use thread_runner::ThreadRunner; /// Default parallel runner. diff --git a/src/runner/parallel_runner.rs b/src/runner/parallel_runner.rs index 982462d..81caced 100644 --- a/src/runner/parallel_runner.rs +++ b/src/runner/parallel_runner.rs @@ -1,5 +1,6 @@ use super::{computation_kind::ComputationKind, thread_runner::ThreadRunner}; use crate::{orch::NumSpawned, parameters::Params}; +use core::num::NonZeroUsize; use orx_concurrent_iter::ConcurrentIter; /// A parallel runner which is responsible for taking a computation defined as a composition @@ -14,7 +15,12 @@ pub trait ParallelRunner: Sized + Sync + 'static { /// Creates a new parallel runner for the given computation `kind`, parallelization `params` /// and `initial_input_len`. - fn new(kind: ComputationKind, params: Params, initial_input_len: Option) -> Self; + fn new( + kind: ComputationKind, + params: Params, + initial_input_len: Option, + // max_num_threads: NonZeroUsize, + ) -> Self; /// Creates an initial shared state. 
fn new_shared_state(&self) -> Self::SharedState; diff --git a/src/runner/parallel_runner_compute/compute.rs b/src/runner/parallel_runner_compute/compute.rs deleted file mode 100644 index 6591b85..0000000 --- a/src/runner/parallel_runner_compute/compute.rs +++ /dev/null @@ -1,17 +0,0 @@ -use crate::{ParallelRunner, Params, runner::ComputationKind}; - -pub trait ParallelRunnerCompute: ParallelRunner { - fn collection(params: Params, len: Option) -> Self { - Self::new(ComputationKind::Collect, params, len) - } - - fn early_return(params: Params, len: Option) -> Self { - Self::new(ComputationKind::EarlyReturn, params, len) - } - - fn reduce(params: Params, len: Option) -> Self { - Self::new(ComputationKind::Reduce, params, len) - } -} - -impl ParallelRunnerCompute for X {} diff --git a/src/runner/parallel_runner_compute/mod.rs b/src/runner/parallel_runner_compute/mod.rs index 7516313..6ccd3f7 100644 --- a/src/runner/parallel_runner_compute/mod.rs +++ b/src/runner/parallel_runner_compute/mod.rs @@ -3,7 +3,3 @@ pub(crate) mod collect_ordered; pub(crate) mod next; pub(crate) mod next_any; pub(crate) mod reduce; - -mod compute; - -pub use compute::ParallelRunnerCompute; From 813547fcd35ec3ca51d9a3d1b4df9486053aea24 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:43:13 +0200 Subject: [PATCH 190/264] remove std dependency --- src/env.rs | 19 +++++- .../default_std_orchestrator.rs | 18 ++---- src/orch/orchestrator.rs | 14 +++-- src/runner/fixed_chunk_runner/chunk_size.rs | 14 +++-- src/runner/fixed_chunk_runner/mod.rs | 1 - src/runner/fixed_chunk_runner/num_threads.rs | 58 ------------------- .../fixed_chunk_runner/parallel_runner.rs | 20 ++++--- src/runner/parallel_runner.rs | 2 +- 8 files changed, 54 insertions(+), 92 deletions(-) delete mode 100644 src/runner/fixed_chunk_runner/num_threads.rs diff --git a/src/env.rs b/src/env.rs index f9cb08d..02fb12a 100644 --- a/src/env.rs +++ b/src/env.rs @@ -1 +1,18 @@ -pub const MAX_NUM_THREADS_ENV_VARIABLE: &str = "ORX_PARALLEL_MAX_NUM_THREADS"; +use core::num::NonZeroUsize; + +const MAX_NUM_THREADS_ENV_VARIABLE: &str = "ORX_PARALLEL_MAX_NUM_THREADS"; + +pub fn max_num_threads_by_env_variable() -> Option { + #[cfg(feature = "std")] + match std::env::var(MAX_NUM_THREADS_ENV_VARIABLE) { + Ok(s) => match s.parse::() { + Ok(0) => None, // consistent with .num_threads(0) representing no bound + Ok(x) => Some(NonZeroUsize::new(x).expect("x>0")), // set to a positive bound + Err(_e) => None, // not a number, ignored assuming no bound + }, + Err(_e) => None, // not set, no bound + } + + #[cfg(not(feature = "std"))] + None +} diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index fd27b53..ef73590 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -1,10 +1,10 @@ +use crate::orch::ParThreadPool; use crate::{DefaultRunner, orch::Orchestrator}; -use crate::{env::MAX_NUM_THREADS_ENV_VARIABLE, orch::ParThreadPool}; use core::num::NonZeroUsize; // POOL -const MAX_UNSET_NUM_THREADS: usize = 8; +const MAX_UNSET_NUM_THREADS: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(8) }; pub struct StdDefaultPool { max_num_threads: NonZeroUsize, @@ -12,17 +12,9 @@ pub struct StdDefaultPool { impl Default for StdDefaultPool { fn default() -> Self { - let env_max_num_threads = match std::env::var(MAX_NUM_THREADS_ENV_VARIABLE) { - Ok(s) => match s.parse::() { - Ok(0) => None, // consistent with .num_threads(0) representing 
no bound - Ok(x) => Some(x), // set to a positive bound - Err(_e) => None, // not a number, ignored assuming no bound - }, - Err(_e) => None, // not set, no bound - }; + let env_max_num_threads = crate::env::max_num_threads_by_env_variable(); - let ava_max_num_threads: Option = - std::thread::available_parallelism().map(|x| x.into()).ok(); + let ava_max_num_threads = std::thread::available_parallelism().ok(); let max_num_threads = match (env_max_num_threads, ava_max_num_threads) { (Some(env), Some(ava)) => env.min(ava), @@ -31,8 +23,6 @@ impl Default for StdDefaultPool { (None, None) => MAX_UNSET_NUM_THREADS, }; - let max_num_threads = NonZeroUsize::new(max_num_threads.max(1)).expect(">=1"); - Self { max_num_threads } } } diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 95a74a1..a46a234 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -14,11 +14,13 @@ pub trait Orchestrator { type ThreadPool: ParThreadPool; fn new_runner( + &self, kind: ComputationKind, params: Params, initial_input_len: Option, ) -> Self::Runner { - ::new(kind, params, initial_input_len) + let max_num_threads = self.max_num_threads_for_computation(params, initial_input_len); + ::new(kind, params, initial_input_len, max_num_threads) } fn thread_pool(&self) -> &Self::ThreadPool; @@ -38,7 +40,7 @@ pub trait Orchestrator { I: ConcurrentIter, F: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) + Sync, { - let runner = Self::new_runner(kind, params, iter.try_get_len()); + let runner = self.new_runner(kind, params, iter.try_get_len()); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = |num_spawned| { @@ -63,7 +65,7 @@ pub trait Orchestrator { F::Error: Send, { let iter_len = iter.try_get_len(); - let runner = Self::new_runner(kind, params, iter_len); + let runner = self.new_runner(kind, params, iter_len); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = |nt| thread_map(nt, &iter, &state, runner.new_thread_runner(&state)); @@ -93,7 +95,9 @@ pub trait Orchestrator { params: Params, iter_len: Option, ) -> NonZeroUsize { - let ava = self.thread_pool().max_num_threads(); + let pool = self.thread_pool().max_num_threads(); + + let env = crate::env::max_num_threads_by_env_variable().unwrap_or(NonZeroUsize::MAX); let req = match (iter_len, params.num_threads) { (Some(len), NumThreads::Auto) => NonZeroUsize::new(len.max(1)).expect(">0"), @@ -102,7 +106,7 @@ pub trait Orchestrator { (None, NumThreads::Max(nt)) => nt, }; - req.min(ava) + req.min(pool.min(env)) } } diff --git a/src/runner/fixed_chunk_runner/chunk_size.rs b/src/runner/fixed_chunk_runner/chunk_size.rs index 58e942f..4659980 100644 --- a/src/runner/fixed_chunk_runner/chunk_size.rs +++ b/src/runner/fixed_chunk_runner/chunk_size.rs @@ -1,4 +1,5 @@ use crate::{parameters::ChunkSize, runner::computation_kind::ComputationKind}; +use core::num::NonZeroUsize; const MAX_CHUNK_SIZE: usize = 1 << 20; const DESIRED_MIN_CHUNK_SIZE: usize = 64; @@ -13,7 +14,7 @@ impl ResolvedChunkSize { pub fn new( kind: ComputationKind, initial_len: Option, - max_num_threads: usize, + max_num_threads: NonZeroUsize, chunk_size: ChunkSize, ) -> Self { match chunk_size { @@ -42,7 +43,7 @@ const fn min_required_len(kind: ComputationKind, one_round_len: usize) -> usize fn auto_chunk_size( kind: ComputationKind, initial_len: Option, - max_num_threads: usize, + max_num_threads: NonZeroUsize, ) -> usize { fn 
find_chunk_size(kind: ComputationKind, input_len: usize, num_threads: usize) -> usize { let mut chunk_size = MAX_CHUNK_SIZE; @@ -74,11 +75,16 @@ fn auto_chunk_size( match initial_len { None => 1, // TODO: is this a good choice? Some(0) => 1, - Some(input_len) => find_chunk_size(kind, input_len, max_num_threads), + Some(input_len) => find_chunk_size(kind, input_len, max_num_threads.into()), } } -fn min_chunk_size(initial_len: Option, max_num_threads: usize, chunk_size: usize) -> usize { +fn min_chunk_size( + initial_len: Option, + max_num_threads: NonZeroUsize, + chunk_size: usize, +) -> usize { + let max_num_threads: usize = max_num_threads.into(); match initial_len { None => chunk_size, Some(0) => 1, diff --git a/src/runner/fixed_chunk_runner/mod.rs b/src/runner/fixed_chunk_runner/mod.rs index 00f26a2..794137d 100644 --- a/src/runner/fixed_chunk_runner/mod.rs +++ b/src/runner/fixed_chunk_runner/mod.rs @@ -1,5 +1,4 @@ mod chunk_size; -mod num_threads; mod parallel_runner; mod thread_runner; diff --git a/src/runner/fixed_chunk_runner/num_threads.rs b/src/runner/fixed_chunk_runner/num_threads.rs deleted file mode 100644 index d90f4cd..0000000 --- a/src/runner/fixed_chunk_runner/num_threads.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::{env::MAX_NUM_THREADS_ENV_VARIABLE, parameters::NumThreads}; - -const MAX_UNSET_NUM_THREADS: usize = 8; - -pub fn maximum_num_threads2(input_len: Option, num_threads: NumThreads) -> usize { - let max_num_threads = max_num_threads_by_env_variable().unwrap_or(usize::MAX); - match num_threads { - NumThreads::Auto => from_auto_num_threads(input_len), - NumThreads::Max(x) => from_max_num_threads(input_len, x.into()), - } - .max(1) - .min(max_num_threads) -} - -fn from_auto_num_threads(input_len: Option) -> usize { - match std::thread::available_parallelism() { - Err(e) => { - debug_assert!( - false, - "Failed to get maximum available parallelism (std::thread::available_parallelism()): {e}", - ); - input_len - .unwrap_or(MAX_UNSET_NUM_THREADS) - .min(MAX_UNSET_NUM_THREADS) - } - Ok(available_threads) => input_len - .unwrap_or(MAX_UNSET_NUM_THREADS) - .min(available_threads.into()), - } -} - -fn from_max_num_threads(input_len: Option, max_num_threads: usize) -> usize { - // TODO: need to get the number of free threads? 
- match std::thread::available_parallelism() { - Err(e) => { - debug_assert!( - false, - "Failed to get maximum available parallelism (std::thread::available_parallelism()); falling back to sequential execution.: {e}", - ); - input_len.unwrap_or(max_num_threads).min(max_num_threads) - } - Ok(available_threads) => input_len - .unwrap_or(usize::MAX) - .min(max_num_threads) - .min(available_threads.into()), - } -} - -fn max_num_threads_by_env_variable() -> Option { - match std::env::var(MAX_NUM_THREADS_ENV_VARIABLE) { - Ok(s) => match s.parse::() { - Ok(0) => None, // consistent with .num_threads(0) representing no bound - Ok(x) => Some(x), // set to a positive bound - Err(_e) => None, // not a number, ignored assuming no bound - }, - Err(_e) => None, // not set, no bound - } -} diff --git a/src/runner/fixed_chunk_runner/parallel_runner.rs b/src/runner/fixed_chunk_runner/parallel_runner.rs index 3a42b13..63f4026 100644 --- a/src/runner/fixed_chunk_runner/parallel_runner.rs +++ b/src/runner/fixed_chunk_runner/parallel_runner.rs @@ -2,12 +2,12 @@ use super::{chunk_size::ResolvedChunkSize, thread_runner::FixedChunkThreadRunner use crate::{ orch::NumSpawned, parameters::Params, - runner::{ - computation_kind::ComputationKind, fixed_chunk_runner::num_threads::maximum_num_threads2, - parallel_runner::ParallelRunner, - }, + runner::{computation_kind::ComputationKind, parallel_runner::ParallelRunner}, +}; +use core::{ + num::NonZeroUsize, + sync::atomic::{AtomicUsize, Ordering}, }; -use core::sync::atomic::{AtomicUsize, Ordering}; use orx_concurrent_iter::ConcurrentIter; const LAG_PERIODICITY: usize = 4; @@ -78,15 +78,19 @@ impl ParallelRunner for FixedChunkRunner { type ThreadRunner = FixedChunkThreadRunner; - fn new(kind: ComputationKind, params: Params, initial_len: Option) -> Self { - let max_num_threads = maximum_num_threads2(initial_len, params.num_threads); + fn new( + kind: ComputationKind, + params: Params, + initial_len: Option, + max_num_threads: NonZeroUsize, + ) -> Self { let resolved_chunk_size = ResolvedChunkSize::new(kind, initial_len, max_num_threads, params.chunk_size); Self { initial_len, resolved_chunk_size, - max_num_threads, + max_num_threads: max_num_threads.into(), current_chunk_size: resolved_chunk_size.chunk_size().into(), } } diff --git a/src/runner/parallel_runner.rs b/src/runner/parallel_runner.rs index 81caced..7782ad3 100644 --- a/src/runner/parallel_runner.rs +++ b/src/runner/parallel_runner.rs @@ -19,7 +19,7 @@ pub trait ParallelRunner: Sized + Sync + 'static { kind: ComputationKind, params: Params, initial_input_len: Option, - // max_num_threads: NonZeroUsize, + max_num_threads: NonZeroUsize, ) -> Self; /// Creates an initial shared state. 
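The net effect of the patch above is that the effective degree of parallelism is now resolved in a single place, as the minimum of three independent bounds. A minimal standalone sketch of that resolution (names mirror `Orchestrator::max_num_threads_for_computation` in the diff; simplified, not the patch's exact code):

```rust
use core::num::NonZeroUsize;

/// Effective thread count = min(requested, pool capacity, env bound).
fn effective_num_threads(
    req: NonZeroUsize,         // from Params::num_threads and the input length
    pool: NonZeroUsize,        // ParThreadPool::max_num_threads()
    env: Option<NonZeroUsize>, // ORX_PARALLEL_MAX_NUM_THREADS, if set to a positive value
) -> NonZeroUsize {
    req.min(pool.min(env.unwrap_or(NonZeroUsize::MAX)))
}
```

For instance, a request for 64 threads on an 8-thread pool with the environment variable unset resolves to 8 threads.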
From 3291955529af0aec1d9d1de8a0e5ac0108209b6d Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:44:21 +0200 Subject: [PATCH 191/264] fix no-default-features build: add missing alloc import --- src/generic_iterator/collect.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/generic_iterator/collect.rs b/src/generic_iterator/collect.rs index 92d641b..a74bfd3 100644 --- a/src/generic_iterator/collect.rs +++ b/src/generic_iterator/collect.rs @@ -1,5 +1,6 @@ use super::iter::GenericIterator; use crate::ParIter; +use alloc::vec::Vec; impl GenericIterator where From 8bfd366e8477565ded961c77018554ebcc089f93 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:45:35 +0200 Subject: [PATCH 192/264] use std::hint::black_box in tests after criterion upgrade --- tests/mut_iter.rs | 2 +- tests/whilst/collect.rs | 2 +- tests/whilst/collect_arbitrary.rs | 2 +- tests/whilst/find.rs | 2 +- tests/whilst/reduce.rs | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/mut_iter.rs b/tests/mut_iter.rs index 50e5a3a..f945a9b 100644 --- a/tests/mut_iter.rs +++ b/tests/mut_iter.rs @@ -1,6 +1,6 @@ -use criterion::black_box; use orx_parallel::*; use std::collections::HashMap; +use std::hint::black_box; use test_case::test_matrix; #[cfg(miri)] diff --git a/tests/whilst/collect.rs b/tests/whilst/collect.rs index d1d93bd..03858c6 100644 --- a/tests/whilst/collect.rs +++ b/tests/whilst/collect.rs @@ -1,5 +1,5 @@ use crate::fibonacci; -use criterion::black_box; +use std::hint::black_box; use orx_parallel::*; use test_case::test_case; diff --git a/tests/whilst/collect_arbitrary.rs b/tests/whilst/collect_arbitrary.rs index 47c0e33..2bebc53 100644 --- a/tests/whilst/collect_arbitrary.rs +++ b/tests/whilst/collect_arbitrary.rs @@ -1,6 +1,6 @@ use crate::fibonacci; -use criterion::black_box; use orx_parallel::*; +use std::hint::black_box; use test_case::test_case; #[test_case(512, 4, 0, 3, "22", 220)] diff --git a/tests/whilst/find.rs b/tests/whilst/find.rs index 366de81..06ddbe7 100644 --- a/tests/whilst/find.rs +++ b/tests/whilst/find.rs @@ -1,5 +1,5 @@ use crate::fibonacci; -use criterion::black_box; +use std::hint::black_box; use orx_parallel::*; use test_case::test_case; diff --git a/tests/whilst/reduce.rs b/tests/whilst/reduce.rs index 6a2a339..2f46f8d 100644 --- a/tests/whilst/reduce.rs +++ b/tests/whilst/reduce.rs @@ -1,5 +1,5 @@ use crate::fibonacci; -use criterion::black_box; +use std::hint::black_box; use orx_parallel::*; use test_case::test_case; From f9406a98e1660e5015a30dfedcb7733b60291106 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:47:29 +0200 Subject: [PATCH 193/264] fix tests: checkpoint -> no-std compiles --- src/computational_variants/tests/iter_consuming.rs | 1 + src/computational_variants/tests/iter_ref.rs | 2 ++ src/computational_variants/tests/mod.rs | 8 ++++---- src/computational_variants/tests/range.rs | 1 + src/computational_variants/tests/slice.rs | 2 ++ 5 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/computational_variants/tests/iter_consuming.rs b/src/computational_variants/tests/iter_consuming.rs index 7bee27d..0f15424 100644 --- a/src/computational_variants/tests/iter_consuming.rs +++ b/src/computational_variants/tests/iter_consuming.rs @@ -1,4 +1,5 @@ use crate::{test_utils::*, *}; +use alloc::format; use alloc::string::{String, ToString}; use alloc::vec; use alloc::vec::Vec; diff --git a/src/computational_variants/tests/iter_ref.rs b/src/computational_variants/tests/iter_ref.rs index f8ce262..f17bf7a 100644 --- a/src/computational_variants/tests/iter_ref.rs +++ 
b/src/computational_variants/tests/iter_ref.rs @@ -1,5 +1,7 @@ use crate::{collect_into::ParCollectIntoCore, test_utils::*, *}; +use alloc::format; use alloc::string::{String, ToString}; +use alloc::vec; use alloc::vec::Vec; use orx_fixed_vec::FixedVec; use orx_iterable::{Collection, IntoCloningIterable}; diff --git a/src/computational_variants/tests/mod.rs b/src/computational_variants/tests/mod.rs index ad37051..54f1bfc 100644 --- a/src/computational_variants/tests/mod.rs +++ b/src/computational_variants/tests/mod.rs @@ -3,12 +3,12 @@ mod count; mod flatten; mod for_each; mod inspect; -// mod iter_consuming; -// mod iter_ref; +mod iter_consuming; +mod iter_ref; mod map; mod min_max; -// mod range; -// mod slice; +mod range; +mod slice; mod sum; mod vectors; mod xap; diff --git a/src/computational_variants/tests/range.rs b/src/computational_variants/tests/range.rs index 2e7a4d9..4d7bf50 100644 --- a/src/computational_variants/tests/range.rs +++ b/src/computational_variants/tests/range.rs @@ -1,5 +1,6 @@ use crate::{test_utils::*, *}; use alloc::string::{String, ToString}; +use alloc::vec; use alloc::vec::Vec; use orx_fixed_vec::FixedVec; use orx_iterable::Iterable; diff --git a/src/computational_variants/tests/slice.rs b/src/computational_variants/tests/slice.rs index 9443408..991dc19 100644 --- a/src/computational_variants/tests/slice.rs +++ b/src/computational_variants/tests/slice.rs @@ -1,5 +1,7 @@ use crate::{collect_into::ParCollectIntoCore, test_utils::*, *}; +use alloc::format; use alloc::string::{String, ToString}; +use alloc::vec; use alloc::vec::Vec; use orx_fixed_vec::FixedVec; use orx_iterable::Collection; From 10da62ae21ce3d78e2e964d3aad5146b99d7ba06 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 14:58:32 +0200 Subject: [PATCH 194/264] renaming --- Cargo.toml | 7 +++++-- src/collect_into/vec.rs | 15 -------------- src/computational_variants/tests/flatten.rs | 2 +- src/lib.rs | 2 +- .../default_std_orchestrator.rs | 4 ++-- src/orch/implementations/rayon.rs | 12 +++++------ src/orch/implementations/scoped_threadpool.rs | 12 +++++------ src/orch/orchestrator.rs | 20 ++++++++++++------- src/orch/par_thread_pool.rs | 19 ++++++++++++++++++ .../chunk_size.rs | 0 src/runner/fixed_chunk_executor/mod.rs | 5 +++++ .../parallel_executor.rs} | 12 +++++------ .../thread_executor.rs} | 6 +++--- src/runner/fixed_chunk_runner/mod.rs | 5 ----- src/runner/mod.rs | 19 +++++++----------- ...arallel_runner.rs => parallel_executor.rs} | 18 ++++++++--------- .../{thread_runner.rs => thread_executor.rs} | 10 +++++----- .../collect_arbitrary.rs | 6 +++--- .../thread_runner_compute/collect_ordered.rs | 6 +++--- src/runner/thread_runner_compute/next.rs | 6 +++--- src/runner/thread_runner_compute/next_any.rs | 6 +++--- src/runner/thread_runner_compute/reduce.rs | 6 +++--- .../collect_arbitrary.rs | 6 +++--- .../thread_runner_compute/collect_ordered.rs | 6 +++--- .../runner/thread_runner_compute/next.rs | 6 +++--- .../runner/thread_runner_compute/next_any.rs | 6 +++--- .../runner/thread_runner_compute/reduce.rs | 6 +++--- 27 files changed, 118 insertions(+), 110 deletions(-) rename src/runner/{fixed_chunk_runner => fixed_chunk_executor}/chunk_size.rs (100%) create mode 100644 src/runner/fixed_chunk_executor/mod.rs rename src/runner/{fixed_chunk_runner/parallel_runner.rs => fixed_chunk_executor/parallel_executor.rs} (90%) rename src/runner/{fixed_chunk_runner/thread_runner.rs => fixed_chunk_executor/thread_executor.rs} (76%) delete mode 100644 src/runner/fixed_chunk_runner/mod.rs rename 
src/runner/{parallel_runner.rs => parallel_executor.rs} (57%) rename src/runner/{thread_runner.rs => thread_executor.rs} (79%) diff --git a/Cargo.toml b/Cargo.toml index 7f7ce0c..9a8973b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,8 +41,11 @@ test-case = "3.3.1" name = "find_iter_into_par" harness = false +[package.metadata."docs.rs"] +all-features = true + [features] -default = ["std"] -# default = ["std", "scoped_threadpool", "rayon"] +# default = ["std"] +default = ["std", "scoped_threadpool", "rayon", "generic_iterator"] std = [] generic_iterator = ["rayon"] diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 3f5af17..41b26ed 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -95,18 +95,3 @@ where self.len() } } - -// #[cfg(test)] -// mod tsts { -// use crate::*; -// use alloc::vec::Vec; -// use orx_split_vec::SplitVec; - -// #[test] -// fn abc() { -// fn take>(c: C) {} - -// take(SplitVec::new()); -// take(Vec::new()); -// } -// } diff --git a/src/computational_variants/tests/flatten.rs b/src/computational_variants/tests/flatten.rs index e2aa555..a2db7df 100644 --- a/src/computational_variants/tests/flatten.rs +++ b/src/computational_variants/tests/flatten.rs @@ -73,7 +73,7 @@ fn flatten_xap_filter_xap(n: &[usize], nt: &[usize], chunk: &[usize]) { }; let filter = |x: &Vec| x.len() == 2; let map = |mut x: Vec| { - x.push("abc".to_string()); + x.push("lorem".to_string()); x }; let expected: Vec<_> = input() diff --git a/src/lib.rs b/src/lib.rs index f9cfed1..858fa78 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -67,6 +67,6 @@ pub use parallelizable::Parallelizable; pub use parallelizable_collection::ParallelizableCollection; pub use parallelizable_collection_mut::ParallelizableCollectionMut; pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params}; -pub use runner::{DefaultRunner, ParallelRunner, ThreadRunner}; +pub use runner::{DefaultExecutor, ParallelExecutor, ThreadExecutor}; pub use special_type_sets::Sum; pub use using::ParIterUsing; diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index ef73590..829b792 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -1,5 +1,5 @@ use crate::orch::ParThreadPool; -use crate::{DefaultRunner, orch::Orchestrator}; +use crate::{DefaultExecutor, orch::Orchestrator}; use core::num::NonZeroUsize; // POOL @@ -62,7 +62,7 @@ impl ParThreadPool for StdDefaultPool { pub struct DefaultStdOrchestrator(StdDefaultPool); impl Orchestrator for DefaultStdOrchestrator { - type Runner = DefaultRunner; + type Runner = DefaultExecutor; type ThreadPool = StdDefaultPool; diff --git a/src/orch/implementations/rayon.rs b/src/orch/implementations/rayon.rs index e0c406d..38b86d2 100644 --- a/src/orch/implementations/rayon.rs +++ b/src/orch/implementations/rayon.rs @@ -1,5 +1,5 @@ use crate::{ - DefaultRunner, ParallelRunner, + DefaultExecutor, ParallelExecutor, orch::{Orchestrator, ParThreadPool}, }; use core::{marker::PhantomData, num::NonZeroUsize}; @@ -68,9 +68,9 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { // ORCH -pub struct RayonOrchestrator +pub struct RayonOrchestrator where - R: ParallelRunner, + R: ParallelExecutor, P: SoR + ParThreadPool, { pool: P, @@ -79,7 +79,7 @@ where impl From for RayonOrchestrator where - R: ParallelRunner, + R: ParallelExecutor, { fn from(pool: ThreadPool) -> Self { Self { @@ -91,7 +91,7 @@ where impl<'a, R> From<&'a ThreadPool> for 
RayonOrchestrator<&'a ThreadPool, R> where - R: ParallelRunner, + R: ParallelExecutor, { fn from(pool: &'a ThreadPool) -> Self { Self { @@ -103,7 +103,7 @@ where impl Orchestrator for RayonOrchestrator where - R: ParallelRunner, + R: ParallelExecutor, P: SoR + ParThreadPool, { type Runner = R; diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/orch/implementations/scoped_threadpool.rs index 50cba62..95e6d16 100644 --- a/src/orch/implementations/scoped_threadpool.rs +++ b/src/orch/implementations/scoped_threadpool.rs @@ -1,5 +1,5 @@ use crate::{ - DefaultRunner, ParallelRunner, + DefaultExecutor, ParallelExecutor, orch::{Orchestrator, ParThreadPool}, }; use core::{marker::PhantomData, num::NonZeroUsize}; @@ -68,9 +68,9 @@ impl<'a> ParThreadPool for &'a mut Pool { // ORCH -pub struct ScopedThreadPoolOrchestrator +pub struct ScopedThreadPoolOrchestrator where - R: ParallelRunner, + R: ParallelExecutor, P: SoM + ParThreadPool, { pool: P, @@ -79,7 +79,7 @@ where impl From for ScopedThreadPoolOrchestrator where - R: ParallelRunner, + R: ParallelExecutor, { fn from(pool: Pool) -> Self { Self { @@ -91,7 +91,7 @@ where impl<'a, R> From<&'a mut Pool> for ScopedThreadPoolOrchestrator<&'a mut Pool, R> where - R: ParallelRunner, + R: ParallelExecutor, { fn from(pool: &'a mut Pool) -> Self { Self { @@ -103,7 +103,7 @@ where impl Orchestrator for ScopedThreadPoolOrchestrator where - R: ParallelRunner, + R: ParallelExecutor, P: SoM + ParThreadPool, { type Runner = R; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index a46a234..eb6e0e4 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,5 +1,5 @@ use crate::{ - NumThreads, ParallelRunner, Params, + NumThreads, ParallelExecutor, Params, generic_values::runner_results::{Fallibility, Infallible, Never}, orch::{NumSpawned, ParThreadPool, ParThreadPoolCompute}, runner::ComputationKind, @@ -9,7 +9,7 @@ use core::num::NonZeroUsize; use orx_concurrent_iter::ConcurrentIter; pub trait Orchestrator { - type Runner: ParallelRunner; + type Runner: ParallelExecutor; type ThreadPool: ParThreadPool; @@ -20,7 +20,7 @@ pub trait Orchestrator { initial_input_len: Option, ) -> Self::Runner { let max_num_threads = self.max_num_threads_for_computation(params, initial_input_len); - ::new(kind, params, initial_input_len, max_num_threads) + ::new(kind, params, initial_input_len, max_num_threads) } fn thread_pool(&self) -> &Self::ThreadPool; @@ -44,7 +44,12 @@ pub trait Orchestrator { let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = |num_spawned| { - thread_do(num_spawned, &iter, &state, runner.new_thread_runner(&state)); + thread_do( + num_spawned, + &iter, + &state, + runner.new_thread_executor(&state), + ); }; self.thread_pool_mut().run_in_pool(do_spawn, work) } @@ -68,7 +73,7 @@ pub trait Orchestrator { let runner = self.new_runner(kind, params, iter_len); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); - let work = |nt| thread_map(nt, &iter, &state, runner.new_thread_runner(&state)); + let work = |nt| thread_map(nt, &iter, &state, runner.new_thread_executor(&state)); let max_num_threads = self.max_num_threads_for_computation(params, iter_len); self.thread_pool_mut() .map_in_pool::(do_spawn, work, max_num_threads) @@ -110,8 +115,9 @@ pub trait Orchestrator { } } -pub(crate) type SharedStateOf = <::Runner as ParallelRunner>::SharedState; -pub(crate) type ThreadRunnerOf = 
<::Runner as ParallelRunner>::ThreadRunner; +pub(crate) type SharedStateOf = <::Runner as ParallelExecutor>::SharedState; +pub(crate) type ThreadRunnerOf = + <::Runner as ParallelExecutor>::ThreadExecutor; // auto impl for &mut pool diff --git a/src/orch/par_thread_pool.rs b/src/orch/par_thread_pool.rs index 15d539a..842a0f0 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/orch/par_thread_pool.rs @@ -3,6 +3,16 @@ use alloc::vec::Vec; use core::num::NonZeroUsize; use orx_concurrent_bag::ConcurrentBag; +/// A thread pool that can be used for parallel computation. +/// +/// # Examples +/// +/// ```ignore +/// // a minimal sketch, assuming the `rayon` feature: a rayon pool can back the +/// // computation, since `ParThreadPool` is implemented for `&rayon::ThreadPool` +/// let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build().unwrap(); +/// let orchestrator = RayonOrchestrator::from(&pool); +/// ``` pub trait ParThreadPool { type ScopeRef<'s, 'env, 'scope> where @@ -80,3 +90,16 @@ pub trait ParThreadPoolCompute: ParThreadPool { } impl<X: ParThreadPool> ParThreadPoolCompute for X {} + +#[cfg(test)] +mod tests { + #[test] + fn rayon_pool_builds() { + // smoke test: a rayon thread pool, usable as a `ParThreadPool` via `&rayon::ThreadPool`, can be constructed + let pool = rayon::ThreadPoolBuilder::new() + .num_threads(4) + .build() + .unwrap(); + assert_eq!(pool.current_num_threads(), 4); + } +} diff --git a/src/runner/fixed_chunk_runner/chunk_size.rs b/src/runner/fixed_chunk_executor/chunk_size.rs similarity index 100% rename from src/runner/fixed_chunk_runner/chunk_size.rs rename to src/runner/fixed_chunk_executor/chunk_size.rs diff --git a/src/runner/fixed_chunk_executor/mod.rs b/src/runner/fixed_chunk_executor/mod.rs new file mode 100644 index 0000000..093860c --- /dev/null +++ b/src/runner/fixed_chunk_executor/mod.rs @@ -0,0 +1,5 @@ +mod chunk_size; +mod parallel_executor; +mod thread_executor; + +pub use parallel_executor::FixedChunkRunner; diff --git a/src/runner/fixed_chunk_runner/parallel_runner.rs b/src/runner/fixed_chunk_executor/parallel_executor.rs similarity index 90% rename from src/runner/fixed_chunk_runner/parallel_runner.rs rename to src/runner/fixed_chunk_executor/parallel_executor.rs index 63f4026..ec68b16 100644 --- a/src/runner/fixed_chunk_runner/parallel_runner.rs +++ b/src/runner/fixed_chunk_executor/parallel_executor.rs @@ -1,8 +1,8 @@ -use super::{chunk_size::ResolvedChunkSize, thread_runner::FixedChunkThreadRunner}; +use super::{chunk_size::ResolvedChunkSize, thread_executor::FixedChunkThreadExecutor}; use crate::{ orch::NumSpawned, parameters::Params, - runner::{computation_kind::ComputationKind, parallel_runner::ParallelRunner}, + runner::{computation_kind::ComputationKind, parallel_executor::ParallelExecutor}, }; use core::{ num::NonZeroUsize, @@ -73,10 +73,10 @@ impl FixedChunkRunner { } } -impl ParallelRunner for FixedChunkRunner { +impl ParallelExecutor for FixedChunkRunner { type SharedState = (); - type ThreadRunner = FixedChunkThreadRunner; + type ThreadExecutor = FixedChunkThreadExecutor; fn new( kind: ComputationKind, @@ -112,8 +112,8 @@ impl ParallelRunner for FixedChunkRunner { self.spawn_new(num_spawned, iter.try_get_len()) } - fn new_thread_runner(&self, _: &Self::SharedState) -> Self::ThreadRunner { - Self::ThreadRunner { + fn new_thread_executor(&self, _: &Self::SharedState) -> Self::ThreadExecutor { + Self::ThreadExecutor { chunk_size: self.current_chunk_size.load(Ordering::Relaxed), } } diff --git a/src/runner/fixed_chunk_runner/thread_runner.rs b/src/runner/fixed_chunk_executor/thread_executor.rs similarity index 76% rename from src/runner/fixed_chunk_runner/thread_runner.rs rename to src/runner/fixed_chunk_executor/thread_executor.rs index 55ea907..65f45f6 100644 --- a/src/runner/fixed_chunk_runner/thread_runner.rs +++ b/src/runner/fixed_chunk_executor/thread_executor.rs @@ -1,11 +1,11 @@ -use crate::runner::thread_runner::ThreadRunner; +use crate::runner::thread_executor::ThreadExecutor; use
orx_concurrent_iter::ConcurrentIter; -pub struct FixedChunkThreadRunner { +pub struct FixedChunkThreadExecutor { pub(super) chunk_size: usize, } -impl ThreadRunner for FixedChunkThreadRunner { +impl ThreadExecutor for FixedChunkThreadExecutor { type SharedState = (); #[inline(always)] diff --git a/src/runner/fixed_chunk_runner/mod.rs b/src/runner/fixed_chunk_runner/mod.rs deleted file mode 100644 index 794137d..0000000 --- a/src/runner/fixed_chunk_runner/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod chunk_size; -mod parallel_runner; -mod thread_runner; - -pub use parallel_runner::FixedChunkRunner; diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 5097764..2cc077a 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -1,18 +1,13 @@ mod computation_kind; -mod fixed_chunk_runner; -mod parallel_runner; +mod fixed_chunk_executor; +mod parallel_executor; pub(crate) mod parallel_runner_compute; -mod thread_runner; +mod thread_executor; mod thread_runner_compute; pub use computation_kind::ComputationKind; -pub use parallel_runner::ParallelRunner; -pub use thread_runner::ThreadRunner; +pub use parallel_executor::ParallelExecutor; +pub use thread_executor::ThreadExecutor; -/// Default parallel runner. -/// -/// Unless explicitly set to another parallel runner by [`with_runner`] method, -/// parallel computations will be executed using the default parallel runner. -/// -/// [`with_runner`]: crate::ParIter::with_runner -pub type DefaultRunner = fixed_chunk_runner::FixedChunkRunner; +/// Default parallel executor. +pub type DefaultExecutor = fixed_chunk_executor::FixedChunkRunner; diff --git a/src/runner/parallel_runner.rs b/src/runner/parallel_executor.rs similarity index 57% rename from src/runner/parallel_runner.rs rename to src/runner/parallel_executor.rs index 7782ad3..8eddc9b 100644 --- a/src/runner/parallel_runner.rs +++ b/src/runner/parallel_executor.rs @@ -1,19 +1,19 @@ -use super::{computation_kind::ComputationKind, thread_runner::ThreadRunner}; +use super::{computation_kind::ComputationKind, thread_executor::ThreadExecutor}; use crate::{orch::NumSpawned, parameters::Params}; use core::num::NonZeroUsize; use orx_concurrent_iter::ConcurrentIter; -/// A parallel runner which is responsible for taking a computation defined as a composition +/// A parallel executor which is responsible for taking a computation defined as a composition /// of iterator methods, spawns threads, shares tasks and returns the result of the parallel /// execution. -pub trait ParallelRunner: Sized + Sync + 'static { - /// Data shared to the thread runners. +pub trait ParallelExecutor: Sized + Sync + 'static { + /// Data shared to the thread executors. type SharedState: Send + Sync; - /// Thread runner that is responsible for executing the tasks allocated to a thread. - type ThreadRunner: ThreadRunner; + /// Thread executor that is responsible for executing the tasks allocated to a thread. + type ThreadExecutor: ThreadExecutor; - /// Creates a new parallel runner for the given computation `kind`, parallelization `params` + /// Creates a new parallel executor for the given computation `kind`, parallelization `params` /// and `initial_input_len`. fn new( kind: ComputationKind, @@ -38,7 +38,7 @@ pub trait ParallelRunner: Sized + Sync + 'static { where I: ConcurrentIter; - /// Creates a new thread runner provided that the current parallel execution state is + /// Creates a new thread executor provided that the current parallel execution state is /// `shared_state`. 
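A minimal sketch of how the renamed pieces cooperate, using only calls that appear in the hunks above; `kind`, `params`, `iter_len` and `chunk_size` are assumed here to be supplied by the surrounding orchestrator code:

    // orchestrator side (cf. src/orch/orchestrator.rs above)
    let runner = self.new_runner(kind, params, iter_len);   // Self::Runner: ParallelExecutor
    let state = runner.new_shared_state();                  // state shared by all workers
    let mut executor = runner.new_thread_executor(&state);  // per-thread ThreadExecutor

    // worker side: progress is reported back through the two hooks
    executor.complete_chunk(&state, chunk_size);
    executor.complete_task(&state);
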
- fn new_thread_runner(&self, shared_state: &Self::SharedState) -> Self::ThreadRunner; + fn new_thread_executor(&self, shared_state: &Self::SharedState) -> Self::ThreadExecutor; } diff --git a/src/runner/thread_runner.rs b/src/runner/thread_executor.rs similarity index 79% rename from src/runner/thread_runner.rs rename to src/runner/thread_executor.rs index 4a50f6a..dd96e07 100644 --- a/src/runner/thread_runner.rs +++ b/src/runner/thread_executor.rs @@ -1,8 +1,8 @@ use orx_concurrent_iter::ConcurrentIter; -/// Thread runner responsible for executing the tasks assigned to the thread by the -/// parallel runner. -pub trait ThreadRunner: Sized { +/// Thread executor responsible for executing the tasks assigned to the thread by the +/// parallel executor. +pub trait ThreadExecutor: Sized { /// Type of the shared state among threads. type SharedState; @@ -17,11 +17,11 @@ pub trait ThreadRunner: Sized { /// Hook that will be called after completing the chunk of the given `chunk_size`. /// The `shared_state` is also provided so that it can be updated to send information to the - /// parallel runner and other thread runners. + /// parallel executor and other thread executors. fn complete_chunk(&mut self, shared_state: &Self::SharedState, chunk_size: usize); /// Hook that will be called after completing the task. /// The `shared_state` is also provided so that it can be updated to send information to the - /// parallel runner and other thread runners. + /// parallel executor and other thread executors. fn complete_task(&mut self, shared_state: &Self::SharedState); } diff --git a/src/runner/thread_runner_compute/collect_arbitrary.rs b/src/runner/thread_runner_compute/collect_arbitrary.rs index 2d7ffde..0b632e3 100644 --- a/src/runner/thread_runner_compute/collect_arbitrary.rs +++ b/src/runner/thread_runner_compute/collect_arbitrary.rs @@ -1,4 +1,4 @@ -use crate::ThreadRunner; +use crate::ThreadExecutor; use crate::generic_values::Values; use crate::generic_values::runner_results::{Stop, ThreadCollectArbitrary}; use orx_concurrent_bag::ConcurrentBag; @@ -15,7 +15,7 @@ pub fn m( map1: &M1, bag: &ConcurrentBag, ) where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, M1: Fn(I::Item) -> O, P: IntoConcurrentPinnedVec, @@ -62,7 +62,7 @@ pub fn x( bag: &ConcurrentBag, ) -> ThreadCollectArbitrary where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, X1: Fn(I::Item) -> Vo, diff --git a/src/runner/thread_runner_compute/collect_ordered.rs b/src/runner/thread_runner_compute/collect_ordered.rs index a942d7a..10984a0 100644 --- a/src/runner/thread_runner_compute/collect_ordered.rs +++ b/src/runner/thread_runner_compute/collect_ordered.rs @@ -1,4 +1,4 @@ -use crate::ThreadRunner; +use crate::ThreadExecutor; use crate::generic_values::Values; use crate::generic_values::runner_results::{StopWithIdx, ThreadCollect}; use alloc::vec::Vec; @@ -14,7 +14,7 @@ pub fn m( o_bag: &ConcurrentOrderedBag, offset: usize, ) where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, M1: Fn(I::Item) -> O, P: IntoConcurrentPinnedVec, @@ -60,7 +60,7 @@ pub fn x( xap1: &X1, ) -> ThreadCollect where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, X1: Fn(I::Item) -> Vo, diff --git a/src/runner/thread_runner_compute/next.rs b/src/runner/thread_runner_compute/next.rs index e432579..d6d6fac 100644 --- a/src/runner/thread_runner_compute/next.rs +++ b/src/runner/thread_runner_compute/next.rs @@ -1,5 +1,5 @@ use crate::{ - ThreadRunner, + ThreadExecutor, generic_values::Values, 
generic_values::runner_results::{Next, NextWithIdx}, }; @@ -12,7 +12,7 @@ pub fn m( map1: &M1, ) -> Option<(usize, O)> where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, M1: Fn(I::Item) -> O, { @@ -69,7 +69,7 @@ pub fn x( xap1: &X1, ) -> NextWithIdx where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, X1: Fn(I::Item) -> Vo, diff --git a/src/runner/thread_runner_compute/next_any.rs b/src/runner/thread_runner_compute/next_any.rs index 433badc..a09926f 100644 --- a/src/runner/thread_runner_compute/next_any.rs +++ b/src/runner/thread_runner_compute/next_any.rs @@ -1,5 +1,5 @@ use crate::{ - ThreadRunner, + ThreadExecutor, generic_values::Values, generic_values::runner_results::{Fallibility, Next}, }; @@ -12,7 +12,7 @@ pub fn m( map1: &M1, ) -> Option where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, O: Send, M1: Fn(I::Item) -> O, @@ -70,7 +70,7 @@ pub fn x( xap1: &X1, ) -> Result, ::Error> where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, Vo::Item: Send, diff --git a/src/runner/thread_runner_compute/reduce.rs b/src/runner/thread_runner_compute/reduce.rs index ce05404..9b0b03b 100644 --- a/src/runner/thread_runner_compute/reduce.rs +++ b/src/runner/thread_runner_compute/reduce.rs @@ -1,5 +1,5 @@ use crate::{ - ThreadRunner, + ThreadExecutor, generic_values::{ Values, runner_results::{Reduce, StopReduce}, @@ -17,7 +17,7 @@ pub fn m( reduce: &Red, ) -> Option where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, M1: Fn(I::Item) -> O, Red: Fn(O, O) -> O, @@ -80,7 +80,7 @@ pub fn x( reduce: &Red, ) -> Reduce where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, X1: Fn(I::Item) -> Vo, diff --git a/src/using/runner/thread_runner_compute/collect_arbitrary.rs b/src/using/runner/thread_runner_compute/collect_arbitrary.rs index bcba9ca..dcc5802 100644 --- a/src/using/runner/thread_runner_compute/collect_arbitrary.rs +++ b/src/using/runner/thread_runner_compute/collect_arbitrary.rs @@ -1,4 +1,4 @@ -use crate::ThreadRunner; +use crate::ThreadExecutor; use crate::generic_values::Values; use crate::generic_values::runner_results::{Stop, ThreadCollectArbitrary}; use orx_concurrent_bag::ConcurrentBag; @@ -16,7 +16,7 @@ pub fn m( map1: &M1, bag: &ConcurrentBag, ) where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, M1: Fn(&mut U, I::Item) -> O, P: IntoConcurrentPinnedVec, @@ -65,7 +65,7 @@ pub fn x( bag: &ConcurrentBag, ) -> ThreadCollectArbitrary where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, X1: Fn(&mut U, I::Item) -> Vo, diff --git a/src/using/runner/thread_runner_compute/collect_ordered.rs b/src/using/runner/thread_runner_compute/collect_ordered.rs index e6716c6..698a187 100644 --- a/src/using/runner/thread_runner_compute/collect_ordered.rs +++ b/src/using/runner/thread_runner_compute/collect_ordered.rs @@ -1,4 +1,4 @@ -use crate::ThreadRunner; +use crate::ThreadExecutor; use crate::generic_values::Values; use crate::generic_values::runner_results::{StopWithIdx, ThreadCollect}; use alloc::vec::Vec; @@ -15,7 +15,7 @@ pub fn m( o_bag: &ConcurrentOrderedBag, offset: usize, ) where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, M1: Fn(&mut U, I::Item) -> O, P: IntoConcurrentPinnedVec, @@ -63,7 +63,7 @@ pub fn x( xap1: &X1, ) -> ThreadCollect where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, X1: Fn(&mut U, I::Item) -> Vo, diff --git a/src/using/runner/thread_runner_compute/next.rs 
b/src/using/runner/thread_runner_compute/next.rs index 8aa33e8..6ad9f07 100644 --- a/src/using/runner/thread_runner_compute/next.rs +++ b/src/using/runner/thread_runner_compute/next.rs @@ -1,5 +1,5 @@ use crate::{ - ThreadRunner, + ThreadExecutor, generic_values::Values, generic_values::runner_results::{Next, NextWithIdx}, }; @@ -13,7 +13,7 @@ pub fn m( map1: &M1, ) -> Option<(usize, O)> where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, M1: Fn(&mut U, I::Item) -> O, { @@ -72,7 +72,7 @@ pub fn x( xap1: &X1, ) -> NextWithIdx where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, X1: Fn(&mut U, I::Item) -> Vo, diff --git a/src/using/runner/thread_runner_compute/next_any.rs b/src/using/runner/thread_runner_compute/next_any.rs index c44d320..cf19973 100644 --- a/src/using/runner/thread_runner_compute/next_any.rs +++ b/src/using/runner/thread_runner_compute/next_any.rs @@ -1,5 +1,5 @@ use crate::{ - ThreadRunner, + ThreadExecutor, generic_values::Values, generic_values::runner_results::{Fallibility, Next}, }; @@ -13,7 +13,7 @@ pub fn m( map1: &M1, ) -> Option where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, O: Send, M1: Fn(&mut U, I::Item) -> O, @@ -73,7 +73,7 @@ pub fn x( xap1: &X1, ) -> Result, ::Error> where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, Vo::Item: Send, diff --git a/src/using/runner/thread_runner_compute/reduce.rs b/src/using/runner/thread_runner_compute/reduce.rs index 11f70ea..8e1c8e5 100644 --- a/src/using/runner/thread_runner_compute/reduce.rs +++ b/src/using/runner/thread_runner_compute/reduce.rs @@ -1,5 +1,5 @@ use crate::{ - ThreadRunner, + ThreadExecutor, generic_values::{ Values, runner_results::{Reduce, StopReduce}, @@ -18,7 +18,7 @@ pub fn m( reduce: &Red, ) -> Option where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, M1: Fn(&mut U, I::Item) -> O, Red: Fn(&mut U, O, O) -> O, @@ -90,7 +90,7 @@ pub fn x( reduce: &Red, ) -> Reduce where - C: ThreadRunner, + C: ThreadExecutor, I: ConcurrentIter, Vo: Values, X1: Fn(&mut U, I::Item) -> Vo, From bcaeb0993864e877dafc3e39e9cdd6bef3967f5f Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 15:03:17 +0200 Subject: [PATCH 195/264] renaming --- src/collect_into/collect.rs | 2 +- .../fallible_result/map_result.rs | 2 +- .../fallible_result/par_result.rs | 2 +- .../fallible_result/xap_result.rs | 2 +- src/computational_variants/map.rs | 2 +- src/computational_variants/par.rs | 2 +- src/computational_variants/tests/map/find.rs | 9 +++------ src/computational_variants/tests/map/reduce.rs | 15 ++++----------- src/computational_variants/xap.rs | 2 +- src/runner/mod.rs | 4 ++-- .../collect_arbitrary.rs | 2 +- .../collect_ordered.rs | 2 +- .../mod.rs | 0 .../next.rs | 2 +- .../next_any.rs | 2 +- .../reduce.rs | 2 +- .../collect_arbitrary.rs | 0 .../collect_ordered.rs | 0 .../mod.rs | 0 .../next.rs | 0 .../next_any.rs | 0 .../reduce.rs | 0 src/using/collect_into/collect.rs | 2 +- src/using/computational_variants/u_map.rs | 2 +- src/using/computational_variants/u_par.rs | 2 +- src/using/computational_variants/u_xap.rs | 2 +- src/using/runner/mod.rs | 4 ++-- .../collect_arbitrary.rs | 2 +- .../collect_ordered.rs | 2 +- .../mod.rs | 0 .../next.rs | 2 +- .../next_any.rs | 2 +- .../reduce.rs | 2 +- .../collect_arbitrary.rs | 0 .../collect_ordered.rs | 0 .../mod.rs | 0 .../next.rs | 0 .../next_any.rs | 0 .../reduce.rs | 0 39 files changed, 32 insertions(+), 42 deletions(-) rename src/runner/{parallel_runner_compute => 
parallel_compute}/collect_arbitrary.rs (97%) rename src/runner/{parallel_runner_compute => parallel_compute}/collect_ordered.rs (96%) rename src/runner/{parallel_runner_compute => parallel_compute}/mod.rs (100%) rename src/runner/{parallel_runner_compute => parallel_compute}/next.rs (96%) rename src/runner/{parallel_runner_compute => parallel_compute}/next_any.rs (96%) rename src/runner/{parallel_runner_compute => parallel_compute}/reduce.rs (96%) rename src/runner/{thread_runner_compute => thread_compute}/collect_arbitrary.rs (100%) rename src/runner/{thread_runner_compute => thread_compute}/collect_ordered.rs (100%) rename src/runner/{thread_runner_compute => thread_compute}/mod.rs (100%) rename src/runner/{thread_runner_compute => thread_compute}/next.rs (100%) rename src/runner/{thread_runner_compute => thread_compute}/next_any.rs (100%) rename src/runner/{thread_runner_compute => thread_compute}/reduce.rs (100%) rename src/using/runner/{parallel_runner_compute => parallel_compute}/collect_arbitrary.rs (98%) rename src/using/runner/{parallel_runner_compute => parallel_compute}/collect_ordered.rs (97%) rename src/using/runner/{parallel_runner_compute => parallel_compute}/mod.rs (100%) rename src/using/runner/{parallel_runner_compute => parallel_compute}/next.rs (97%) rename src/using/runner/{parallel_runner_compute => parallel_compute}/next_any.rs (97%) rename src/using/runner/{parallel_runner_compute => parallel_compute}/reduce.rs (97%) rename src/using/runner/{thread_runner_compute => thread_compute}/collect_arbitrary.rs (100%) rename src/using/runner/{thread_runner_compute => thread_compute}/collect_ordered.rs (100%) rename src/using/runner/{thread_runner_compute => thread_compute}/mod.rs (100%) rename src/using/runner/{thread_runner_compute => thread_compute}/next.rs (100%) rename src/using/runner/{thread_runner_compute => thread_compute}/next_any.rs (100%) rename src/using/runner/{thread_runner_compute => thread_compute}/reduce.rs (100%) diff --git a/src/collect_into/collect.rs b/src/collect_into/collect.rs index bbd03b5..248907d 100644 --- a/src/collect_into/collect.rs +++ b/src/collect_into/collect.rs @@ -3,7 +3,7 @@ use crate::generic_values::runner_results::{ Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, }; use crate::orch::{NumSpawned, Orchestrator}; -use crate::runner::parallel_runner_compute as prc; +use crate::runner::parallel_compute as prc; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 71c4697..c10b92d 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,7 +1,7 @@ use crate::computational_variants::ParMap; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_runner_compute as prc; +use crate::runner::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 123bd72..88b5619 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,7 +1,7 @@ use 
crate::computational_variants::Par; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_runner_compute as prc; +use crate::runner::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index ccda24c..4d6a2c5 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -3,7 +3,7 @@ use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_runner_compute as prc; +use crate::runner::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, Params}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index e1ac3af..a21f685 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -3,7 +3,7 @@ use crate::computational_variants::fallible_result::ParMapResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_runner_compute as prc; +use crate::runner::parallel_compute as prc; use crate::using::{UParMap, UsingClone, UsingFun}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; use crate::{ParIterResult, ParIterUsing}; diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index bfa838f..a4ba6df 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -3,7 +3,7 @@ use crate::computational_variants::fallible_result::ParResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_runner_compute as prc; +use crate::runner::parallel_compute as prc; use crate::using::{UPar, UsingClone, UsingFun}; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params, default_fns::map_self, diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs index fcf30c5..f100188 100644 --- a/src/computational_variants/tests/map/find.rs +++ b/src/computational_variants/tests/map/find.rs @@ -1,6 +1,4 @@ -use crate::{ - Params, default_fns::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, -}; +use crate::{Params, default_fns::map_self, orch::DefaultOrchestrator, runner::parallel_compute}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; @@ -26,7 +24,7 @@ fn m_find(n: usize, nt: usize, chunk: usize) { let iter = input.into_con_iter(); let output = - parallel_runner_compute::next::m(DefaultOrchestrator::default(), params, iter, map_self).1; + parallel_compute::next::m(DefaultOrchestrator::default(), params, iter, map_self).1; assert_eq!(expected, output); } @@ -43,8 +41,7 @@ fn m_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let output = - 
parallel_runner_compute::next::m(DefaultOrchestrator::default(), params, iter, map).1; + let output = parallel_compute::next::m(DefaultOrchestrator::default(), params, iter, map).1; assert_eq!(expected, output); } diff --git a/src/computational_variants/tests/map/reduce.rs b/src/computational_variants/tests/map/reduce.rs index 95036d2..3672f04 100644 --- a/src/computational_variants/tests/map/reduce.rs +++ b/src/computational_variants/tests/map/reduce.rs @@ -1,6 +1,4 @@ -use crate::{ - Params, default_fns::map_self, orch::DefaultOrchestrator, runner::parallel_runner_compute, -}; +use crate::{Params, default_fns::map_self, orch::DefaultOrchestrator, runner::parallel_compute}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; @@ -28,7 +26,7 @@ fn m_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let (_, output) = parallel_runner_compute::reduce::m( + let (_, output) = parallel_compute::reduce::m( DefaultOrchestrator::default(), params, iter, @@ -56,13 +54,8 @@ fn m_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let (_, output) = parallel_runner_compute::reduce::m( - DefaultOrchestrator::default(), - params, - iter, - map, - reduce, - ); + let (_, output) = + parallel_compute::reduce::m(DefaultOrchestrator::default(), params, iter, map, reduce); assert_eq!(expected, output); } diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index b811307..e96eef1 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -3,7 +3,7 @@ use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_runner_compute as prc; +use crate::runner::parallel_compute as prc; use crate::using::{UParXap, UsingClone, UsingFun}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; use crate::{ParIterResult, ParIterUsing}; diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 2cc077a..74fa1b1 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -1,9 +1,9 @@ mod computation_kind; mod fixed_chunk_executor; +pub(crate) mod parallel_compute; mod parallel_executor; -pub(crate) mod parallel_runner_compute; +mod thread_compute; mod thread_executor; -mod thread_runner_compute; pub use computation_kind::ComputationKind; pub use parallel_executor::ParallelExecutor; diff --git a/src/runner/parallel_runner_compute/collect_arbitrary.rs b/src/runner/parallel_compute/collect_arbitrary.rs similarity index 97% rename from src/runner/parallel_runner_compute/collect_arbitrary.rs rename to src/runner/parallel_compute/collect_arbitrary.rs index 9da669b..ca3543b 100644 --- a/src/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/runner/parallel_compute/collect_arbitrary.rs @@ -3,7 +3,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::ParallelCollectArbitrary; use crate::orch::Orchestrator; use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_runner_compute as th}; +use crate::runner::{ComputationKind, thread_compute as th}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git 
a/src/runner/parallel_runner_compute/collect_ordered.rs b/src/runner/parallel_compute/collect_ordered.rs similarity index 96% rename from src/runner/parallel_runner_compute/collect_ordered.rs rename to src/runner/parallel_compute/collect_ordered.rs index 43460f3..338f51a 100644 --- a/src/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/runner/parallel_compute/collect_ordered.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_runner_compute as th}; +use crate::runner::{ComputationKind, thread_compute as th}; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/runner/parallel_runner_compute/mod.rs b/src/runner/parallel_compute/mod.rs similarity index 100% rename from src/runner/parallel_runner_compute/mod.rs rename to src/runner/parallel_compute/mod.rs diff --git a/src/runner/parallel_runner_compute/next.rs b/src/runner/parallel_compute/next.rs similarity index 96% rename from src/runner/parallel_runner_compute/next.rs rename to src/runner/parallel_compute/next.rs index e2aaa7b..8752238 100644 --- a/src/runner/parallel_runner_compute/next.rs +++ b/src/runner/parallel_compute/next.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; -use crate::runner::{ComputationKind, thread_runner_compute as th}; +use crate::runner::{ComputationKind, thread_compute as th}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/runner/parallel_runner_compute/next_any.rs b/src/runner/parallel_compute/next_any.rs similarity index 96% rename from src/runner/parallel_runner_compute/next_any.rs rename to src/runner/parallel_compute/next_any.rs index f5f1514..1586fa7 100644 --- a/src/runner/parallel_runner_compute/next_any.rs +++ b/src/runner/parallel_compute/next_any.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; -use crate::runner::{ComputationKind, thread_runner_compute as th}; +use crate::runner::{ComputationKind, thread_compute as th}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/runner/parallel_runner_compute/reduce.rs b/src/runner/parallel_compute/reduce.rs similarity index 96% rename from src/runner/parallel_runner_compute/reduce.rs rename to src/runner/parallel_compute/reduce.rs index e455049..037c36d 100644 --- a/src/runner/parallel_runner_compute/reduce.rs +++ b/src/runner/parallel_compute/reduce.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_runner_compute as th}; +use crate::runner::{ComputationKind, thread_compute as th}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/runner/thread_runner_compute/collect_arbitrary.rs b/src/runner/thread_compute/collect_arbitrary.rs similarity index 100% rename from src/runner/thread_runner_compute/collect_arbitrary.rs rename to src/runner/thread_compute/collect_arbitrary.rs diff --git 
a/src/runner/thread_runner_compute/collect_ordered.rs b/src/runner/thread_compute/collect_ordered.rs similarity index 100% rename from src/runner/thread_runner_compute/collect_ordered.rs rename to src/runner/thread_compute/collect_ordered.rs diff --git a/src/runner/thread_runner_compute/mod.rs b/src/runner/thread_compute/mod.rs similarity index 100% rename from src/runner/thread_runner_compute/mod.rs rename to src/runner/thread_compute/mod.rs diff --git a/src/runner/thread_runner_compute/next.rs b/src/runner/thread_compute/next.rs similarity index 100% rename from src/runner/thread_runner_compute/next.rs rename to src/runner/thread_compute/next.rs diff --git a/src/runner/thread_runner_compute/next_any.rs b/src/runner/thread_compute/next_any.rs similarity index 100% rename from src/runner/thread_runner_compute/next_any.rs rename to src/runner/thread_compute/next_any.rs diff --git a/src/runner/thread_runner_compute/reduce.rs b/src/runner/thread_compute/reduce.rs similarity index 100% rename from src/runner/thread_runner_compute/reduce.rs rename to src/runner/thread_compute/reduce.rs diff --git a/src/using/collect_into/collect.rs b/src/using/collect_into/collect.rs index 1042eca..27587d7 100644 --- a/src/using/collect_into/collect.rs +++ b/src/using/collect_into/collect.rs @@ -3,7 +3,7 @@ use crate::generic_values::runner_results::{ Infallible, ParallelCollect, ParallelCollectArbitrary, }; use crate::orch::{NumSpawned, Orchestrator}; -use crate::using::runner::parallel_runner_compute as prc; +use crate::using::runner::parallel_compute as prc; use crate::using::using_variants::Using; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/computational_variants/u_map.rs b/src/using/computational_variants/u_map.rs index 38e244b..95b601d 100644 --- a/src/using/computational_variants/u_map.rs +++ b/src/using/computational_variants/u_map.rs @@ -2,7 +2,7 @@ use crate::ParIterUsing; use crate::generic_values::Vector; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::using::computational_variants::u_xap::UParXap; -use crate::using::runner::parallel_runner_compute as prc; +use crate::using::runner::parallel_compute as prc; use crate::using::using_variants::Using; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/computational_variants/u_par.rs b/src/using/computational_variants/u_par.rs index 3dd7671..299ed2f 100644 --- a/src/using/computational_variants/u_par.rs +++ b/src/using/computational_variants/u_par.rs @@ -4,7 +4,7 @@ use crate::generic_values::Vector; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::using::computational_variants::u_map::UParMap; use crate::using::computational_variants::u_xap::UParXap; -use crate::using::runner::parallel_runner_compute as prc; +use crate::using::runner::parallel_compute as prc; use crate::using::using_variants::Using; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index 8ff6574..ed91672 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -1,4 +1,4 @@ -use crate::using::runner::parallel_runner_compute as prc; +use crate::using::runner::parallel_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterUsing, Params, 
generic_values::{TransformableValues, runner_results::Infallible}, diff --git a/src/using/runner/mod.rs b/src/using/runner/mod.rs index 298ff9c..9bd9ee3 100644 --- a/src/using/runner/mod.rs +++ b/src/using/runner/mod.rs @@ -1,2 +1,2 @@ -pub(super) mod parallel_runner_compute; -mod thread_runner_compute; +pub(super) mod parallel_compute; +mod thread_compute; diff --git a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs b/src/using/runner/parallel_compute/collect_arbitrary.rs similarity index 98% rename from src/using/runner/parallel_runner_compute/collect_arbitrary.rs rename to src/using/runner/parallel_compute/collect_arbitrary.rs index 0b67c3d..435d507 100644 --- a/src/using/runner/parallel_runner_compute/collect_arbitrary.rs +++ b/src/using/runner/parallel_compute/collect_arbitrary.rs @@ -4,7 +4,7 @@ use crate::generic_values::runner_results::ParallelCollectArbitrary; use crate::orch::Orchestrator; use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; use crate::runner::ComputationKind; -use crate::using::runner::thread_runner_compute as th; +use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_runner_compute/collect_ordered.rs b/src/using/runner/parallel_compute/collect_ordered.rs similarity index 97% rename from src/using/runner/parallel_runner_compute/collect_ordered.rs rename to src/using/runner/parallel_compute/collect_ordered.rs index 5c5224a..3f43926 100644 --- a/src/using/runner/parallel_runner_compute/collect_ordered.rs +++ b/src/using/runner/parallel_compute/collect_ordered.rs @@ -3,7 +3,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use crate::runner::ComputationKind; -use crate::using::runner::thread_runner_compute as th; +use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; diff --git a/src/using/runner/parallel_runner_compute/mod.rs b/src/using/runner/parallel_compute/mod.rs similarity index 100% rename from src/using/runner/parallel_runner_compute/mod.rs rename to src/using/runner/parallel_compute/mod.rs diff --git a/src/using/runner/parallel_runner_compute/next.rs b/src/using/runner/parallel_compute/next.rs similarity index 97% rename from src/using/runner/parallel_runner_compute/next.rs rename to src/using/runner/parallel_compute/next.rs index 5dbaf0a..729fcc5 100644 --- a/src/using/runner/parallel_runner_compute/next.rs +++ b/src/using/runner/parallel_compute/next.rs @@ -3,7 +3,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; use crate::runner::ComputationKind; -use crate::using::runner::thread_runner_compute as th; +use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_runner_compute/next_any.rs b/src/using/runner/parallel_compute/next_any.rs similarity index 97% rename from src/using/runner/parallel_runner_compute/next_any.rs rename to src/using/runner/parallel_compute/next_any.rs index 303d268..6eae73d 100644 --- a/src/using/runner/parallel_runner_compute/next_any.rs +++ 
b/src/using/runner/parallel_compute/next_any.rs @@ -3,7 +3,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; use crate::runner::ComputationKind; -use crate::using::runner::thread_runner_compute as th; +use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_runner_compute/reduce.rs b/src/using/runner/parallel_compute/reduce.rs similarity index 97% rename from src/using/runner/parallel_runner_compute/reduce.rs rename to src/using/runner/parallel_compute/reduce.rs index 7eb8812..dd24d40 100644 --- a/src/using/runner/parallel_runner_compute/reduce.rs +++ b/src/using/runner/parallel_compute/reduce.rs @@ -3,7 +3,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use crate::runner::ComputationKind; -use crate::using::runner::thread_runner_compute as th; +use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/thread_runner_compute/collect_arbitrary.rs b/src/using/runner/thread_compute/collect_arbitrary.rs similarity index 100% rename from src/using/runner/thread_runner_compute/collect_arbitrary.rs rename to src/using/runner/thread_compute/collect_arbitrary.rs diff --git a/src/using/runner/thread_runner_compute/collect_ordered.rs b/src/using/runner/thread_compute/collect_ordered.rs similarity index 100% rename from src/using/runner/thread_runner_compute/collect_ordered.rs rename to src/using/runner/thread_compute/collect_ordered.rs diff --git a/src/using/runner/thread_runner_compute/mod.rs b/src/using/runner/thread_compute/mod.rs similarity index 100% rename from src/using/runner/thread_runner_compute/mod.rs rename to src/using/runner/thread_compute/mod.rs diff --git a/src/using/runner/thread_runner_compute/next.rs b/src/using/runner/thread_compute/next.rs similarity index 100% rename from src/using/runner/thread_runner_compute/next.rs rename to src/using/runner/thread_compute/next.rs diff --git a/src/using/runner/thread_runner_compute/next_any.rs b/src/using/runner/thread_compute/next_any.rs similarity index 100% rename from src/using/runner/thread_runner_compute/next_any.rs rename to src/using/runner/thread_compute/next_any.rs diff --git a/src/using/runner/thread_runner_compute/reduce.rs b/src/using/runner/thread_compute/reduce.rs similarity index 100% rename from src/using/runner/thread_runner_compute/reduce.rs rename to src/using/runner/thread_compute/reduce.rs From 39d90ba7bda53c7d08df54eb99a00ae1e6c997de Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 15:05:21 +0200 Subject: [PATCH 196/264] renaming --- src/collect_into/collect.rs | 2 +- src/computational_variants/fallible_result/map_result.rs | 2 +- src/computational_variants/fallible_result/par_result.rs | 2 +- src/computational_variants/fallible_result/xap_result.rs | 2 +- src/computational_variants/map.rs | 2 +- src/computational_variants/par.rs | 2 +- src/computational_variants/tests/map/find.rs | 2 +- src/computational_variants/tests/map/reduce.rs | 2 +- src/computational_variants/xap.rs | 2 +- src/{runner => executor}/computation_kind.rs | 0 src/{runner => executor}/fixed_chunk_executor/chunk_size.rs | 2 +- src/{runner => executor}/fixed_chunk_executor/mod.rs | 0 
.../fixed_chunk_executor/parallel_executor.rs | 2 +- .../fixed_chunk_executor/thread_executor.rs | 2 +- src/{runner => executor}/mod.rs | 0 .../parallel_compute/collect_arbitrary.rs | 0 .../parallel_compute/collect_ordered.rs | 0 src/{runner => executor}/parallel_compute/mod.rs | 0 src/{runner => executor}/parallel_compute/next.rs | 0 src/{runner => executor}/parallel_compute/next_any.rs | 0 src/{runner => executor}/parallel_compute/reduce.rs | 0 src/{runner => executor}/parallel_executor.rs | 0 .../thread_compute/collect_arbitrary.rs | 0 src/{runner => executor}/thread_compute/collect_ordered.rs | 0 src/{runner => executor}/thread_compute/mod.rs | 0 src/{runner => executor}/thread_compute/next.rs | 0 src/{runner => executor}/thread_compute/next_any.rs | 0 src/{runner => executor}/thread_compute/reduce.rs | 0 src/{runner => executor}/thread_executor.rs | 0 src/lib.rs | 6 +++--- src/orch/orchestrator.rs | 2 +- src/using/runner/parallel_compute/collect_arbitrary.rs | 2 +- src/using/runner/parallel_compute/collect_ordered.rs | 2 +- src/using/runner/parallel_compute/next.rs | 2 +- src/using/runner/parallel_compute/next_any.rs | 2 +- src/using/runner/parallel_compute/reduce.rs | 2 +- 36 files changed, 21 insertions(+), 21 deletions(-) rename src/{runner => executor}/computation_kind.rs (100%) rename src/{runner => executor}/fixed_chunk_executor/chunk_size.rs (97%) rename src/{runner => executor}/fixed_chunk_executor/mod.rs (100%) rename src/{runner => executor}/fixed_chunk_executor/parallel_executor.rs (97%) rename src/{runner => executor}/fixed_chunk_executor/thread_executor.rs (90%) rename src/{runner => executor}/mod.rs (100%) rename src/{runner => executor}/parallel_compute/collect_arbitrary.rs (100%) rename src/{runner => executor}/parallel_compute/collect_ordered.rs (100%) rename src/{runner => executor}/parallel_compute/mod.rs (100%) rename src/{runner => executor}/parallel_compute/next.rs (100%) rename src/{runner => executor}/parallel_compute/next_any.rs (100%) rename src/{runner => executor}/parallel_compute/reduce.rs (100%) rename src/{runner => executor}/parallel_executor.rs (100%) rename src/{runner => executor}/thread_compute/collect_arbitrary.rs (100%) rename src/{runner => executor}/thread_compute/collect_ordered.rs (100%) rename src/{runner => executor}/thread_compute/mod.rs (100%) rename src/{runner => executor}/thread_compute/next.rs (100%) rename src/{runner => executor}/thread_compute/next_any.rs (100%) rename src/{runner => executor}/thread_compute/reduce.rs (100%) rename src/{runner => executor}/thread_executor.rs (100%) diff --git a/src/collect_into/collect.rs b/src/collect_into/collect.rs index 248907d..b839aee 100644 --- a/src/collect_into/collect.rs +++ b/src/collect_into/collect.rs @@ -3,7 +3,7 @@ use crate::generic_values::runner_results::{ Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, }; use crate::orch::{NumSpawned, Orchestrator}; -use crate::runner::parallel_compute as prc; +use crate::executor::parallel_compute as prc; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index c10b92d..5a50f67 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,7 +1,7 @@ use crate::computational_variants::ParMap; use crate::orch::{DefaultOrchestrator, 
Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_compute as prc; +use crate::executor::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 88b5619..484769a 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,7 +1,7 @@ use crate::computational_variants::Par; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_compute as prc; +use crate::executor::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 4d6a2c5..a43ca85 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -3,7 +3,7 @@ use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::parallel_compute as prc; +use crate::executor::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, Params}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index a21f685..7df02a1 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -3,7 +3,7 @@ use crate::computational_variants::fallible_result::ParMapResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_compute as prc; +use crate::executor::parallel_compute as prc; use crate::using::{UParMap, UsingClone, UsingFun}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; use crate::{ParIterResult, ParIterUsing}; diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index a4ba6df..2a73566 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -3,7 +3,7 @@ use crate::computational_variants::fallible_result::ParResult; use crate::generic_values::{Vector, WhilstAtom}; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_compute as prc; +use crate::executor::parallel_compute as prc; use crate::using::{UPar, UsingClone, UsingFun}; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params, default_fns::map_self, diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs index f100188..28a04e7 100644 --- a/src/computational_variants/tests/map/find.rs +++ b/src/computational_variants/tests/map/find.rs @@ -1,4 +1,4 @@ -use crate::{Params, default_fns::map_self, orch::DefaultOrchestrator, runner::parallel_compute}; +use crate::{Params, default_fns::map_self, orch::DefaultOrchestrator, executor::parallel_compute}; use alloc::format; use alloc::string::{String, ToString}; 
use alloc::vec::Vec; diff --git a/src/computational_variants/tests/map/reduce.rs b/src/computational_variants/tests/map/reduce.rs index 3672f04..f2b08a9 100644 --- a/src/computational_variants/tests/map/reduce.rs +++ b/src/computational_variants/tests/map/reduce.rs @@ -1,4 +1,4 @@ -use crate::{Params, default_fns::map_self, orch::DefaultOrchestrator, runner::parallel_compute}; +use crate::{Params, default_fns::map_self, orch::DefaultOrchestrator, executor::parallel_compute}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index e96eef1..87402d4 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -3,7 +3,7 @@ use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; -use crate::runner::parallel_compute as prc; +use crate::executor::parallel_compute as prc; use crate::using::{UParXap, UsingClone, UsingFun}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; use crate::{ParIterResult, ParIterUsing}; diff --git a/src/runner/computation_kind.rs b/src/executor/computation_kind.rs similarity index 100% rename from src/runner/computation_kind.rs rename to src/executor/computation_kind.rs diff --git a/src/runner/fixed_chunk_executor/chunk_size.rs b/src/executor/fixed_chunk_executor/chunk_size.rs similarity index 97% rename from src/runner/fixed_chunk_executor/chunk_size.rs rename to src/executor/fixed_chunk_executor/chunk_size.rs index 4659980..52083cc 100644 --- a/src/runner/fixed_chunk_executor/chunk_size.rs +++ b/src/executor/fixed_chunk_executor/chunk_size.rs @@ -1,4 +1,4 @@ -use crate::{parameters::ChunkSize, runner::computation_kind::ComputationKind}; +use crate::{parameters::ChunkSize, executor::computation_kind::ComputationKind}; use core::num::NonZeroUsize; const MAX_CHUNK_SIZE: usize = 1 << 20; diff --git a/src/runner/fixed_chunk_executor/mod.rs b/src/executor/fixed_chunk_executor/mod.rs similarity index 100% rename from src/runner/fixed_chunk_executor/mod.rs rename to src/executor/fixed_chunk_executor/mod.rs diff --git a/src/runner/fixed_chunk_executor/parallel_executor.rs b/src/executor/fixed_chunk_executor/parallel_executor.rs similarity index 97% rename from src/runner/fixed_chunk_executor/parallel_executor.rs rename to src/executor/fixed_chunk_executor/parallel_executor.rs index ec68b16..ab6b3d2 100644 --- a/src/runner/fixed_chunk_executor/parallel_executor.rs +++ b/src/executor/fixed_chunk_executor/parallel_executor.rs @@ -2,7 +2,7 @@ use super::{chunk_size::ResolvedChunkSize, thread_executor::FixedChunkThreadExec use crate::{ orch::NumSpawned, parameters::Params, - runner::{computation_kind::ComputationKind, parallel_executor::ParallelExecutor}, + executor::{computation_kind::ComputationKind, parallel_executor::ParallelExecutor}, }; use core::{ num::NonZeroUsize, diff --git a/src/runner/fixed_chunk_executor/thread_executor.rs b/src/executor/fixed_chunk_executor/thread_executor.rs similarity index 90% rename from src/runner/fixed_chunk_executor/thread_executor.rs rename to src/executor/fixed_chunk_executor/thread_executor.rs index 65f45f6..186d714 100644 --- a/src/runner/fixed_chunk_executor/thread_executor.rs +++ b/src/executor/fixed_chunk_executor/thread_executor.rs @@ -1,4 +1,4 @@ -use crate::runner::thread_executor::ThreadExecutor; +use 
crate::executor::thread_executor::ThreadExecutor; use orx_concurrent_iter::ConcurrentIter; pub struct FixedChunkThreadExecutor { diff --git a/src/runner/mod.rs b/src/executor/mod.rs similarity index 100% rename from src/runner/mod.rs rename to src/executor/mod.rs diff --git a/src/runner/parallel_compute/collect_arbitrary.rs b/src/executor/parallel_compute/collect_arbitrary.rs similarity index 100% rename from src/runner/parallel_compute/collect_arbitrary.rs rename to src/executor/parallel_compute/collect_arbitrary.rs diff --git a/src/runner/parallel_compute/collect_ordered.rs b/src/executor/parallel_compute/collect_ordered.rs similarity index 100% rename from src/runner/parallel_compute/collect_ordered.rs rename to src/executor/parallel_compute/collect_ordered.rs diff --git a/src/runner/parallel_compute/mod.rs b/src/executor/parallel_compute/mod.rs similarity index 100% rename from src/runner/parallel_compute/mod.rs rename to src/executor/parallel_compute/mod.rs diff --git a/src/runner/parallel_compute/next.rs b/src/executor/parallel_compute/next.rs similarity index 100% rename from src/runner/parallel_compute/next.rs rename to src/executor/parallel_compute/next.rs diff --git a/src/runner/parallel_compute/next_any.rs b/src/executor/parallel_compute/next_any.rs similarity index 100% rename from src/runner/parallel_compute/next_any.rs rename to src/executor/parallel_compute/next_any.rs diff --git a/src/runner/parallel_compute/reduce.rs b/src/executor/parallel_compute/reduce.rs similarity index 100% rename from src/runner/parallel_compute/reduce.rs rename to src/executor/parallel_compute/reduce.rs diff --git a/src/runner/parallel_executor.rs b/src/executor/parallel_executor.rs similarity index 100% rename from src/runner/parallel_executor.rs rename to src/executor/parallel_executor.rs diff --git a/src/runner/thread_compute/collect_arbitrary.rs b/src/executor/thread_compute/collect_arbitrary.rs similarity index 100% rename from src/runner/thread_compute/collect_arbitrary.rs rename to src/executor/thread_compute/collect_arbitrary.rs diff --git a/src/runner/thread_compute/collect_ordered.rs b/src/executor/thread_compute/collect_ordered.rs similarity index 100% rename from src/runner/thread_compute/collect_ordered.rs rename to src/executor/thread_compute/collect_ordered.rs diff --git a/src/runner/thread_compute/mod.rs b/src/executor/thread_compute/mod.rs similarity index 100% rename from src/runner/thread_compute/mod.rs rename to src/executor/thread_compute/mod.rs diff --git a/src/runner/thread_compute/next.rs b/src/executor/thread_compute/next.rs similarity index 100% rename from src/runner/thread_compute/next.rs rename to src/executor/thread_compute/next.rs diff --git a/src/runner/thread_compute/next_any.rs b/src/executor/thread_compute/next_any.rs similarity index 100% rename from src/runner/thread_compute/next_any.rs rename to src/executor/thread_compute/next_any.rs diff --git a/src/runner/thread_compute/reduce.rs b/src/executor/thread_compute/reduce.rs similarity index 100% rename from src/runner/thread_compute/reduce.rs rename to src/executor/thread_compute/reduce.rs diff --git a/src/runner/thread_executor.rs b/src/executor/thread_executor.rs similarity index 100% rename from src/runner/thread_executor.rs rename to src/executor/thread_executor.rs diff --git a/src/lib.rs b/src/lib.rs index 858fa78..460d1bc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,6 +22,8 @@ mod collect_into; pub mod computational_variants; mod default_fns; mod env; +/// Module defining the parallel runner trait and 
the default parallel runner. +pub mod executor; mod generic_values; mod heap_sort; mod into_par_iter; @@ -38,8 +40,6 @@ mod parallelizable; mod parallelizable_collection; mod parallelizable_collection_mut; mod parameters; -/// Module defining the parallel runner trait and the default parallel runner. -pub mod runner; mod special_type_sets; /// Module defining parallel iterators with mutable access to values distributed to each thread. pub mod using; @@ -57,6 +57,7 @@ pub mod generic_iterator; mod test_utils; pub use collect_into::ParCollectInto; +pub use executor::{DefaultExecutor, ParallelExecutor, ThreadExecutor}; pub use into_par_iter::IntoParIter; pub use iter_into_par_iter::IterIntoParIter; pub use par_iter::ParIter; @@ -67,6 +68,5 @@ pub use parallelizable::Parallelizable; pub use parallelizable_collection::ParallelizableCollection; pub use parallelizable_collection_mut::ParallelizableCollectionMut; pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params}; -pub use runner::{DefaultExecutor, ParallelExecutor, ThreadExecutor}; pub use special_type_sets::Sum; pub use using::ParIterUsing; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index eb6e0e4..84f7ccb 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,8 +1,8 @@ use crate::{ NumThreads, ParallelExecutor, Params, + executor::ComputationKind, generic_values::runner_results::{Fallibility, Infallible, Never}, orch::{NumSpawned, ParThreadPool, ParThreadPoolCompute}, - runner::ComputationKind, }; use alloc::vec::Vec; use core::num::NonZeroUsize; diff --git a/src/using/runner/parallel_compute/collect_arbitrary.rs b/src/using/runner/parallel_compute/collect_arbitrary.rs index 435d507..604d04d 100644 --- a/src/using/runner/parallel_compute/collect_arbitrary.rs +++ b/src/using/runner/parallel_compute/collect_arbitrary.rs @@ -1,9 +1,9 @@ use crate::Params; +use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::ParallelCollectArbitrary; use crate::orch::Orchestrator; use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; -use crate::runner::ComputationKind; use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_bag::ConcurrentBag; diff --git a/src/using/runner/parallel_compute/collect_ordered.rs b/src/using/runner/parallel_compute/collect_ordered.rs index 3f43926..d9dbfdb 100644 --- a/src/using/runner/parallel_compute/collect_ordered.rs +++ b/src/using/runner/parallel_compute/collect_ordered.rs @@ -1,8 +1,8 @@ use crate::Params; +use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::runner::ComputationKind; use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_compute/next.rs b/src/using/runner/parallel_compute/next.rs index 729fcc5..b4a3394 100644 --- a/src/using/runner/parallel_compute/next.rs +++ b/src/using/runner/parallel_compute/next.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; -use crate::runner::ComputationKind; +use crate::executor::ComputationKind; use crate::using::runner::thread_compute as th; use 
crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_compute/next_any.rs b/src/using/runner/parallel_compute/next_any.rs index 6eae73d..69de16c 100644 --- a/src/using/runner/parallel_compute/next_any.rs +++ b/src/using/runner/parallel_compute/next_any.rs @@ -1,8 +1,8 @@ use crate::Params; +use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; -use crate::runner::ComputationKind; use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_compute/reduce.rs b/src/using/runner/parallel_compute/reduce.rs index dd24d40..cd36ab0 100644 --- a/src/using/runner/parallel_compute/reduce.rs +++ b/src/using/runner/parallel_compute/reduce.rs @@ -1,8 +1,8 @@ use crate::Params; +use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::runner::ComputationKind; use crate::using::runner::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; From e50629d501b245649920b2c71952d464c5cf6c97 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 15:06:05 +0200 Subject: [PATCH 197/264] renaming --- src/using/collect_into/collect.rs | 2 +- src/using/computational_variants/u_map.rs | 2 +- src/using/computational_variants/u_par.rs | 2 +- src/using/computational_variants/u_xap.rs | 2 +- src/using/{runner => executor}/mod.rs | 0 .../{runner => executor}/parallel_compute/collect_arbitrary.rs | 2 +- .../{runner => executor}/parallel_compute/collect_ordered.rs | 2 +- src/using/{runner => executor}/parallel_compute/mod.rs | 0 src/using/{runner => executor}/parallel_compute/next.rs | 0 src/using/{runner => executor}/parallel_compute/next_any.rs | 2 +- src/using/{runner => executor}/parallel_compute/reduce.rs | 0 .../{runner => executor}/thread_compute/collect_arbitrary.rs | 0 .../{runner => executor}/thread_compute/collect_ordered.rs | 0 src/using/{runner => executor}/thread_compute/mod.rs | 0 src/using/{runner => executor}/thread_compute/next.rs | 0 src/using/{runner => executor}/thread_compute/next_any.rs | 0 src/using/{runner => executor}/thread_compute/reduce.rs | 0 src/using/mod.rs | 2 +- 18 files changed, 8 insertions(+), 8 deletions(-) rename src/using/{runner => executor}/mod.rs (100%) rename src/using/{runner => executor}/parallel_compute/collect_arbitrary.rs (98%) rename src/using/{runner => executor}/parallel_compute/collect_ordered.rs (97%) rename src/using/{runner => executor}/parallel_compute/mod.rs (100%) rename src/using/{runner => executor}/parallel_compute/next.rs (100%) rename src/using/{runner => executor}/parallel_compute/next_any.rs (97%) rename src/using/{runner => executor}/parallel_compute/reduce.rs (100%) rename src/using/{runner => executor}/thread_compute/collect_arbitrary.rs (100%) rename src/using/{runner => executor}/thread_compute/collect_ordered.rs (100%) rename src/using/{runner => executor}/thread_compute/mod.rs (100%) rename src/using/{runner => executor}/thread_compute/next.rs (100%) rename src/using/{runner => executor}/thread_compute/next_any.rs (100%) rename src/using/{runner => executor}/thread_compute/reduce.rs (100%) diff --git a/src/using/collect_into/collect.rs 
b/src/using/collect_into/collect.rs index 27587d7..8eda27d 100644 --- a/src/using/collect_into/collect.rs +++ b/src/using/collect_into/collect.rs @@ -3,7 +3,7 @@ use crate::generic_values::runner_results::{ Infallible, ParallelCollect, ParallelCollectArbitrary, }; use crate::orch::{NumSpawned, Orchestrator}; -use crate::using::runner::parallel_compute as prc; +use crate::using::executor::parallel_compute as prc; use crate::using::using_variants::Using; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/computational_variants/u_map.rs b/src/using/computational_variants/u_map.rs index 95b601d..399e1d1 100644 --- a/src/using/computational_variants/u_map.rs +++ b/src/using/computational_variants/u_map.rs @@ -2,7 +2,7 @@ use crate::ParIterUsing; use crate::generic_values::Vector; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::using::computational_variants::u_xap::UParXap; -use crate::using::runner::parallel_compute as prc; +use crate::using::executor::parallel_compute as prc; use crate::using::using_variants::Using; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/computational_variants/u_par.rs b/src/using/computational_variants/u_par.rs index 299ed2f..7f64e77 100644 --- a/src/using/computational_variants/u_par.rs +++ b/src/using/computational_variants/u_par.rs @@ -4,7 +4,7 @@ use crate::generic_values::Vector; use crate::orch::{DefaultOrchestrator, Orchestrator}; use crate::using::computational_variants::u_map::UParMap; use crate::using::computational_variants::u_xap::UParXap; -use crate::using::runner::parallel_compute as prc; +use crate::using::executor::parallel_compute as prc; use crate::using::using_variants::Using; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index ed91672..705b455 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -1,4 +1,4 @@ -use crate::using::runner::parallel_compute as prc; +use crate::using::executor::parallel_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterUsing, Params, generic_values::{TransformableValues, runner_results::Infallible}, diff --git a/src/using/runner/mod.rs b/src/using/executor/mod.rs similarity index 100% rename from src/using/runner/mod.rs rename to src/using/executor/mod.rs diff --git a/src/using/runner/parallel_compute/collect_arbitrary.rs b/src/using/executor/parallel_compute/collect_arbitrary.rs similarity index 98% rename from src/using/runner/parallel_compute/collect_arbitrary.rs rename to src/using/executor/parallel_compute/collect_arbitrary.rs index 604d04d..7268428 100644 --- a/src/using/runner/parallel_compute/collect_arbitrary.rs +++ b/src/using/executor/parallel_compute/collect_arbitrary.rs @@ -4,7 +4,7 @@ use crate::generic_values::Values; use crate::generic_values::runner_results::ParallelCollectArbitrary; use crate::orch::Orchestrator; use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; -use crate::using::runner::thread_compute as th; +use crate::using::executor::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_compute/collect_ordered.rs 
b/src/using/executor/parallel_compute/collect_ordered.rs similarity index 97% rename from src/using/runner/parallel_compute/collect_ordered.rs rename to src/using/executor/parallel_compute/collect_ordered.rs index d9dbfdb..be94efa 100644 --- a/src/using/runner/parallel_compute/collect_ordered.rs +++ b/src/using/executor/parallel_compute/collect_ordered.rs @@ -3,7 +3,7 @@ use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::using::runner::thread_compute as th; +use crate::using::executor::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; diff --git a/src/using/runner/parallel_compute/mod.rs b/src/using/executor/parallel_compute/mod.rs similarity index 100% rename from src/using/runner/parallel_compute/mod.rs rename to src/using/executor/parallel_compute/mod.rs diff --git a/src/using/runner/parallel_compute/next.rs b/src/using/executor/parallel_compute/next.rs similarity index 100% rename from src/using/runner/parallel_compute/next.rs rename to src/using/executor/parallel_compute/next.rs diff --git a/src/using/runner/parallel_compute/next_any.rs b/src/using/executor/parallel_compute/next_any.rs similarity index 97% rename from src/using/runner/parallel_compute/next_any.rs rename to src/using/executor/parallel_compute/next_any.rs index 69de16c..f57f81f 100644 --- a/src/using/runner/parallel_compute/next_any.rs +++ b/src/using/executor/parallel_compute/next_any.rs @@ -3,7 +3,7 @@ use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; -use crate::using::runner::thread_compute as th; +use crate::using::executor::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/runner/parallel_compute/reduce.rs b/src/using/executor/parallel_compute/reduce.rs similarity index 100% rename from src/using/runner/parallel_compute/reduce.rs rename to src/using/executor/parallel_compute/reduce.rs diff --git a/src/using/runner/thread_compute/collect_arbitrary.rs b/src/using/executor/thread_compute/collect_arbitrary.rs similarity index 100% rename from src/using/runner/thread_compute/collect_arbitrary.rs rename to src/using/executor/thread_compute/collect_arbitrary.rs diff --git a/src/using/runner/thread_compute/collect_ordered.rs b/src/using/executor/thread_compute/collect_ordered.rs similarity index 100% rename from src/using/runner/thread_compute/collect_ordered.rs rename to src/using/executor/thread_compute/collect_ordered.rs diff --git a/src/using/runner/thread_compute/mod.rs b/src/using/executor/thread_compute/mod.rs similarity index 100% rename from src/using/runner/thread_compute/mod.rs rename to src/using/executor/thread_compute/mod.rs diff --git a/src/using/runner/thread_compute/next.rs b/src/using/executor/thread_compute/next.rs similarity index 100% rename from src/using/runner/thread_compute/next.rs rename to src/using/executor/thread_compute/next.rs diff --git a/src/using/runner/thread_compute/next_any.rs b/src/using/executor/thread_compute/next_any.rs similarity index 100% rename from src/using/runner/thread_compute/next_any.rs rename to src/using/executor/thread_compute/next_any.rs diff --git 
a/src/using/runner/thread_compute/reduce.rs b/src/using/executor/thread_compute/reduce.rs similarity index 100% rename from src/using/runner/thread_compute/reduce.rs rename to src/using/executor/thread_compute/reduce.rs diff --git a/src/using/mod.rs b/src/using/mod.rs index 9701892..f5a7fd2 100644 --- a/src/using/mod.rs +++ b/src/using/mod.rs @@ -1,6 +1,6 @@ mod collect_into; mod computational_variants; -mod runner; +mod executor; mod u_par_iter; mod using_variants; From fb395810ee4ceac0f5d3af28e4ae98fb64773449 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 15:11:05 +0200 Subject: [PATCH 198/264] renaming --- src/executor/fixed_chunk_executor/chunk_size.rs | 2 +- src/executor/fixed_chunk_executor/parallel_executor.rs | 7 ++----- src/executor/mod.rs | 2 -- src/executor/parallel_compute/collect_arbitrary.rs | 5 ++--- src/executor/parallel_compute/collect_ordered.rs | 4 ++-- src/executor/parallel_compute/next.rs | 4 ++-- src/executor/parallel_compute/next_any.rs | 4 ++-- src/executor/parallel_compute/reduce.rs | 4 ++-- src/executor/parallel_executor.rs | 7 +++++-- src/orch/computation_kind.rs | 10 ++++++++++ src/orch/mod.rs | 2 ++ src/orch/orchestrator.rs | 2 +- .../executor/parallel_compute/collect_arbitrary.rs | 4 +--- src/using/executor/parallel_compute/collect_ordered.rs | 3 +-- src/using/executor/parallel_compute/next.rs | 5 ++--- src/using/executor/parallel_compute/next_any.rs | 3 +-- src/using/executor/parallel_compute/reduce.rs | 5 ++--- 17 files changed, 38 insertions(+), 35 deletions(-) create mode 100644 src/orch/computation_kind.rs diff --git a/src/executor/fixed_chunk_executor/chunk_size.rs b/src/executor/fixed_chunk_executor/chunk_size.rs index 52083cc..f63632b 100644 --- a/src/executor/fixed_chunk_executor/chunk_size.rs +++ b/src/executor/fixed_chunk_executor/chunk_size.rs @@ -1,4 +1,4 @@ -use crate::{parameters::ChunkSize, executor::computation_kind::ComputationKind}; +use crate::{orch::ComputationKind, parameters::ChunkSize}; use core::num::NonZeroUsize; const MAX_CHUNK_SIZE: usize = 1 << 20; diff --git a/src/executor/fixed_chunk_executor/parallel_executor.rs b/src/executor/fixed_chunk_executor/parallel_executor.rs index ab6b3d2..90f4fc9 100644 --- a/src/executor/fixed_chunk_executor/parallel_executor.rs +++ b/src/executor/fixed_chunk_executor/parallel_executor.rs @@ -1,9 +1,6 @@ use super::{chunk_size::ResolvedChunkSize, thread_executor::FixedChunkThreadExecutor}; -use crate::{ - orch::NumSpawned, - parameters::Params, - executor::{computation_kind::ComputationKind, parallel_executor::ParallelExecutor}, -}; +use crate::orch::ComputationKind; +use crate::{executor::parallel_executor::ParallelExecutor, orch::NumSpawned, parameters::Params}; use core::{ num::NonZeroUsize, sync::atomic::{AtomicUsize, Ordering}, diff --git a/src/executor/mod.rs b/src/executor/mod.rs index 74fa1b1..8deafde 100644 --- a/src/executor/mod.rs +++ b/src/executor/mod.rs @@ -1,11 +1,9 @@ -mod computation_kind; mod fixed_chunk_executor; pub(crate) mod parallel_compute; mod parallel_executor; mod thread_compute; mod thread_executor; -pub use computation_kind::ComputationKind; pub use parallel_executor::ParallelExecutor; pub use thread_executor::ThreadExecutor; diff --git a/src/executor/parallel_compute/collect_arbitrary.rs b/src/executor/parallel_compute/collect_arbitrary.rs index ca3543b..2308e83 100644 --- a/src/executor/parallel_compute/collect_arbitrary.rs +++ b/src/executor/parallel_compute/collect_arbitrary.rs @@ -1,9 +1,8 @@ use crate::Params; +use crate::executor::thread_compute as 
th; use crate::generic_values::Values; use crate::generic_values::runner_results::ParallelCollectArbitrary; -use crate::orch::Orchestrator; -use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_compute as th}; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/executor/parallel_compute/collect_ordered.rs b/src/executor/parallel_compute/collect_ordered.rs index 338f51a..737ca93 100644 --- a/src/executor/parallel_compute/collect_ordered.rs +++ b/src/executor/parallel_compute/collect_ordered.rs @@ -1,8 +1,8 @@ use crate::Params; +use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; -use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_compute as th}; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/executor/parallel_compute/next.rs b/src/executor/parallel_compute/next.rs index 8752238..9521105 100644 --- a/src/executor/parallel_compute/next.rs +++ b/src/executor/parallel_compute/next.rs @@ -1,8 +1,8 @@ use crate::Params; +use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; -use crate::runner::{ComputationKind, thread_compute as th}; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/executor/parallel_compute/next_any.rs b/src/executor/parallel_compute/next_any.rs index 1586fa7..cab2dcf 100644 --- a/src/executor/parallel_compute/next_any.rs +++ b/src/executor/parallel_compute/next_any.rs @@ -1,8 +1,8 @@ use crate::Params; +use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; -use crate::runner::{ComputationKind, thread_compute as th}; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/executor/parallel_compute/reduce.rs b/src/executor/parallel_compute/reduce.rs index 037c36d..ca429ae 100644 --- a/src/executor/parallel_compute/reduce.rs +++ b/src/executor/parallel_compute/reduce.rs @@ -1,8 +1,8 @@ use crate::Params; +use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::runner::{ComputationKind, thread_compute as th}; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/executor/parallel_executor.rs b/src/executor/parallel_executor.rs index 8eddc9b..bf367b4 100644 --- a/src/executor/parallel_executor.rs +++ b/src/executor/parallel_executor.rs @@ -1,5 +1,8 @@ -use super::{computation_kind::ComputationKind, 
thread_executor::ThreadExecutor}; -use crate::{orch::NumSpawned, parameters::Params}; +use super::thread_executor::ThreadExecutor; +use crate::{ + orch::{ComputationKind, NumSpawned}, + parameters::Params, +}; use core::num::NonZeroUsize; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/orch/computation_kind.rs b/src/orch/computation_kind.rs new file mode 100644 index 0000000..0359e43 --- /dev/null +++ b/src/orch/computation_kind.rs @@ -0,0 +1,10 @@ +/// Computation kind. +#[derive(Clone, Copy)] +pub enum ComputationKind { + /// Computation where outputs are collected into a collection. + Collect, + /// Computation where the inputs or intermediate results are reduced to a single value. + Reduce, + /// Computation which allows for early returns, such as `find` operation. + EarlyReturn, +} diff --git a/src/orch/mod.rs b/src/orch/mod.rs index cf9f867..e6c2d7f 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -1,3 +1,4 @@ +mod computation_kind; mod implementations; mod num_spawned; mod orchestrator; @@ -6,6 +7,7 @@ mod par_thread_pool; pub(crate) use orchestrator::{SharedStateOf, ThreadRunnerOf}; pub use crate::orch::implementations::DefaultStdOrchestrator; +pub use computation_kind::ComputationKind; pub use num_spawned::NumSpawned; pub use orchestrator::Orchestrator; pub use par_thread_pool::{ParThreadPool, ParThreadPoolCompute}; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 84f7ccb..6130b4d 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,7 +1,7 @@ use crate::{ NumThreads, ParallelExecutor, Params, - executor::ComputationKind, generic_values::runner_results::{Fallibility, Infallible, Never}, + orch::ComputationKind, orch::{NumSpawned, ParThreadPool, ParThreadPoolCompute}, }; use alloc::vec::Vec; diff --git a/src/using/executor/parallel_compute/collect_arbitrary.rs b/src/using/executor/parallel_compute/collect_arbitrary.rs index 7268428..2a378c1 100644 --- a/src/using/executor/parallel_compute/collect_arbitrary.rs +++ b/src/using/executor/parallel_compute/collect_arbitrary.rs @@ -1,9 +1,7 @@ use crate::Params; -use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::ParallelCollectArbitrary; -use crate::orch::Orchestrator; -use crate::orch::{NumSpawned, SharedStateOf, ThreadRunnerOf}; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use crate::using::executor::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_bag::ConcurrentBag; diff --git a/src/using/executor/parallel_compute/collect_ordered.rs b/src/using/executor/parallel_compute/collect_ordered.rs index be94efa..4249be3 100644 --- a/src/using/executor/parallel_compute/collect_ordered.rs +++ b/src/using/executor/parallel_compute/collect_ordered.rs @@ -1,8 +1,7 @@ use crate::Params; -use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; -use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use crate::using::executor::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/executor/parallel_compute/next.rs b/src/using/executor/parallel_compute/next.rs index b4a3394..8f54df1 100644 --- a/src/using/executor/parallel_compute/next.rs +++ 
b/src/using/executor/parallel_compute/next.rs @@ -1,9 +1,8 @@ use crate::Params; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; -use crate::executor::ComputationKind; -use crate::using::runner::thread_compute as th; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf}; +use crate::using::executor::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/executor/parallel_compute/next_any.rs b/src/using/executor/parallel_compute/next_any.rs index f57f81f..2084bb6 100644 --- a/src/using/executor/parallel_compute/next_any.rs +++ b/src/using/executor/parallel_compute/next_any.rs @@ -1,8 +1,7 @@ use crate::Params; -use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{NumSpawned, Orchestrator, SharedStateOf}; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf}; use crate::using::executor::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/using/executor/parallel_compute/reduce.rs b/src/using/executor/parallel_compute/reduce.rs index cd36ab0..b327744 100644 --- a/src/using/executor/parallel_compute/reduce.rs +++ b/src/using/executor/parallel_compute/reduce.rs @@ -1,9 +1,8 @@ use crate::Params; -use crate::executor::ComputationKind; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; -use crate::using::runner::thread_compute as th; +use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; +use crate::using::executor::thread_compute as th; use crate::using::using_variants::Using; use orx_concurrent_iter::ConcurrentIter; From 25878337dd6d9977ea9173dcae71b8a4969938af Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 15:13:41 +0200 Subject: [PATCH 199/264] reorganization --- src/lib.rs | 2 ++ src/orch/implementations/default_std_orchestrator.rs | 2 +- src/orch/implementations/rayon.rs | 3 +-- src/orch/implementations/scoped_threadpool.rs | 3 +-- src/orch/mod.rs | 2 -- src/orch/orchestrator.rs | 4 ++-- src/{orch => }/par_thread_pool.rs | 2 +- 7 files changed, 8 insertions(+), 10 deletions(-) rename src/{orch => }/par_thread_pool.rs (96%) diff --git a/src/lib.rs b/src/lib.rs index 460d1bc..adcbbd3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -35,6 +35,7 @@ pub mod orch; mod par_iter; mod par_iter_option; mod par_iter_result; +mod par_thread_pool; mod parallel_drainable; mod parallelizable; mod parallelizable_collection; @@ -63,6 +64,7 @@ pub use iter_into_par_iter::IterIntoParIter; pub use par_iter::ParIter; pub use par_iter_option::ParIterOption; pub use par_iter_result::ParIterResult; +pub use par_thread_pool::ParThreadPool; pub use parallel_drainable::ParallelDrainableOverSlice; pub use parallelizable::Parallelizable; pub use parallelizable_collection::ParallelizableCollection; diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/orch/implementations/default_std_orchestrator.rs index 829b792..c5667d4 100644 --- a/src/orch/implementations/default_std_orchestrator.rs +++ b/src/orch/implementations/default_std_orchestrator.rs @@ -1,4 +1,4 @@ -use crate::orch::ParThreadPool; +use crate::par_thread_pool::ParThreadPool; 
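PATCH 198 makes ComputationKind part of the orchestration vocabulary: the enum moves into src/orch/computation_kind.rs and is re-exported from the orch module, so orchestrators and executors share one description of whether a computation collects, reduces, or may return early. A minimal sketch of the kind of policy this enables follows; the thresholds are invented for illustration (only the 1 << 20 cap appears in the fixed-chunk executor above), and the crate is assumed to be consumed as orx_parallel.

use orx_parallel::orch::ComputationKind; // becomes runner::ComputationKind after PATCH 200

// Illustrative chunk-size policy keyed on the computation kind; not the
// crate's actual heuristic.
const MAX_CHUNK_SIZE: usize = 1 << 20; // same cap as the fixed-chunk executor

fn chunk_size_hint(kind: ComputationKind, remaining_len: usize) -> usize {
    match kind {
        // Collect and Reduce drain the whole input, so large chunks
        // amortize synchronization on the concurrent iterator.
        ComputationKind::Collect | ComputationKind::Reduce => {
            remaining_len.clamp(1, MAX_CHUNK_SIZE)
        }
        // Early-return computations such as `find` favor small chunks so
        // that a hit on one thread stops the others with little wasted work.
        ComputationKind::EarlyReturn => remaining_len.clamp(1, 64),
    }
}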
use crate::{DefaultExecutor, orch::Orchestrator}; use core::num::NonZeroUsize; diff --git a/src/orch/implementations/rayon.rs b/src/orch/implementations/rayon.rs index 38b86d2..9da5960 100644 --- a/src/orch/implementations/rayon.rs +++ b/src/orch/implementations/rayon.rs @@ -1,6 +1,5 @@ use crate::{ - DefaultExecutor, ParallelExecutor, - orch::{Orchestrator, ParThreadPool}, + DefaultExecutor, ParallelExecutor, orch::Orchestrator, par_thread_pool::ParThreadPool, }; use core::{marker::PhantomData, num::NonZeroUsize}; use orx_self_or::SoR; diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/orch/implementations/scoped_threadpool.rs index 95e6d16..2dded51 100644 --- a/src/orch/implementations/scoped_threadpool.rs +++ b/src/orch/implementations/scoped_threadpool.rs @@ -1,6 +1,5 @@ use crate::{ - DefaultExecutor, ParallelExecutor, - orch::{Orchestrator, ParThreadPool}, + DefaultExecutor, ParallelExecutor, orch::Orchestrator, par_thread_pool::ParThreadPool, }; use core::{marker::PhantomData, num::NonZeroUsize}; use orx_self_or::SoM; diff --git a/src/orch/mod.rs b/src/orch/mod.rs index e6c2d7f..9d50e70 100644 --- a/src/orch/mod.rs +++ b/src/orch/mod.rs @@ -2,7 +2,6 @@ mod computation_kind; mod implementations; mod num_spawned; mod orchestrator; -mod par_thread_pool; pub(crate) use orchestrator::{SharedStateOf, ThreadRunnerOf}; @@ -10,6 +9,5 @@ pub use crate::orch::implementations::DefaultStdOrchestrator; pub use computation_kind::ComputationKind; pub use num_spawned::NumSpawned; pub use orchestrator::Orchestrator; -pub use par_thread_pool::{ParThreadPool, ParThreadPoolCompute}; pub type DefaultOrchestrator = DefaultStdOrchestrator; diff --git a/src/orch/orchestrator.rs b/src/orch/orchestrator.rs index 6130b4d..8616ae4 100644 --- a/src/orch/orchestrator.rs +++ b/src/orch/orchestrator.rs @@ -1,8 +1,8 @@ use crate::{ NumThreads, ParallelExecutor, Params, generic_values::runner_results::{Fallibility, Infallible, Never}, - orch::ComputationKind, - orch::{NumSpawned, ParThreadPool, ParThreadPoolCompute}, + orch::{ComputationKind, NumSpawned}, + par_thread_pool::{ParThreadPool, ParThreadPoolCompute}, }; use alloc::vec::Vec; use core::num::NonZeroUsize; diff --git a/src/orch/par_thread_pool.rs b/src/par_thread_pool.rs similarity index 96% rename from src/orch/par_thread_pool.rs rename to src/par_thread_pool.rs index 842a0f0..5df34a8 100644 --- a/src/orch/par_thread_pool.rs +++ b/src/par_thread_pool.rs @@ -1,4 +1,4 @@ -use crate::{generic_values::runner_results::Fallibility, orch::num_spawned::NumSpawned}; +use crate::{generic_values::runner_results::Fallibility, orch::NumSpawned}; use alloc::vec::Vec; use core::num::NonZeroUsize; use orx_concurrent_bag::ConcurrentBag; From b61ea49ad85dea2e671736d205c4e1a3ef602c53 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 15:15:29 +0200 Subject: [PATCH 200/264] renaming --- src/collect_into/collect.rs | 2 +- src/collect_into/fixed_vec.rs | 2 +- src/collect_into/par_collect_into.rs | 2 +- src/collect_into/split_vec.rs | 2 +- src/collect_into/vec.rs | 2 +- src/computational_variants/fallible_option.rs | 2 +- src/computational_variants/fallible_result/map_result.rs | 2 +- src/computational_variants/fallible_result/par_result.rs | 2 +- src/computational_variants/fallible_result/xap_result.rs | 2 +- src/computational_variants/map.rs | 2 +- src/computational_variants/par.rs | 2 +- src/computational_variants/tests/map/collect.rs | 2 +- src/computational_variants/tests/map/find.rs | 4 +++- src/computational_variants/tests/map/reduce.rs | 2 +- 
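With PATCH 199 the ParThreadPool trait leaves the orchestration module for a top-level par_thread_pool module plus a crate-root re-export, so pool adapters such as RayonOrchestrator and ScopedThreadPoolOrchestrator depend on the trait rather than on orchestrator internals. For code outside the crate the visible effect is purely the import path (crate name assumed to be orx_parallel); the trait's methods are not shown in these diffs, so the sketch below only demonstrates naming the bound:

// within the crate, as in the diffs above:
// before: use crate::orch::ParThreadPool;
// after:  use crate::par_thread_pool::ParThreadPool;

// from outside the crate, the trait now sits at the root:
use orx_parallel::ParThreadPool;

// Generic code can name the pool abstraction directly; no methods are
// called here because the trait's surface is not part of these diffs.
fn accepts_any_pool<P: ParThreadPool>(_pool: &P) {}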
src/computational_variants/tests/xap/collect.rs | 2 +- src/computational_variants/tests/xap/find.rs | 2 +- src/computational_variants/tests/xap/reduce.rs | 2 +- src/computational_variants/xap.rs | 2 +- src/executor/fixed_chunk_executor/chunk_size.rs | 2 +- src/executor/fixed_chunk_executor/parallel_executor.rs | 6 ++++-- src/executor/parallel_compute/collect_arbitrary.rs | 2 +- src/executor/parallel_compute/collect_ordered.rs | 2 +- src/executor/parallel_compute/next.rs | 2 +- src/executor/parallel_compute/next_any.rs | 2 +- src/executor/parallel_compute/reduce.rs | 2 +- src/executor/parallel_executor.rs | 2 +- src/into_par_iter.rs | 2 +- src/iter/special_iterators.rs | 2 +- src/iter_into_par_iter.rs | 2 +- src/lib.rs | 4 ++-- src/par_iter.rs | 2 +- src/par_iter_option.rs | 2 +- src/par_iter_result.rs | 2 +- src/par_thread_pool.rs | 2 +- src/parallel_drainable.rs | 2 +- src/parallelizable.rs | 2 +- src/parallelizable_collection.rs | 2 +- src/parallelizable_collection_mut.rs | 2 +- src/{orch => runner}/computation_kind.rs | 0 .../implementations/default_std_orchestrator.rs | 2 +- src/{orch => runner}/implementations/mod.rs | 0 src/{orch => runner}/implementations/rayon.rs | 2 +- src/{orch => runner}/implementations/scoped_threadpool.rs | 2 +- src/{orch => runner}/implementations/tests/mod.rs | 0 src/{orch => runner}/implementations/tests/rayon.rs | 2 +- .../implementations/tests/scoped_threadpool.rs | 2 +- src/{orch => runner}/implementations/tests/utils.rs | 2 +- src/{orch => runner}/mod.rs | 2 +- src/{orch => runner}/num_spawned.rs | 0 src/{orch => runner}/orchestrator.rs | 2 +- src/using/collect_into/collect.rs | 2 +- src/using/collect_into/fixed_vec.rs | 2 +- src/using/collect_into/split_vec.rs | 2 +- src/using/collect_into/u_par_collect_into.rs | 2 +- src/using/collect_into/vec.rs | 2 +- src/using/computational_variants/u_map.rs | 2 +- src/using/computational_variants/u_par.rs | 2 +- src/using/computational_variants/u_xap.rs | 2 +- src/using/executor/parallel_compute/collect_arbitrary.rs | 2 +- src/using/executor/parallel_compute/collect_ordered.rs | 2 +- src/using/executor/parallel_compute/next.rs | 2 +- src/using/executor/parallel_compute/next_any.rs | 2 +- src/using/executor/parallel_compute/reduce.rs | 2 +- src/using/u_par_iter.rs | 2 +- 64 files changed, 66 insertions(+), 62 deletions(-) rename src/{orch => runner}/computation_kind.rs (100%) rename src/{orch => runner}/implementations/default_std_orchestrator.rs (97%) rename src/{orch => runner}/implementations/mod.rs (100%) rename src/{orch => runner}/implementations/rayon.rs (96%) rename src/{orch => runner}/implementations/scoped_threadpool.rs (96%) rename src/{orch => runner}/implementations/tests/mod.rs (100%) rename src/{orch => runner}/implementations/tests/rayon.rs (88%) rename src/{orch => runner}/implementations/tests/scoped_threadpool.rs (86%) rename src/{orch => runner}/implementations/tests/utils.rs (93%) rename src/{orch => runner}/mod.rs (82%) rename src/{orch => runner}/num_spawned.rs (100%) rename src/{orch => runner}/orchestrator.rs (99%) diff --git a/src/collect_into/collect.rs b/src/collect_into/collect.rs index b839aee..d08862e 100644 --- a/src/collect_into/collect.rs +++ b/src/collect_into/collect.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::generic_values::runner_results::{ Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop, }; -use crate::orch::{NumSpawned, Orchestrator}; +use crate::runner::{NumSpawned, Orchestrator}; use crate::executor::parallel_compute as prc; use 
crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs index b369acf..2402161 100644 --- a/src/collect_into/fixed_vec.rs +++ b/src/collect_into/fixed_vec.rs @@ -2,7 +2,7 @@ use super::par_collect_into::ParCollectIntoCore; use crate::Params; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; -use crate::orch::Orchestrator; +use crate::runner::Orchestrator; use alloc::vec::Vec; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs index 0e7e8ab..19cf859 100644 --- a/src/collect_into/par_collect_into.rs +++ b/src/collect_into/par_collect_into.rs @@ -1,7 +1,7 @@ use crate::Params; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; -use crate::orch::Orchestrator; +use crate::runner::Orchestrator; use crate::using::UParCollectIntoCore; use orx_concurrent_iter::ConcurrentIter; use orx_iterable::Collection; diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs index fa16533..61a4f5d 100644 --- a/src/collect_into/split_vec.rs +++ b/src/collect_into/split_vec.rs @@ -4,7 +4,7 @@ use crate::Params; use crate::collect_into::utils::split_vec_reserve; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; -use crate::orch::Orchestrator; +use crate::runner::Orchestrator; use orx_concurrent_iter::ConcurrentIter; #[cfg(test)] use orx_pinned_vec::PinnedVec; diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs index 41b26ed..526bea3 100644 --- a/src/collect_into/vec.rs +++ b/src/collect_into/vec.rs @@ -4,7 +4,7 @@ use crate::collect_into::collect::map_collect_into; use crate::collect_into::utils::extend_vec_from_split; use crate::generic_values::runner_results::{Fallibility, Infallible}; use crate::generic_values::{TransformableValues, Values}; -use crate::orch::Orchestrator; +use crate::runner::Orchestrator; use alloc::vec::Vec; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::FixedVec; diff --git a/src/computational_variants/fallible_option.rs b/src/computational_variants/fallible_option.rs index c86e45c..fc29408 100644 --- a/src/computational_variants/fallible_option.rs +++ b/src/computational_variants/fallible_option.rs @@ -1,6 +1,6 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterResult, - orch::{DefaultOrchestrator, Orchestrator}, + runner::{DefaultOrchestrator, Orchestrator}, par_iter_option::{ParIterOption, ResultIntoOption}, }; use core::marker::PhantomData; diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 5a50f67..a0e128f 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,5 +1,5 @@ use crate::computational_variants::ParMap; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::executor::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 
484769a..ed5acc8 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,5 +1,5 @@ use crate::computational_variants::Par; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::executor::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index a43ca85..a416aa7 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -1,7 +1,7 @@ use crate::computational_variants::ParXap; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::executor::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, Params}; diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 7df02a1..57364d4 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -1,7 +1,7 @@ use super::xap::ParXap; use crate::computational_variants::fallible_result::ParMapResult; use crate::generic_values::{Vector, WhilstAtom}; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::executor::parallel_compute as prc; use crate::using::{UParMap, UsingClone, UsingFun}; diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 2a73566..1cdebad 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -1,7 +1,7 @@ use super::{map::ParMap, xap::ParXap}; use crate::computational_variants::fallible_result::ParResult; use crate::generic_values::{Vector, WhilstAtom}; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::executor::parallel_compute as prc; use crate::using::{UPar, UsingClone, UsingFun}; diff --git a/src/computational_variants/tests/map/collect.rs b/src/computational_variants/tests/map/collect.rs index aa28a5b..571e53a 100644 --- a/src/computational_variants/tests/map/collect.rs +++ b/src/computational_variants/tests/map/collect.rs @@ -1,5 +1,5 @@ use crate::collect_into::collect::map_collect_into; -use crate::{IterationOrder, Params, orch::DefaultOrchestrator}; +use crate::{IterationOrder, Params, runner::DefaultOrchestrator}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs index 28a04e7..b42da93 100644 --- a/src/computational_variants/tests/map/find.rs +++ b/src/computational_variants/tests/map/find.rs @@ -1,4 +1,6 @@ -use crate::{Params, default_fns::map_self, orch::DefaultOrchestrator, executor::parallel_compute}; +use crate::{ + Params, default_fns::map_self, executor::parallel_compute, runner::DefaultOrchestrator, +}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; diff --git a/src/computational_variants/tests/map/reduce.rs 
b/src/computational_variants/tests/map/reduce.rs index f2b08a9..79d9646 100644 --- a/src/computational_variants/tests/map/reduce.rs +++ b/src/computational_variants/tests/map/reduce.rs @@ -1,4 +1,4 @@ -use crate::{Params, default_fns::map_self, orch::DefaultOrchestrator, executor::parallel_compute}; +use crate::{Params, default_fns::map_self, runner::DefaultOrchestrator, executor::parallel_compute}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; diff --git a/src/computational_variants/tests/xap/collect.rs b/src/computational_variants/tests/xap/collect.rs index 125fbc1..d30ab69 100644 --- a/src/computational_variants/tests/xap/collect.rs +++ b/src/computational_variants/tests/xap/collect.rs @@ -1,7 +1,7 @@ use crate::ParIter; use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use crate::orch::DefaultOrchestrator; +use crate::runner::DefaultOrchestrator; use crate::{IterationOrder, Params}; use alloc::format; use alloc::string::{String, ToString}; diff --git a/src/computational_variants/tests/xap/find.rs b/src/computational_variants/tests/xap/find.rs index 5e487da..d7643b3 100644 --- a/src/computational_variants/tests/xap/find.rs +++ b/src/computational_variants/tests/xap/find.rs @@ -2,7 +2,7 @@ use crate::ParIter; use crate::Params; use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use crate::orch::DefaultOrchestrator; +use crate::runner::DefaultOrchestrator; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; diff --git a/src/computational_variants/tests/xap/reduce.rs b/src/computational_variants/tests/xap/reduce.rs index 55593ea..463fa42 100644 --- a/src/computational_variants/tests/xap/reduce.rs +++ b/src/computational_variants/tests/xap/reduce.rs @@ -2,7 +2,7 @@ use crate::ParIter; use crate::Params; use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use crate::orch::DefaultOrchestrator; +use crate::runner::DefaultOrchestrator; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index 87402d4..7dc8316 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -1,7 +1,7 @@ use crate::computational_variants::fallible_result::ParXapResult; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_result::IntoResult; use crate::executor::parallel_compute as prc; use crate::using::{UParXap, UsingClone, UsingFun}; diff --git a/src/executor/fixed_chunk_executor/chunk_size.rs b/src/executor/fixed_chunk_executor/chunk_size.rs index f63632b..5b0a7fc 100644 --- a/src/executor/fixed_chunk_executor/chunk_size.rs +++ b/src/executor/fixed_chunk_executor/chunk_size.rs @@ -1,4 +1,4 @@ -use crate::{orch::ComputationKind, parameters::ChunkSize}; +use crate::{parameters::ChunkSize, runner::ComputationKind}; use core::num::NonZeroUsize; const MAX_CHUNK_SIZE: usize = 1 << 20; diff --git a/src/executor/fixed_chunk_executor/parallel_executor.rs b/src/executor/fixed_chunk_executor/parallel_executor.rs index 90f4fc9..dc4e872 100644 --- a/src/executor/fixed_chunk_executor/parallel_executor.rs +++ b/src/executor/fixed_chunk_executor/parallel_executor.rs @@ -1,6 +1,8 @@ use super::{chunk_size::ResolvedChunkSize, thread_executor::FixedChunkThreadExecutor}; -use 
crate::orch::ComputationKind; -use crate::{executor::parallel_executor::ParallelExecutor, orch::NumSpawned, parameters::Params}; +use crate::runner::ComputationKind; +use crate::{ + executor::parallel_executor::ParallelExecutor, parameters::Params, runner::NumSpawned, +}; use core::{ num::NonZeroUsize, sync::atomic::{AtomicUsize, Ordering}, diff --git a/src/executor/parallel_compute/collect_arbitrary.rs b/src/executor/parallel_compute/collect_arbitrary.rs index 2308e83..cbf904d 100644 --- a/src/executor/parallel_compute/collect_arbitrary.rs +++ b/src/executor/parallel_compute/collect_arbitrary.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::ParallelCollectArbitrary; -use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; +use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use orx_concurrent_bag::ConcurrentBag; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/executor/parallel_compute/collect_ordered.rs b/src/executor/parallel_compute/collect_ordered.rs index 737ca93..113a44d 100644 --- a/src/executor/parallel_compute/collect_ordered.rs +++ b/src/executor/parallel_compute/collect_ordered.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, ParallelCollect}; -use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; +use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use orx_concurrent_iter::ConcurrentIter; use orx_concurrent_ordered_bag::ConcurrentOrderedBag; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/executor/parallel_compute/next.rs b/src/executor/parallel_compute/next.rs index 9521105..a73c824 100644 --- a/src/executor/parallel_compute/next.rs +++ b/src/executor/parallel_compute/next.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx}; -use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf}; +use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/executor/parallel_compute/next_any.rs b/src/executor/parallel_compute/next_any.rs index cab2dcf..2ec7638 100644 --- a/src/executor/parallel_compute/next_any.rs +++ b/src/executor/parallel_compute/next_any.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf}; +use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/executor/parallel_compute/reduce.rs b/src/executor/parallel_compute/reduce.rs index ca429ae..4402589 100644 --- a/src/executor/parallel_compute/reduce.rs +++ b/src/executor/parallel_compute/reduce.rs @@ -2,7 +2,7 @@ use crate::Params; use crate::executor::thread_compute as th; use crate::generic_values::Values; use crate::generic_values::runner_results::Fallibility; -use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, 
ThreadRunnerOf}; +use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf}; use orx_concurrent_iter::ConcurrentIter; pub fn m( diff --git a/src/executor/parallel_executor.rs b/src/executor/parallel_executor.rs index bf367b4..abdbf83 100644 --- a/src/executor/parallel_executor.rs +++ b/src/executor/parallel_executor.rs @@ -1,6 +1,6 @@ use super::thread_executor::ThreadExecutor; use crate::{ - orch::{ComputationKind, NumSpawned}, + runner::{ComputationKind, NumSpawned}, parameters::Params, }; use core::num::NonZeroUsize; diff --git a/src/into_par_iter.rs b/src/into_par_iter.rs index be77677..b5486a1 100644 --- a/src/into_par_iter.rs +++ b/src/into_par_iter.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; +use crate::{Params, computational_variants::Par, runner::DefaultOrchestrator}; use orx_concurrent_iter::{ConcurrentIter, IntoConcurrentIter}; /// Trait to convert a source (collection or generator) into a parallel iterator; i.e., [`ParIter`], diff --git a/src/iter/special_iterators.rs b/src/iter/special_iterators.rs index 3f0a60f..028ce6e 100644 --- a/src/iter/special_iterators.rs +++ b/src/iter/special_iterators.rs @@ -1,4 +1,4 @@ -use crate::{computational_variants::Par, orch::DefaultOrchestrator}; +use crate::{computational_variants::Par, runner::DefaultOrchestrator}; use orx_concurrent_iter::implementations::ConIterEmpty; /// An empty parallel iterator which does not yield any elements. diff --git a/src/iter_into_par_iter.rs b/src/iter_into_par_iter.rs index 696b628..c13f2a3 100644 --- a/src/iter_into_par_iter.rs +++ b/src/iter_into_par_iter.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; +use crate::{Params, computational_variants::Par, runner::DefaultOrchestrator}; use orx_concurrent_iter::{IterIntoConcurrentIter, implementations::ConIterOfIter}; /// Any regular iterator implements [`IterIntoParIter`] trait allowing them to be used diff --git a/src/lib.rs b/src/lib.rs index adcbbd3..14f694c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -30,8 +30,6 @@ mod into_par_iter; /// Module for creating special iterators. pub mod iter; mod iter_into_par_iter; -/// Orchestrator for parallel execution and managing threads. -pub mod orch; mod par_iter; mod par_iter_option; mod par_iter_result; @@ -41,6 +39,8 @@ mod parallelizable; mod parallelizable_collection; mod parallelizable_collection_mut; mod parameters; +/// Orchestrator for parallel execution and managing threads. +pub mod runner; mod special_type_sets; /// Module defining parallel iterators with mutable access to values distributed to each thread. 
pub mod using; diff --git a/src/par_iter.rs b/src/par_iter.rs index 3f0642c..dfdd65f 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -1,6 +1,6 @@ use crate::ParIterResult; use crate::computational_variants::fallible_option::ParOption; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::par_iter_option::{IntoOption, ParIterOption}; use crate::par_iter_result::IntoResult; use crate::using::{UsingClone, UsingFun}; diff --git a/src/par_iter_option.rs b/src/par_iter_option.rs index 94e2df3..c3b86a3 100644 --- a/src/par_iter_option.rs +++ b/src/par_iter_option.rs @@ -1,5 +1,5 @@ use crate::default_fns::{map_count, reduce_sum, reduce_unit}; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Sum}; use core::cmp::Ordering; diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs index fc336da..c2c112a 100644 --- a/src/par_iter_result.rs +++ b/src/par_iter_result.rs @@ -1,5 +1,5 @@ use crate::default_fns::{map_count, reduce_sum, reduce_unit}; -use crate::orch::{DefaultOrchestrator, Orchestrator}; +use crate::runner::{DefaultOrchestrator, Orchestrator}; use crate::{ChunkSize, IterationOrder, NumThreads, Sum}; use crate::{ParCollectInto, ParIter, generic_values::fallible_iterators::ResultOfIter}; use core::cmp::Ordering; diff --git a/src/par_thread_pool.rs b/src/par_thread_pool.rs index 5df34a8..105ad41 100644 --- a/src/par_thread_pool.rs +++ b/src/par_thread_pool.rs @@ -1,4 +1,4 @@ -use crate::{generic_values::runner_results::Fallibility, orch::NumSpawned}; +use crate::{generic_values::runner_results::Fallibility, runner::NumSpawned}; use alloc::vec::Vec; use core::num::NonZeroUsize; use orx_concurrent_bag::ConcurrentBag; diff --git a/src/parallel_drainable.rs b/src/parallel_drainable.rs index 70f8f1d..d291733 100644 --- a/src/parallel_drainable.rs +++ b/src/parallel_drainable.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; +use crate::{Params, computational_variants::Par, runner::DefaultOrchestrator}; use core::ops::RangeBounds; use orx_concurrent_iter::ConcurrentDrainableOverSlice; diff --git a/src/parallelizable.rs b/src/parallelizable.rs index fc01aca..0c4f381 100644 --- a/src/parallelizable.rs +++ b/src/parallelizable.rs @@ -1,4 +1,4 @@ -use crate::{computational_variants::Par, orch::DefaultOrchestrator, parameters::Params}; +use crate::{computational_variants::Par, runner::DefaultOrchestrator, parameters::Params}; use orx_concurrent_iter::ConcurrentIterable; /// `Parallelizable` types are those from which parallel iterators can be created diff --git a/src/parallelizable_collection.rs b/src/parallelizable_collection.rs index 883dbcb..754d3bd 100644 --- a/src/parallelizable_collection.rs +++ b/src/parallelizable_collection.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, orch::DefaultOrchestrator}; +use crate::{Params, computational_variants::Par, runner::DefaultOrchestrator}; use orx_concurrent_iter::{ConcurrentCollection, ConcurrentIterable}; /// A type implementing [`ParallelizableCollection`] is a collection owning the elements such that diff --git a/src/parallelizable_collection_mut.rs b/src/parallelizable_collection_mut.rs index 063b546..2225836 100644 --- a/src/parallelizable_collection_mut.rs +++ b/src/parallelizable_collection_mut.rs @@ -1,6 +1,6 @@ use crate::{ ParIter, ParallelizableCollection, Params, 
     computational_variants::Par,
-    orch::DefaultOrchestrator,
+    runner::DefaultOrchestrator,
 };
 use orx_concurrent_iter::ConcurrentCollectionMut;
diff --git a/src/orch/computation_kind.rs b/src/runner/computation_kind.rs
similarity index 100%
rename from src/orch/computation_kind.rs
rename to src/runner/computation_kind.rs
diff --git a/src/orch/implementations/default_std_orchestrator.rs b/src/runner/implementations/default_std_orchestrator.rs
similarity index 97%
rename from src/orch/implementations/default_std_orchestrator.rs
rename to src/runner/implementations/default_std_orchestrator.rs
index c5667d4..6d5d6aa 100644
--- a/src/orch/implementations/default_std_orchestrator.rs
+++ b/src/runner/implementations/default_std_orchestrator.rs
@@ -1,5 +1,5 @@
 use crate::par_thread_pool::ParThreadPool;
-use crate::{DefaultExecutor, orch::Orchestrator};
+use crate::{DefaultExecutor, runner::Orchestrator};
 use core::num::NonZeroUsize;
 
 // POOL
diff --git a/src/orch/implementations/mod.rs b/src/runner/implementations/mod.rs
similarity index 100%
rename from src/orch/implementations/mod.rs
rename to src/runner/implementations/mod.rs
diff --git a/src/orch/implementations/rayon.rs b/src/runner/implementations/rayon.rs
similarity index 96%
rename from src/orch/implementations/rayon.rs
rename to src/runner/implementations/rayon.rs
index 9da5960..9d05c5a 100644
--- a/src/orch/implementations/rayon.rs
+++ b/src/runner/implementations/rayon.rs
@@ -1,5 +1,5 @@
 use crate::{
-    DefaultExecutor, ParallelExecutor, orch::Orchestrator, par_thread_pool::ParThreadPool,
+    DefaultExecutor, ParallelExecutor, par_thread_pool::ParThreadPool, runner::Orchestrator,
 };
 use core::{marker::PhantomData, num::NonZeroUsize};
 use orx_self_or::SoR;
diff --git a/src/orch/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs
similarity index 96%
rename from src/orch/implementations/scoped_threadpool.rs
rename to src/runner/implementations/scoped_threadpool.rs
index 2dded51..480539c 100644
--- a/src/orch/implementations/scoped_threadpool.rs
+++ b/src/runner/implementations/scoped_threadpool.rs
@@ -1,5 +1,5 @@
 use crate::{
-    DefaultExecutor, ParallelExecutor, orch::Orchestrator, par_thread_pool::ParThreadPool,
+    DefaultExecutor, ParallelExecutor, par_thread_pool::ParThreadPool, runner::Orchestrator,
 };
 use core::{marker::PhantomData, num::NonZeroUsize};
 use orx_self_or::SoM;
diff --git a/src/orch/implementations/tests/mod.rs b/src/runner/implementations/tests/mod.rs
similarity index 100%
rename from src/orch/implementations/tests/mod.rs
rename to src/runner/implementations/tests/mod.rs
diff --git a/src/orch/implementations/tests/rayon.rs b/src/runner/implementations/tests/rayon.rs
similarity index 88%
rename from src/orch/implementations/tests/rayon.rs
rename to src/runner/implementations/tests/rayon.rs
index 9f153e9..7475df6 100644
--- a/src/orch/implementations/tests/rayon.rs
+++ b/src/runner/implementations/tests/rayon.rs
@@ -1,5 +1,5 @@
 use super::run_map;
-use crate::{IterationOrder, orch::implementations::RayonOrchestrator};
+use crate::{IterationOrder, runner::implementations::RayonOrchestrator};
 use test_case::test_matrix;
 
 #[cfg(miri)]
diff --git a/src/orch/implementations/tests/scoped_threadpool.rs b/src/runner/implementations/tests/scoped_threadpool.rs
similarity index 86%
rename from src/orch/implementations/tests/scoped_threadpool.rs
rename to src/runner/implementations/tests/scoped_threadpool.rs
index bb553de..5817a27 100644
--- a/src/orch/implementations/tests/scoped_threadpool.rs
+++ b/src/runner/implementations/tests/scoped_threadpool.rs
@@ -1,5 +1,5 @@
 use super::run_map;
-use crate::{IterationOrder, orch::implementations::ScopedThreadPoolOrchestrator};
+use crate::{IterationOrder, runner::implementations::ScopedThreadPoolOrchestrator};
 use scoped_threadpool::Pool;
 use test_case::test_matrix;
 
diff --git a/src/orch/implementations/tests/utils.rs b/src/runner/implementations/tests/utils.rs
similarity index 93%
rename from src/orch/implementations/tests/utils.rs
rename to src/runner/implementations/tests/utils.rs
index 31b3637..dfad0d7 100644
--- a/src/orch/implementations/tests/utils.rs
+++ b/src/runner/implementations/tests/utils.rs
@@ -1,4 +1,4 @@
-use crate::{IntoParIter, IterationOrder, ParIter, orch::Orchestrator};
+use crate::{IntoParIter, IterationOrder, ParIter, runner::Orchestrator};
 use alloc::format;
 use alloc::string::{String, ToString};
 use alloc::vec::Vec;
diff --git a/src/orch/mod.rs b/src/runner/mod.rs
similarity index 82%
rename from src/orch/mod.rs
rename to src/runner/mod.rs
index 9d50e70..d130667 100644
--- a/src/orch/mod.rs
+++ b/src/runner/mod.rs
@@ -5,7 +5,7 @@ mod orchestrator;
 
 pub(crate) use orchestrator::{SharedStateOf, ThreadRunnerOf};
 
-pub use crate::orch::implementations::DefaultStdOrchestrator;
+pub use crate::runner::implementations::DefaultStdOrchestrator;
 pub use computation_kind::ComputationKind;
 pub use num_spawned::NumSpawned;
 pub use orchestrator::Orchestrator;
diff --git a/src/orch/num_spawned.rs b/src/runner/num_spawned.rs
similarity index 100%
rename from src/orch/num_spawned.rs
rename to src/runner/num_spawned.rs
diff --git a/src/orch/orchestrator.rs b/src/runner/orchestrator.rs
similarity index 99%
rename from src/orch/orchestrator.rs
rename to src/runner/orchestrator.rs
index 8616ae4..32bfcc3 100644
--- a/src/orch/orchestrator.rs
+++ b/src/runner/orchestrator.rs
@@ -1,8 +1,8 @@
 use crate::{
     NumThreads, ParallelExecutor, Params,
     generic_values::runner_results::{Fallibility, Infallible, Never},
-    orch::{ComputationKind, NumSpawned},
     par_thread_pool::{ParThreadPool, ParThreadPoolCompute},
+    runner::{ComputationKind, NumSpawned},
 };
 use alloc::vec::Vec;
 use core::num::NonZeroUsize;
diff --git a/src/using/collect_into/collect.rs b/src/using/collect_into/collect.rs
index 8eda27d..9388be4 100644
--- a/src/using/collect_into/collect.rs
+++ b/src/using/collect_into/collect.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::generic_values::runner_results::{
     Infallible, ParallelCollect, ParallelCollectArbitrary,
 };
-use crate::orch::{NumSpawned, Orchestrator};
+use crate::runner::{NumSpawned, Orchestrator};
 use crate::using::executor::parallel_compute as prc;
 use crate::using::using_variants::Using;
 use crate::{IterationOrder, generic_values::Values};
diff --git a/src/using/collect_into/fixed_vec.rs b/src/using/collect_into/fixed_vec.rs
index 54b958d..f6aaaa4 100644
--- a/src/using/collect_into/fixed_vec.rs
+++ b/src/using/collect_into/fixed_vec.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::orch::Orchestrator;
+use crate::runner::Orchestrator;
 use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore;
 use alloc::vec::Vec;
 use orx_concurrent_iter::ConcurrentIter;
diff --git a/src/using/collect_into/split_vec.rs b/src/using/collect_into/split_vec.rs
index 47e1bf7..6e62640 100644
--- a/src/using/collect_into/split_vec.rs
+++ b/src/using/collect_into/split_vec.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::collect_into::utils::split_vec_reserve;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::orch::Orchestrator;
+use crate::runner::Orchestrator;
 use crate::using::collect_into::collect::{map_collect_into, xap_collect_into};
 use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore;
 use orx_concurrent_iter::ConcurrentIter;
diff --git a/src/using/collect_into/u_par_collect_into.rs b/src/using/collect_into/u_par_collect_into.rs
index f4f113a..f346e36 100644
--- a/src/using/collect_into/u_par_collect_into.rs
+++ b/src/using/collect_into/u_par_collect_into.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::collect_into::ParCollectIntoCore;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::orch::Orchestrator;
+use crate::runner::Orchestrator;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
diff --git a/src/using/collect_into/vec.rs b/src/using/collect_into/vec.rs
index 52d70df..edfae42 100644
--- a/src/using/collect_into/vec.rs
+++ b/src/using/collect_into/vec.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::collect_into::utils::extend_vec_from_split;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::orch::Orchestrator;
+use crate::runner::Orchestrator;
 use crate::using::collect_into::collect::map_collect_into;
 use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore;
 use crate::using::using_variants::Using;
diff --git a/src/using/computational_variants/u_map.rs b/src/using/computational_variants/u_map.rs
index 399e1d1..3c65550 100644
--- a/src/using/computational_variants/u_map.rs
+++ b/src/using/computational_variants/u_map.rs
@@ -1,6 +1,6 @@
 use crate::ParIterUsing;
 use crate::generic_values::Vector;
-use crate::orch::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, Orchestrator};
 use crate::using::computational_variants::u_xap::UParXap;
 use crate::using::executor::parallel_compute as prc;
 use crate::using::using_variants::Using;
diff --git a/src/using/computational_variants/u_par.rs b/src/using/computational_variants/u_par.rs
index 7f64e77..a5c8737 100644
--- a/src/using/computational_variants/u_par.rs
+++ b/src/using/computational_variants/u_par.rs
@@ -1,7 +1,7 @@
 use crate::ParIterUsing;
 use crate::default_fns::u_map_self;
 use crate::generic_values::Vector;
-use crate::orch::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, Orchestrator};
 use crate::using::computational_variants::u_map::UParMap;
 use crate::using::computational_variants::u_xap::UParXap;
 use crate::using::executor::parallel_compute as prc;
diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs
index 705b455..ef76a48 100644
--- a/src/using/computational_variants/u_xap.rs
+++ b/src/using/computational_variants/u_xap.rs
@@ -2,7 +2,7 @@ use crate::using::executor::parallel_compute as prc;
 use crate::{
     ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterUsing, Params,
     generic_values::{TransformableValues, runner_results::Infallible},
-    orch::{DefaultOrchestrator, Orchestrator},
+    runner::{DefaultOrchestrator, Orchestrator},
     using::using_variants::Using,
 };
 use orx_concurrent_iter::ConcurrentIter;
diff --git a/src/using/executor/parallel_compute/collect_arbitrary.rs b/src/using/executor/parallel_compute/collect_arbitrary.rs
index 2a378c1..4e969e7 100644
--- a/src/using/executor/parallel_compute/collect_arbitrary.rs
+++ b/src/using/executor/parallel_compute/collect_arbitrary.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::ParallelCollectArbitrary;
-use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_bag::ConcurrentBag;
diff --git a/src/using/executor/parallel_compute/collect_ordered.rs b/src/using/executor/parallel_compute/collect_ordered.rs
index 4249be3..20bc943 100644
--- a/src/using/executor/parallel_compute/collect_ordered.rs
+++ b/src/using/executor/parallel_compute/collect_ordered.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::{Fallibility, ParallelCollect};
-use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
diff --git a/src/using/executor/parallel_compute/next.rs b/src/using/executor/parallel_compute/next.rs
index 8f54df1..d1e4278 100644
--- a/src/using/executor/parallel_compute/next.rs
+++ b/src/using/executor/parallel_compute/next.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx};
-use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf};
+use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
diff --git a/src/using/executor/parallel_compute/next_any.rs b/src/using/executor/parallel_compute/next_any.rs
index 2084bb6..4676023 100644
--- a/src/using/executor/parallel_compute/next_any.rs
+++ b/src/using/executor/parallel_compute/next_any.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::Fallibility;
-use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf};
+use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
diff --git a/src/using/executor/parallel_compute/reduce.rs b/src/using/executor/parallel_compute/reduce.rs
index b327744..5c588e6 100644
--- a/src/using/executor/parallel_compute/reduce.rs
+++ b/src/using/executor/parallel_compute/reduce.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::Fallibility;
-use crate::orch::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs
index d9abeed..06ce580 100644
--- a/src/using/u_par_iter.rs
+++ b/src/using/u_par_iter.rs
@@ -1,7 +1,7 @@
 use crate::default_fns::*;
 use crate::{
     ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum,
-    orch::{DefaultOrchestrator, Orchestrator},
+    runner::{DefaultOrchestrator, Orchestrator},
     using::using_variants::Using,
 };
 use core::cmp::Ordering;

From b02bf0e84d8b2ead5eec49832946ea7e11b226 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Wed, 17 Sep 2025 15:16:42 +0200
Subject: [PATCH 201/264] renaming

---
 src/collect_into/collect.rs | 8 ++++----
 src/collect_into/fixed_vec.rs | 8 ++++----
 src/collect_into/par_collect_into.rs | 8 ++++----
 src/collect_into/split_vec.rs | 8 ++++----
 src/collect_into/vec.rs | 8 ++++----
 src/computational_variants/fallible_option.rs | 10 +++++-----
 .../fallible_result/map_result.rs | 10 +++++-----
 .../fallible_result/par_result.rs | 12 ++++++------
 .../fallible_result/xap_result.rs | 10 +++++-----
 src/computational_variants/map.rs | 14 +++++++-------
 src/computational_variants/par.rs | 16 ++++++++--------
 src/computational_variants/xap.rs | 14 +++++++-------
 .../parallel_compute/collect_arbitrary.rs | 6 +++---
 src/executor/parallel_compute/collect_ordered.rs | 6 +++---
 src/executor/parallel_compute/next.rs | 6 +++---
 src/executor/parallel_compute/next_any.rs | 6 +++---
 src/executor/parallel_compute/reduce.rs | 6 +++---
 src/lib.rs | 2 +-
 src/par_iter.rs | 6 +++---
 src/par_iter_option.rs | 8 ++++----
 src/par_iter_result.rs | 8 ++++----
 .../implementations/default_std_orchestrator.rs | 4 ++--
 src/runner/implementations/rayon.rs | 4 ++--
 src/runner/implementations/scoped_threadpool.rs | 4 ++--
 src/runner/implementations/tests/utils.rs | 4 ++--
 src/runner/mod.rs | 6 +++---
 .../{orchestrator.rs => parallel_runner.rs} | 10 +++++-----
 src/using/collect_into/collect.rs | 6 +++---
 src/using/collect_into/fixed_vec.rs | 6 +++---
 src/using/collect_into/split_vec.rs | 6 +++---
 src/using/collect_into/u_par_collect_into.rs | 6 +++---
 src/using/collect_into/vec.rs | 6 +++---
 src/using/computational_variants/u_map.rs | 14 +++++++-------
 src/using/computational_variants/u_par.rs | 14 +++++++-------
 src/using/computational_variants/u_xap.rs | 14 +++++++-------
 .../parallel_compute/collect_arbitrary.rs | 6 +++---
 .../executor/parallel_compute/collect_ordered.rs | 6 +++---
 src/using/executor/parallel_compute/next.rs | 6 +++---
 src/using/executor/parallel_compute/next_any.rs | 6 +++---
 src/using/executor/parallel_compute/reduce.rs | 6 +++---
 src/using/u_par_iter.rs | 8 ++++----
 41 files changed, 161 insertions(+), 161 deletions(-)
 rename src/runner/{orchestrator.rs => parallel_runner.rs} (93%)

diff --git a/src/collect_into/collect.rs b/src/collect_into/collect.rs
index d08862e..da7a585 100644
--- a/src/collect_into/collect.rs
+++ b/src/collect_into/collect.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::generic_values::runner_results::{
     Fallibility, Infallible, ParallelCollect, ParallelCollectArbitrary, Stop,
 };
-use crate::runner::{NumSpawned, Orchestrator};
+use crate::runner::{NumSpawned, ParallelRunner};
 use crate::executor::parallel_compute as prc;
 use crate::{IterationOrder, generic_values::Values};
 use orx_concurrent_iter::ConcurrentIter;
@@ -16,7 +16,7 @@ pub fn map_collect_into(
     pinned_vec: P,
 ) -> (NumSpawned, P)
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(I::Item) -> O + Sync,
     O: Send,
@@ -57,7 +57,7 @@ pub fn xap_collect_into(
     pinned_vec: P,
 ) -> (NumSpawned, P)
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
@@ -123,7 +123,7 @@ pub fn xap_try_collect_into(
     Result::Error>,
 )
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/collect_into/fixed_vec.rs b/src/collect_into/fixed_vec.rs
index 2402161..8b3905d 100644
--- a/src/collect_into/fixed_vec.rs
+++ b/src/collect_into/fixed_vec.rs
@@ -2,7 +2,7 @@ use super::par_collect_into::ParCollectIntoCore;
 use crate::Params;
 use crate::generic_values::runner_results::{Fallibility, Infallible};
 use crate::generic_values::{TransformableValues, Values};
-use crate::runner::Orchestrator;
+use crate::runner::ParallelRunner;
 use alloc::vec::Vec;
 use orx_concurrent_iter::ConcurrentIter;
 use orx_fixed_vec::FixedVec;
@@ -22,7 +22,7 @@ where
     fn m_collect_into(self, orchestrator: R, params: Params, iter: I, map1: M1) -> Self
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         M1: Fn(I::Item) -> O + Sync,
         O: Send,
@@ -39,7 +39,7 @@ where
         xap1: X1,
     ) -> Self
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         Vo: TransformableValues,
         X1: Fn(I::Item) -> Vo + Sync,
@@ -56,7 +56,7 @@ where
         xap1: X1,
     ) -> Result::Error>
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         X1: Fn(I::Item) -> Vo + Sync,
         Vo: Values,
diff --git a/src/collect_into/par_collect_into.rs b/src/collect_into/par_collect_into.rs
index 19cf859..58d8562 100644
--- a/src/collect_into/par_collect_into.rs
+++ b/src/collect_into/par_collect_into.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::runner_results::{Fallibility, Infallible};
 use crate::generic_values::{TransformableValues, Values};
-use crate::runner::Orchestrator;
+use crate::runner::ParallelRunner;
 use crate::using::UParCollectIntoCore;
 use orx_concurrent_iter::ConcurrentIter;
 use orx_iterable::Collection;
@@ -14,7 +14,7 @@ pub trait ParCollectIntoCore: Collection {
     fn m_collect_into(self, orchestrator: R, params: Params, iter: I, map1: M1) -> Self
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         M1: Fn(I::Item) -> O + Sync;
@@ -26,7 +26,7 @@ pub trait ParCollectIntoCore: Collection {
         xap1: X1,
     ) -> Self
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         Vo: TransformableValues,
         X1: Fn(I::Item) -> Vo + Sync;
@@ -39,7 +39,7 @@ pub trait ParCollectIntoCore: Collection {
         xap1: X1,
     ) -> Result::Error>
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         X1: Fn(I::Item) -> Vo + Sync,
         Vo: Values,
diff --git a/src/collect_into/split_vec.rs b/src/collect_into/split_vec.rs
index 61a4f5d..7d42b76 100644
--- a/src/collect_into/split_vec.rs
+++ b/src/collect_into/split_vec.rs
@@ -4,7 +4,7 @@ use crate::Params;
 use crate::collect_into::utils::split_vec_reserve;
 use crate::generic_values::runner_results::{Fallibility, Infallible};
 use crate::generic_values::{TransformableValues, Values};
-use crate::runner::Orchestrator;
+use crate::runner::ParallelRunner;
 use orx_concurrent_iter::ConcurrentIter;
 #[cfg(test)]
 use orx_pinned_vec::PinnedVec;
@@ -32,7 +32,7 @@ where
         map1: M1,
     ) -> Self
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         M1: Fn(I::Item) -> O + Sync,
         O: Send,
@@ -50,7 +50,7 @@ where
         xap1: X1,
     ) -> Self
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         Vo: TransformableValues,
         X1: Fn(I::Item) -> Vo + Sync,
@@ -68,7 +68,7 @@ where
         xap1: X1,
     ) -> Result::Error>
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         X1: Fn(I::Item) -> Vo + Sync,
         Vo: Values,
diff --git a/src/collect_into/vec.rs b/src/collect_into/vec.rs
index 526bea3..880d1f0 100644
--- a/src/collect_into/vec.rs
+++ b/src/collect_into/vec.rs
@@ -4,7 +4,7 @@ use crate::collect_into::collect::map_collect_into;
 use crate::collect_into::utils::extend_vec_from_split;
 use crate::generic_values::runner_results::{Fallibility, Infallible};
 use crate::generic_values::{TransformableValues, Values};
-use crate::runner::Orchestrator;
+use crate::runner::ParallelRunner;
 use alloc::vec::Vec;
 use orx_concurrent_iter::ConcurrentIter;
 use orx_fixed_vec::FixedVec;
@@ -31,7 +31,7 @@ where
         map1: M1,
     ) -> Self
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         M1: Fn(I::Item) -> O + Sync,
         O: Send,
@@ -59,7 +59,7 @@ where
         xap1: X1,
     ) -> Self
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         Vo: TransformableValues,
         X1: Fn(I::Item) -> Vo + Sync,
@@ -77,7 +77,7 @@ where
         xap1: X1,
     ) -> Result::Error>
     where
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         X1: Fn(I::Item) -> Vo + Sync,
         Vo: Values,
diff --git a/src/computational_variants/fallible_option.rs b/src/computational_variants/fallible_option.rs
index fc29408..685fb96 100644
--- a/src/computational_variants/fallible_option.rs
+++ b/src/computational_variants/fallible_option.rs
@@ -1,6 +1,6 @@
 use crate::{
     ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterResult,
-    runner::{DefaultOrchestrator, Orchestrator},
+    runner::{DefaultOrchestrator, ParallelRunner},
     par_iter_option::{ParIterOption, ResultIntoOption},
 };
 use core::marker::PhantomData;
@@ -9,7 +9,7 @@ use core::marker::PhantomData;
 /// or fails and **early exits** with None.
 pub struct ParOption
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     F: ParIterResult,
 {
     par: F,
@@ -18,7 +18,7 @@ where
 impl ParOption
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     F: ParIterResult,
 {
     pub(crate) fn new(par: F) -> Self {
@@ -31,7 +31,7 @@ where
 impl ParIterOption for ParOption
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     F: ParIterResult,
 {
     type Item = T;
@@ -50,7 +50,7 @@ where
         Self::new(self.par.iteration_order(order))
     }
 
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterOption {
diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs
index a0e128f..94da016 100644
--- a/src/computational_variants/fallible_result/map_result.rs
+++ b/src/computational_variants/fallible_result/map_result.rs
@@ -1,5 +1,5 @@
 use crate::computational_variants::ParMap;
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::par_iter_result::{IntoResult, ParIterResult};
 use crate::executor::parallel_compute as prc;
 use crate::{IterationOrder, ParCollectInto, ParIter};
@@ -10,7 +10,7 @@ use orx_concurrent_iter::ConcurrentIter;
 /// A parallel iterator for which the computation either completely succeeds,
 /// or fails and **early exits** with an error.
 pub struct ParMapResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     O: IntoResult,
     M1: Fn(I::Item) -> O + Sync,
@@ -21,7 +21,7 @@ where
 impl ParMapResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     O: IntoResult,
     M1: Fn(I::Item) -> O + Sync,
@@ -36,7 +36,7 @@ where
 impl ParIterResult for ParMapResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     O: IntoResult,
     M1: Fn(I::Item) -> O + Sync,
@@ -66,7 +66,7 @@ where
     // params transformations
 
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterResult {
diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs
index ed5acc8..6af62ab 100644
--- a/src/computational_variants/fallible_result/par_result.rs
+++ b/src/computational_variants/fallible_result/par_result.rs
@@ -1,7 +1,7 @@
 use crate::computational_variants::Par;
-use crate::runner::{DefaultOrchestrator, Orchestrator};
-use crate::par_iter_result::{IntoResult, ParIterResult};
 use crate::executor::parallel_compute as prc;
+use crate::par_iter_result::{IntoResult, ParIterResult};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::{IterationOrder, ParCollectInto, ParIter};
 use core::marker::PhantomData;
 use orx_concurrent_iter::ConcurrentIter;
@@ -10,7 +10,7 @@ use orx_concurrent_iter::ConcurrentIter;
 /// A parallel iterator for which the computation either completely succeeds,
 /// or fails and **early exits** with an error.
 pub struct ParResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     I::Item: IntoResult,
 {
@@ -20,7 +20,7 @@ where
 impl ParResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     I::Item: IntoResult,
 {
@@ -34,7 +34,7 @@ where
 impl ParIterResult for ParResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     I::Item: IntoResult,
 {
@@ -63,7 +63,7 @@ where
     // params transformations
 
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterResult {
diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs
index a416aa7..2b880c4 100644
--- a/src/computational_variants/fallible_result/xap_result.rs
+++ b/src/computational_variants/fallible_result/xap_result.rs
@@ -1,7 +1,7 @@
 use crate::computational_variants::ParXap;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::par_iter_result::{IntoResult, ParIterResult};
 use crate::executor::parallel_compute as prc;
 use crate::{IterationOrder, ParCollectInto, Params};
@@ -12,7 +12,7 @@ use orx_concurrent_iter::ConcurrentIter;
 /// A parallel iterator for which the computation either completely succeeds,
 /// or fails and **early exits** with an error.
 pub struct ParXapResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     Vo::Item: IntoResult,
@@ -27,7 +27,7 @@ where
 impl ParXapResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     Vo::Item: IntoResult,
@@ -50,7 +50,7 @@ where
 impl ParIterResult for ParXapResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     Vo::Item: IntoResult,
@@ -80,7 +80,7 @@ where
     // params transformations
 
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterResult {
diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs
index 57364d4..2e49bf0 100644
--- a/src/computational_variants/map.rs
+++ b/src/computational_variants/map.rs
@@ -1,7 +1,7 @@
 use super::xap::ParXap;
 use crate::computational_variants::fallible_result::ParMapResult;
 use crate::generic_values::{Vector, WhilstAtom};
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::par_iter_result::IntoResult;
 use crate::executor::parallel_compute as prc;
 use crate::using::{UParMap, UsingClone, UsingFun};
@@ -12,7 +12,7 @@ use orx_concurrent_iter::ConcurrentIter;
 /// A parallel iterator that maps inputs.
 pub struct ParMap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(I::Item) -> O + Sync,
 {
@@ -24,7 +24,7 @@ where
 impl ParMap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(I::Item) -> O + Sync,
 {
@@ -44,7 +44,7 @@ where
 unsafe impl Send for ParMap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(I::Item) -> O + Sync,
 {
@@ -52,7 +52,7 @@ where
 unsafe impl Sync for ParMap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(I::Item) -> O + Sync,
 {
@@ -60,7 +60,7 @@ where
 impl ParIter for ParMap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(I::Item) -> O + Sync,
 {
@@ -91,7 +91,7 @@ where
         self
     }
 
-    fn with_runner(self, orchestrator: Q) -> impl ParIter {
+    fn with_runner(self, orchestrator: Q) -> impl ParIter {
         let (_, params, iter, map) = self.destruct();
         ParMap::new(orchestrator, params, iter, map)
     }
diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs
index 1cdebad..4395079 100644
--- a/src/computational_variants/par.rs
+++ b/src/computational_variants/par.rs
@@ -1,7 +1,7 @@
 use super::{map::ParMap, xap::ParXap};
 use crate::computational_variants::fallible_result::ParResult;
 use crate::generic_values::{Vector, WhilstAtom};
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::par_iter_result::IntoResult;
 use crate::executor::parallel_compute as prc;
 use crate::using::{UPar, UsingClone, UsingFun};
@@ -15,7 +15,7 @@ use orx_concurrent_iter::{ConcurrentIter, ExactSizeConcurrentIter};
 /// A parallel iterator.
 pub struct Par
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
     orchestrator: R,
@@ -25,7 +25,7 @@ where
 impl Par
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
     pub(crate) fn new(orchestrator: R, params: Params, iter: I) -> Self {
@@ -43,21 +43,21 @@ where
 unsafe impl Send for Par
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
 }
 
 unsafe impl Sync for Par
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
 }
 
 impl ParIter for Par
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
     type Item = I::Item;
@@ -87,7 +87,7 @@ where
         self
     }
 
-    fn with_runner(self, orchestrator: Q) -> impl ParIter {
+    fn with_runner(self, orchestrator: Q) -> impl ParIter {
         Par::new(orchestrator, self.params, self.iter)
     }
 
@@ -205,7 +205,7 @@ where
 impl Par
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
     /// Creates a chain of this and `other` parallel iterators.
diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs
index 7dc8316..bcad192 100644
--- a/src/computational_variants/xap.rs
+++ b/src/computational_variants/xap.rs
@@ -1,7 +1,7 @@
 use crate::computational_variants::fallible_result::ParXapResult;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::par_iter_result::IntoResult;
 use crate::executor::parallel_compute as prc;
 use crate::using::{UParXap, UsingClone, UsingFun};
@@ -14,7 +14,7 @@ use orx_concurrent_iter::ConcurrentIter;
 /// *xap* is a generalization of one-to-one map, filter-map and flat-map operations.
 pub struct ParXap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(I::Item) -> Vo + Sync,
@@ -27,7 +27,7 @@ where
 impl ParXap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(I::Item) -> Vo + Sync,
@@ -48,7 +48,7 @@ where
 unsafe impl Send for ParXap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(I::Item) -> Vo + Sync,
@@ -57,7 +57,7 @@ where
 unsafe impl Sync for ParXap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(I::Item) -> Vo + Sync,
@@ -66,7 +66,7 @@ where
 impl ParIter for ParXap
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(I::Item) -> Vo + Sync,
@@ -98,7 +98,7 @@ where
         self
     }
 
-    fn with_runner(self, orchestrator: Q) -> impl ParIter {
+    fn with_runner(self, orchestrator: Q) -> impl ParIter {
         let (_, params, iter, x1) = self.destruct();
         ParXap::new(orchestrator, params, iter, x1)
     }
diff --git a/src/executor/parallel_compute/collect_arbitrary.rs b/src/executor/parallel_compute/collect_arbitrary.rs
index cbf904d..50deea3 100644
--- a/src/executor/parallel_compute/collect_arbitrary.rs
+++ b/src/executor/parallel_compute/collect_arbitrary.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::executor::thread_compute as th;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::ParallelCollectArbitrary;
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf, ThreadRunnerOf};
 use orx_concurrent_bag::ConcurrentBag;
 use orx_concurrent_iter::ConcurrentIter;
 use orx_fixed_vec::IntoConcurrentPinnedVec;
@@ -16,7 +16,7 @@ pub fn m(
     pinned_vec: P,
 ) -> (NumSpawned, P)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     O: Send,
     M1: Fn(I::Item) -> O + Sync,
@@ -47,7 +47,7 @@ pub fn x(
     pinned_vec: P,
 ) -> (NumSpawned, ParallelCollectArbitrary)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/executor/parallel_compute/collect_ordered.rs b/src/executor/parallel_compute/collect_ordered.rs
index 113a44d..6633267 100644
--- a/src/executor/parallel_compute/collect_ordered.rs
+++ b/src/executor/parallel_compute/collect_ordered.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::executor::thread_compute as th;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::{Fallibility, ParallelCollect};
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf, ThreadRunnerOf};
 use orx_concurrent_iter::ConcurrentIter;
 use orx_concurrent_ordered_bag::ConcurrentOrderedBag;
 use orx_fixed_vec::IntoConcurrentPinnedVec;
@@ -15,7 +15,7 @@ pub fn m(
     pinned_vec: P,
 ) -> (NumSpawned, P)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     O: Send,
     M1: Fn(I::Item) -> O + Sync,
@@ -41,7 +41,7 @@ pub fn x(
     pinned_vec: P,
 ) -> (NumSpawned, ParallelCollect)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/executor/parallel_compute/next.rs b/src/executor/parallel_compute/next.rs
index a73c824..9fc77a1 100644
--- a/src/executor/parallel_compute/next.rs
+++ b/src/executor/parallel_compute/next.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::executor::thread_compute as th;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx};
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf};
 use orx_concurrent_iter::ConcurrentIter;
 
 pub fn m(
@@ -12,7 +12,7 @@ pub fn m(
     map1: M1,
 ) -> (NumSpawned, Option)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     O: Send,
     M1: Fn(I::Item) -> O + Sync,
@@ -45,7 +45,7 @@ pub fn x(
     xap1: X1,
 ) -> (NumSpawned, ResultNext)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/executor/parallel_compute/next_any.rs b/src/executor/parallel_compute/next_any.rs
index 2ec7638..49dae38 100644
--- a/src/executor/parallel_compute/next_any.rs
+++ b/src/executor/parallel_compute/next_any.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::executor::thread_compute as th;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::Fallibility;
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf};
 use orx_concurrent_iter::ConcurrentIter;
 
 pub fn m(
@@ -12,7 +12,7 @@ pub fn m(
     map1: M1,
 ) -> (NumSpawned, Option)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     O: Send,
     M1: Fn(I::Item) -> O + Sync,
@@ -39,7 +39,7 @@ pub fn x(
     xap1: X1,
 ) -> (NumSpawned, ResultNextAny)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/executor/parallel_compute/reduce.rs b/src/executor/parallel_compute/reduce.rs
index 4402589..ba9c0ba 100644
--- a/src/executor/parallel_compute/reduce.rs
+++ b/src/executor/parallel_compute/reduce.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::executor::thread_compute as th;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::Fallibility;
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf, ThreadRunnerOf};
 use orx_concurrent_iter::ConcurrentIter;
 
 pub fn m(
@@ -13,7 +13,7 @@ pub fn m(
     reduce: Red,
 ) -> (NumSpawned, Option)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(I::Item) -> O + Sync,
     Red: Fn(O, O) -> O + Sync,
@@ -43,7 +43,7 @@ pub fn x(
     reduce: Red,
 ) -> (NumSpawned, ResultReduce)
 where
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/lib.rs b/src/lib.rs
index 14f694c..4402fc9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -39,7 +39,7 @@ mod parallelizable;
 mod parallelizable_collection;
 mod parallelizable_collection_mut;
 mod parameters;
-/// Orchestrator for parallel execution and managing threads.
+/// ParallelRunner for parallel execution and managing threads.
 pub mod runner;
 mod special_type_sets;
 /// Module defining parallel iterators with mutable access to values distributed to each thread.
diff --git a/src/par_iter.rs b/src/par_iter.rs
index dfdd65f..3497dce 100644
--- a/src/par_iter.rs
+++ b/src/par_iter.rs
@@ -1,6 +1,6 @@
 use crate::ParIterResult;
 use crate::computational_variants::fallible_option::ParOption;
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::par_iter_option::{IntoOption, ParIterOption};
 use crate::par_iter_result::IntoResult;
 use crate::using::{UsingClone, UsingFun};
@@ -17,7 +17,7 @@ use orx_concurrent_iter::ConcurrentIter;
 /// Parallel iterator.
 pub trait ParIter: Sized + Send + Sync
 where
-    R: Orchestrator,
+    R: ParallelRunner,
 {
     /// Element type of the parallel iterator.
     type Item;
@@ -262,7 +262,7 @@ where
     /// // uses the custom parallel runner MyParallelRunner: ParallelRunner
     /// let sum = inputs.par().with_runner::().sum();
     /// ```
-    fn with_runner(self, orchestrator: Q) -> impl ParIter;
+    fn with_runner(self, orchestrator: Q) -> impl ParIter;
 
     // using transformations
 
diff --git a/src/par_iter_option.rs b/src/par_iter_option.rs
index c3b86a3..3100b51 100644
--- a/src/par_iter_option.rs
+++ b/src/par_iter_option.rs
@@ -1,5 +1,5 @@
 use crate::default_fns::{map_count, reduce_sum, reduce_unit};
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Sum};
 use core::cmp::Ordering;
 
@@ -123,7 +123,7 @@ use core::cmp::Ordering;
 /// [`ParIter`]: crate::ParIter
 pub trait ParIterOption
 where
-    R: Orchestrator,
+    R: ParallelRunner,
 {
     /// Type of the success element, to be received as the Some variant iff the entire computation succeeds.
     type Item;
@@ -156,10 +156,10 @@ where
     /// See [`IterationOrder`] and [`crate::ParIter::iteration_order`] for details.
     fn iteration_order(self, order: IterationOrder) -> Self;
 
-    /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`Orchestrator`].
+    /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`].
     ///
     /// See [`crate::ParIter::with_runner`] for details.
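(For orientation, a minimal sketch of the call shape this rename preserves: hypothetical driver code, assuming the crate is consumed as `orx_parallel`; `inputs.par()` and `sum` follow the doc example in the hunk above, and any `ParallelRunner` implementation such as `RayonOrchestrator` can stand in for the default.)

    use orx_parallel::*;

    fn sketch(inputs: Vec<u64>) -> u64 {
        // default runner unless overridden; an explicit runner would be
        // plugged in via `.with_runner(my_runner)` with the signature below
        inputs.par().sum()
    }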
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterOption;
diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs
index c2c112a..6be9cc3 100644
--- a/src/par_iter_result.rs
+++ b/src/par_iter_result.rs
@@ -1,5 +1,5 @@
 use crate::default_fns::{map_count, reduce_sum, reduce_unit};
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::{ChunkSize, IterationOrder, NumThreads, Sum};
 use crate::{ParCollectInto, ParIter, generic_values::fallible_iterators::ResultOfIter};
 use core::cmp::Ordering;
@@ -131,7 +131,7 @@ use core::cmp::Ordering;
 /// [`ParIter`]: crate::ParIter
 pub trait ParIterResult
 where
-    R: Orchestrator,
+    R: ParallelRunner,
 {
     /// Type of the Ok element, to be received as the Ok variant iff the entire computation succeeds.
     type Item;
@@ -197,10 +197,10 @@ where
         Self::from_regular_par(self.into_regular_par().iteration_order(order))
     }
 
-    /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`Orchestrator`].
+    /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`].
     ///
     /// See [`ParIter::with_runner`] for details.
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterResult;
diff --git a/src/runner/implementations/default_std_orchestrator.rs b/src/runner/implementations/default_std_orchestrator.rs
index 6d5d6aa..a65d4cb 100644
--- a/src/runner/implementations/default_std_orchestrator.rs
+++ b/src/runner/implementations/default_std_orchestrator.rs
@@ -1,5 +1,5 @@
 use crate::par_thread_pool::ParThreadPool;
-use crate::{DefaultExecutor, runner::Orchestrator};
+use crate::{DefaultExecutor, runner::ParallelRunner};
 use core::num::NonZeroUsize;
 
 // POOL
@@ -61,7 +61,7 @@ impl ParThreadPool for StdDefaultPool {
 #[derive(Default)]
 pub struct DefaultStdOrchestrator(StdDefaultPool);
 
-impl Orchestrator for DefaultStdOrchestrator {
+impl ParallelRunner for DefaultStdOrchestrator {
     type Runner = DefaultExecutor;
 
     type ThreadPool = StdDefaultPool;
diff --git a/src/runner/implementations/rayon.rs b/src/runner/implementations/rayon.rs
index 9d05c5a..4fcc001 100644
--- a/src/runner/implementations/rayon.rs
+++ b/src/runner/implementations/rayon.rs
@@ -1,5 +1,5 @@
 use crate::{
-    DefaultExecutor, ParallelExecutor, par_thread_pool::ParThreadPool, runner::Orchestrator,
+    DefaultExecutor, ParallelExecutor, par_thread_pool::ParThreadPool, runner::ParallelRunner,
 };
 use core::{marker::PhantomData, num::NonZeroUsize};
 use orx_self_or::SoR;
@@ -100,7 +100,7 @@ where
     }
 }
 
-impl Orchestrator for RayonOrchestrator
+impl ParallelRunner for RayonOrchestrator
 where
     R: ParallelExecutor,
     P: SoR + ParThreadPool,
diff --git a/src/runner/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs
index 480539c..d106d8f 100644
--- a/src/runner/implementations/scoped_threadpool.rs
+++ b/src/runner/implementations/scoped_threadpool.rs
@@ -1,5 +1,5 @@
 use crate::{
-    DefaultExecutor, ParallelExecutor, par_thread_pool::ParThreadPool, runner::Orchestrator,
+    DefaultExecutor, ParallelExecutor, par_thread_pool::ParThreadPool, runner::ParallelRunner,
 };
 use core::{marker::PhantomData, num::NonZeroUsize};
 use orx_self_or::SoM;
@@ -100,7 +100,7 @@ where
     }
 }
 
-impl Orchestrator for ScopedThreadPoolOrchestrator
+impl ParallelRunner for ScopedThreadPoolOrchestrator
 where
     R: ParallelExecutor,
     P: SoM + ParThreadPool,
diff --git a/src/runner/implementations/tests/utils.rs b/src/runner/implementations/tests/utils.rs
index dfad0d7..af9fdd9 100644
--- a/src/runner/implementations/tests/utils.rs
+++ b/src/runner/implementations/tests/utils.rs
@@ -1,11 +1,11 @@
-use crate::{IntoParIter, IterationOrder, ParIter, runner::Orchestrator};
+use crate::{IntoParIter, IterationOrder, ParIter, runner::ParallelRunner};
 use alloc::format;
 use alloc::string::{String, ToString};
 use alloc::vec::Vec;
 use orx_pinned_vec::PinnedVec;
 use orx_split_vec::SplitVec;
 
-pub fn run_map(n: usize, chunk: usize, ordering: IterationOrder, mut orch: impl Orchestrator) {
+pub fn run_map(n: usize, chunk: usize, ordering: IterationOrder, mut orch: impl ParallelRunner) {
     let offset = 33;
 
     let input: Vec<_> = (0..n).map(|x| x.to_string()).collect();
diff --git a/src/runner/mod.rs b/src/runner/mod.rs
index d130667..2d16498 100644
--- a/src/runner/mod.rs
+++ b/src/runner/mod.rs
@@ -1,13 +1,13 @@
 mod computation_kind;
 mod implementations;
 mod num_spawned;
-mod orchestrator;
+mod parallel_runner;
 
-pub(crate) use orchestrator::{SharedStateOf, ThreadRunnerOf};
+pub(crate) use parallel_runner::{SharedStateOf, ThreadRunnerOf};
 
 pub use crate::runner::implementations::DefaultStdOrchestrator;
 pub use computation_kind::ComputationKind;
 pub use num_spawned::NumSpawned;
-pub use orchestrator::Orchestrator;
+pub use parallel_runner::ParallelRunner;
 
 pub type DefaultOrchestrator = DefaultStdOrchestrator;
diff --git a/src/runner/orchestrator.rs b/src/runner/parallel_runner.rs
similarity index 93%
rename from src/runner/orchestrator.rs
rename to src/runner/parallel_runner.rs
index 32bfcc3..5604055 100644
--- a/src/runner/orchestrator.rs
+++ b/src/runner/parallel_runner.rs
@@ -8,7 +8,7 @@ use alloc::vec::Vec;
 use core::num::NonZeroUsize;
 use orx_concurrent_iter::ConcurrentIter;
 
-pub trait Orchestrator {
+pub trait ParallelRunner {
     type Runner: ParallelExecutor;
 
     type ThreadPool: ParThreadPool;
@@ -115,15 +115,15 @@ pub trait ParallelRunner {
     }
 }
 
-pub(crate) type SharedStateOf = <::Runner as ParallelExecutor>::SharedState;
+pub(crate) type SharedStateOf = <::Runner as ParallelExecutor>::SharedState;
 pub(crate) type ThreadRunnerOf =
-    <::Runner as ParallelExecutor>::ThreadExecutor;
+    <::Runner as ParallelExecutor>::ThreadExecutor;
 
 // auto impl for &mut pool
 
-impl<'a, O> Orchestrator for &'a mut O
+impl<'a, O> ParallelRunner for &'a mut O
 where
-    O: Orchestrator,
+    O: ParallelRunner,
 {
     type Runner = O::Runner;
diff --git a/src/using/collect_into/collect.rs b/src/using/collect_into/collect.rs
index 9388be4..c80f0ba 100644
--- a/src/using/collect_into/collect.rs
+++ b/src/using/collect_into/collect.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::generic_values::runner_results::{
     Infallible, ParallelCollect, ParallelCollectArbitrary,
 };
-use crate::runner::{NumSpawned, Orchestrator};
+use crate::runner::{NumSpawned, ParallelRunner};
 use crate::using::executor::parallel_compute as prc;
 use crate::using::using_variants::Using;
 use crate::{IterationOrder, generic_values::Values};
@@ -19,7 +19,7 @@ pub fn map_collect_into(
 ) -> (NumSpawned, P)
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
     O: Send,
@@ -64,7 +64,7 @@ pub fn xap_collect_into(
 ) -> (NumSpawned, P)
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/using/collect_into/fixed_vec.rs b/src/using/collect_into/fixed_vec.rs
index f6aaaa4..f478d20 100644
--- a/src/using/collect_into/fixed_vec.rs
+++ b/src/using/collect_into/fixed_vec.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::runner::Orchestrator;
+use crate::runner::ParallelRunner;
 use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore;
 use alloc::vec::Vec;
 use orx_concurrent_iter::ConcurrentIter;
@@ -21,7 +21,7 @@ where
     ) -> Self
     where
         U: crate::using::using_variants::Using,
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         M1: Fn(&mut U::Item, I::Item) -> O + Sync,
     {
@@ -39,7 +39,7 @@ where
     ) -> Self
     where
         U: crate::using::using_variants::Using,
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         Vo: TransformableValues,
         X1: Fn(&mut U::Item, I::Item) -> Vo + Sync,
diff --git a/src/using/collect_into/split_vec.rs b/src/using/collect_into/split_vec.rs
index 6e62640..6b5466e 100644
--- a/src/using/collect_into/split_vec.rs
+++ b/src/using/collect_into/split_vec.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::collect_into::utils::split_vec_reserve;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::runner::Orchestrator;
+use crate::runner::ParallelRunner;
 use crate::using::collect_into::collect::{map_collect_into, xap_collect_into};
 use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore;
 use orx_concurrent_iter::ConcurrentIter;
@@ -24,7 +24,7 @@ where
     ) -> Self
     where
         U: crate::using::using_variants::Using,
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         M1: Fn(&mut U::Item, I::Item) -> O + Sync,
     {
@@ -43,7 +43,7 @@ where
     ) -> Self
     where
         U: crate::using::using_variants::Using,
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         Vo: TransformableValues,
         X1: Fn(&mut U::Item, I::Item) -> Vo + Sync,
diff --git a/src/using/collect_into/u_par_collect_into.rs b/src/using/collect_into/u_par_collect_into.rs
index f346e36..9932dbc 100644
--- a/src/using/collect_into/u_par_collect_into.rs
+++ b/src/using/collect_into/u_par_collect_into.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::collect_into::ParCollectIntoCore;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::runner::Orchestrator;
+use crate::runner::ParallelRunner;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
 
@@ -17,7 +17,7 @@ pub trait UParCollectIntoCore: ParCollectIntoCore {
     ) -> Self
     where
         U: Using,
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         M1: Fn(&mut U::Item, I::Item) -> O + Sync;
@@ -31,7 +31,7 @@ pub trait UParCollectIntoCore: ParCollectIntoCore {
     ) -> Self
     where
         U: Using,
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         Vo: TransformableValues,
         X1: Fn(&mut U::Item, I::Item) -> Vo + Sync;
diff --git a/src/using/collect_into/vec.rs b/src/using/collect_into/vec.rs
index edfae42..301efd5 100644
--- a/src/using/collect_into/vec.rs
+++ b/src/using/collect_into/vec.rs
@@ -2,7 +2,7 @@ use crate::Params;
 use crate::collect_into::utils::extend_vec_from_split;
 use crate::generic_values::TransformableValues;
 use crate::generic_values::runner_results::Infallible;
-use crate::runner::Orchestrator;
+use crate::runner::ParallelRunner;
 use crate::using::collect_into::collect::map_collect_into;
 use crate::using::collect_into::u_par_collect_into::UParCollectIntoCore;
 use crate::using::using_variants::Using;
@@ -25,7 +25,7 @@ where
     ) -> Self
     where
         U: Using,
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         M1: Fn(&mut U::Item, I::Item) -> O + Sync,
     {
@@ -55,7 +55,7 @@ where
     ) -> Self
     where
         U: Using,
-        R: Orchestrator,
+        R: ParallelRunner,
         I: ConcurrentIter,
         Vo: TransformableValues,
         X1: Fn(&mut U::Item, I::Item) -> Vo + Sync,
diff --git a/src/using/computational_variants/u_map.rs b/src/using/computational_variants/u_map.rs
index 3c65550..a78b641 100644
--- a/src/using/computational_variants/u_map.rs
+++ b/src/using/computational_variants/u_map.rs
@@ -1,6 +1,6 @@
 use crate::ParIterUsing;
 use crate::generic_values::Vector;
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::using::computational_variants::u_xap::UParXap;
 use crate::using::executor::parallel_compute as prc;
 use crate::using::using_variants::Using;
@@ -11,7 +11,7 @@ use orx_concurrent_iter::ConcurrentIter;
 pub struct UParMap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
 {
@@ -25,7 +25,7 @@ where
 impl UParMap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
 {
@@ -53,7 +53,7 @@ where
 unsafe impl Send for UParMap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
 {
@@ -62,7 +62,7 @@ where
 unsafe impl Sync for UParMap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
 {
@@ -71,7 +71,7 @@ where
 impl ParIterUsing for UParMap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
 {
@@ -100,7 +100,7 @@ where
         self
     }
 
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterUsing {
diff --git a/src/using/computational_variants/u_par.rs b/src/using/computational_variants/u_par.rs
index a5c8737..d5328cd 100644
--- a/src/using/computational_variants/u_par.rs
+++ b/src/using/computational_variants/u_par.rs
@@ -1,7 +1,7 @@
 use crate::ParIterUsing;
 use crate::default_fns::u_map_self;
 use crate::generic_values::Vector;
-use crate::runner::{DefaultOrchestrator, Orchestrator};
+use crate::runner::{DefaultOrchestrator, ParallelRunner};
 use crate::using::computational_variants::u_map::UParMap;
 use crate::using::computational_variants::u_xap::UParXap;
 use crate::using::executor::parallel_compute as prc;
@@ -13,7 +13,7 @@ use orx_concurrent_iter::ConcurrentIter;
 pub struct UPar
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
     using: U,
@@ -25,7 +25,7 @@ where
 impl UPar
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
     pub(crate) fn new(using: U, orchestrator: R, params: Params, iter: I) -> Self {
@@ -45,7 +45,7 @@ where
 unsafe impl Send for UPar
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
 }
@@ -53,7 +53,7 @@ where
 unsafe impl Sync for UPar
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
 }
@@ -61,7 +61,7 @@ where
 impl ParIterUsing for UPar
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
 {
     type Item = I::Item;
@@ -89,7 +89,7 @@ where
         self
     }
 
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterUsing {
diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs
index ef76a48..6924302 100644
--- a/src/using/computational_variants/u_xap.rs
+++ b/src/using/computational_variants/u_xap.rs
@@ -2,7 +2,7 @@ use crate::using::executor::parallel_compute as prc;
 use crate::{
     ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterUsing, Params,
     generic_values::{TransformableValues, runner_results::Infallible},
-    runner::{DefaultOrchestrator, Orchestrator},
+    runner::{DefaultOrchestrator, ParallelRunner},
     using::using_variants::Using,
 };
 use orx_concurrent_iter::ConcurrentIter;
@@ -11,7 +11,7 @@ use orx_concurrent_iter::ConcurrentIter;
 pub struct UParXap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(&mut U::Item, I::Item) -> Vo + Sync,
@@ -26,7 +26,7 @@ where
 impl UParXap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(&mut U::Item, I::Item) -> Vo + Sync,
@@ -55,7 +55,7 @@ where
 unsafe impl Send for UParXap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(&mut U::Item, I::Item) -> Vo + Sync,
@@ -65,7 +65,7 @@ where
 unsafe impl Sync for UParXap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(&mut U::Item, I::Item) -> Vo + Sync,
@@ -75,7 +75,7 @@ where
 impl ParIterUsing for UParXap
 where
     U: Using,
-    R: Orchestrator,
+    R: ParallelRunner,
     I: ConcurrentIter,
     Vo: TransformableValues,
     X1: Fn(&mut U::Item, I::Item) -> Vo + Sync,
@@ -105,7 +105,7 @@ where
         self
     }
 
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterUsing {
diff --git a/src/using/executor/parallel_compute/collect_arbitrary.rs b/src/using/executor/parallel_compute/collect_arbitrary.rs
index 4e969e7..74851cd 100644
--- a/src/using/executor/parallel_compute/collect_arbitrary.rs
+++ b/src/using/executor/parallel_compute/collect_arbitrary.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::ParallelCollectArbitrary;
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf, ThreadRunnerOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_bag::ConcurrentBag;
@@ -19,7 +19,7 @@ pub fn m(
 ) -> (NumSpawned, P)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     O: Send,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
@@ -53,7 +53,7 @@ pub fn x(
 ) -> (NumSpawned, ParallelCollectArbitrary)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/using/executor/parallel_compute/collect_ordered.rs b/src/using/executor/parallel_compute/collect_ordered.rs
index 20bc943..10ad818 100644
--- a/src/using/executor/parallel_compute/collect_ordered.rs
+++ b/src/using/executor/parallel_compute/collect_ordered.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::{Fallibility, ParallelCollect};
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf, ThreadRunnerOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
@@ -18,7 +18,7 @@ pub fn m(
 ) -> (NumSpawned, P)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     O: Send,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
@@ -48,7 +48,7 @@ pub fn x(
 ) -> (NumSpawned, ParallelCollect)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/using/executor/parallel_compute/next.rs b/src/using/executor/parallel_compute/next.rs
index d1e4278..7d44845 100644
--- a/src/using/executor/parallel_compute/next.rs
+++ b/src/using/executor/parallel_compute/next.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::{Fallibility, NextSuccess, NextWithIdx};
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
@@ -15,7 +15,7 @@ pub fn m(
 ) -> (NumSpawned, Option)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     O: Send,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
@@ -51,7 +51,7 @@ pub fn x(
 ) -> (NumSpawned, ResultNext)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/using/executor/parallel_compute/next_any.rs b/src/using/executor/parallel_compute/next_any.rs
index 4676023..7ff51bf 100644
--- a/src/using/executor/parallel_compute/next_any.rs
+++ b/src/using/executor/parallel_compute/next_any.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::Fallibility;
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
@@ -15,7 +15,7 @@ pub fn m(
 ) -> (NumSpawned, Option)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     O: Send,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
@@ -45,7 +45,7 @@ pub fn x(
 ) -> (NumSpawned, ResultNextAny)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/using/executor/parallel_compute/reduce.rs b/src/using/executor/parallel_compute/reduce.rs
index 5c588e6..de20823 100644
--- a/src/using/executor/parallel_compute/reduce.rs
+++ b/src/using/executor/parallel_compute/reduce.rs
@@ -1,7 +1,7 @@
 use crate::Params;
 use crate::generic_values::Values;
 use crate::generic_values::runner_results::Fallibility;
-use crate::runner::{ComputationKind, NumSpawned, Orchestrator, SharedStateOf, ThreadRunnerOf};
+use crate::runner::{ComputationKind, NumSpawned, ParallelRunner, SharedStateOf, ThreadRunnerOf};
 use crate::using::executor::thread_compute as th;
 use crate::using::using_variants::Using;
 use orx_concurrent_iter::ConcurrentIter;
@@ -16,7 +16,7 @@ pub fn m(
 ) -> (NumSpawned, Option)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     M1: Fn(&mut U::Item, I::Item) -> O + Sync,
     Red: Fn(&mut U::Item, O, O) -> O + Sync,
@@ -54,7 +54,7 @@ pub fn x(
 ) -> (NumSpawned, ResultReduce)
 where
     U: Using,
-    C: Orchestrator,
+    C: ParallelRunner,
     I: ConcurrentIter,
     Vo: Values,
     Vo::Item: Send,
diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs
index 06ce580..2f7746b 100644
--- a/src/using/u_par_iter.rs
+++ b/src/using/u_par_iter.rs
@@ -1,7 +1,7 @@
 use crate::default_fns::*;
 use crate::{
     ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum,
-    runner::{DefaultOrchestrator, Orchestrator},
+    runner::{DefaultOrchestrator, ParallelRunner},
     using::using_variants::Using,
 };
 use core::cmp::Ordering;
@@ -12,7 +12,7 @@ use orx_concurrent_iter::ConcurrentIter;
 /// Note that one variable will be created per thread used by the parallel computation.
 pub trait ParIterUsing: Sized + Send + Sync
 where
-    R: Orchestrator,
+    R: ParallelRunner,
     U: Using,
 {
     /// Element type of the parallel iterator.
@@ -56,10 +56,10 @@ where
     /// See [crate::ParIter::iteration_order] for details.
     fn iteration_order(self, collect: IterationOrder) -> Self;
 
-    /// Rather than the [`DefaultOrchestrator`], uses the parallel runner `Q` which implements [`Orchestrator`].
+    /// Rather than the [`DefaultOrchestrator`], uses the parallel runner `Q` which implements [`ParallelRunner`].
     ///
     /// See [crate::ParIter::with_runner] for details.
-    fn with_runner(
+    fn with_runner(
         self,
         orchestrator: Q,
     ) -> impl ParIterUsing;

From 5b01ee8d9e5e72bbe2efb4e0073bfcd522952a7e Mon Sep 17 00:00:00 2001
From: orxfun
Date: Wed, 17 Sep 2025 15:21:40 +0200
Subject: [PATCH 202/264] renaming

---
 src/computational_variants/fallible_option.rs | 4 +--
 .../fallible_result/map_result.rs | 4 +--
 .../fallible_result/par_result.rs | 4 +--
 .../fallible_result/xap_result.rs | 4 +--
 src/computational_variants/map.rs | 4 +--
 src/computational_variants/par.rs | 4 +--
 .../tests/map/collect.rs | 4 +--
 src/computational_variants/tests/map/find.rs | 6 ++---
 .../tests/map/reduce.rs | 6 ++---
 .../tests/xap/collect.rs | 8 +++---
 src/computational_variants/tests/xap/find.rs | 6 ++---
 .../tests/xap/reduce.rs | 6 ++---
 src/computational_variants/xap.rs | 4 +--
 src/into_par_iter.rs | 8 +++---
 src/iter/special_iterators.rs | 4 +--
 src/iter_into_par_iter.rs | 6 ++---
 src/par_iter.rs | 4 +--
 src/par_iter_option.rs | 4 +--
 src/par_iter_result.rs | 4 +--
 src/parallel_drainable.rs | 4 +--
 src/parallelizable.rs | 4 +--
 src/parallelizable_collection.rs | 4 +--
 src/parallelizable_collection_mut.rs | 4 +--
 src/runner/implementations/mod.rs | 4 +--
 src/runner/implementations/rayon.rs | 2 +-
 .../implementations/scoped_threadpool.rs | 2 +-
 ...ault_std_orchestrator.rs => std_runner.rs} | 27 ++++++++++++++-----
 src/runner/mod.rs | 4 +--
 src/using/computational_variants/u_map.rs | 4 +--
 src/using/computational_variants/u_par.rs | 4 +--
 src/using/computational_variants/u_xap.rs | 4 +--
 src/using/u_par_iter.rs | 4 +--
 32 files changed, 89 insertions(+), 76 deletions(-)
 rename src/runner/implementations/{default_std_orchestrator.rs => std_runner.rs} (77%)

diff --git a/src/computational_variants/fallible_option.rs b/src/computational_variants/fallible_option.rs
index 685fb96..3ea5179 100644
--- a/src/computational_variants/fallible_option.rs
+++ b/src/computational_variants/fallible_option.rs
@@ -1,13 +1,13 @@
 use crate::{
     ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterResult,
-    runner::{DefaultOrchestrator, ParallelRunner},
+    runner::{DefaultRunner, ParallelRunner},
     par_iter_option::{ParIterOption, ResultIntoOption},
 };
 use core::marker::PhantomData;
 
 /// A parallel iterator for which the computation either completely succeeds,
 /// or fails and **early exits** with None.
-pub struct ParOption +pub struct ParOption where R: ParallelRunner, F: ParIterResult, diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 94da016..99089f2 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,5 +1,5 @@ use crate::computational_variants::ParMap; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::executor::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, ParIter}; @@ -8,7 +8,7 @@ use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with an error. -pub struct ParMapResult +pub struct ParMapResult where R: ParallelRunner, I: ConcurrentIter, diff --git a/src/computational_variants/fallible_result/par_result.rs b/src/computational_variants/fallible_result/par_result.rs index 6af62ab..2a6296f 100644 --- a/src/computational_variants/fallible_result/par_result.rs +++ b/src/computational_variants/fallible_result/par_result.rs @@ -1,14 +1,14 @@ use crate::computational_variants::Par; use crate::executor::parallel_compute as prc; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::{IterationOrder, ParCollectInto, ParIter}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with an error. -pub struct ParResult +pub struct ParResult where R: ParallelRunner, I: ConcurrentIter, diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index 2b880c4..cc8035f 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -1,7 +1,7 @@ use crate::computational_variants::ParXap; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::executor::parallel_compute as prc; use crate::{IterationOrder, ParCollectInto, Params}; @@ -10,7 +10,7 @@ use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator for which the computation either completely succeeds, /// or fails and **early exits** with an error. 
-pub struct ParXapResult +pub struct ParXapResult where R: ParallelRunner, I: ConcurrentIter, diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 2e49bf0..410f699 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -1,7 +1,7 @@ use super::xap::ParXap; use crate::computational_variants::fallible_result::ParMapResult; use crate::generic_values::{Vector, WhilstAtom}; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::IntoResult; use crate::executor::parallel_compute as prc; use crate::using::{UParMap, UsingClone, UsingFun}; @@ -10,7 +10,7 @@ use crate::{ParIterResult, ParIterUsing}; use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator that maps inputs. -pub struct ParMap +pub struct ParMap where R: ParallelRunner, I: ConcurrentIter, diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 4395079..47e1baa 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -1,7 +1,7 @@ use super::{map::ParMap, xap::ParXap}; use crate::computational_variants::fallible_result::ParResult; use crate::generic_values::{Vector, WhilstAtom}; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::IntoResult; use crate::executor::parallel_compute as prc; use crate::using::{UPar, UsingClone, UsingFun}; @@ -13,7 +13,7 @@ use orx_concurrent_iter::chain::ChainKnownLenI; use orx_concurrent_iter::{ConcurrentIter, ExactSizeConcurrentIter}; /// A parallel iterator. -pub struct Par +pub struct Par where R: ParallelRunner, I: ConcurrentIter, diff --git a/src/computational_variants/tests/map/collect.rs b/src/computational_variants/tests/map/collect.rs index 571e53a..2eb13d9 100644 --- a/src/computational_variants/tests/map/collect.rs +++ b/src/computational_variants/tests/map/collect.rs @@ -1,5 +1,5 @@ use crate::collect_into::collect::map_collect_into; -use crate::{IterationOrder, Params, runner::DefaultOrchestrator}; +use crate::{IterationOrder, Params, runner::DefaultRunner}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; @@ -38,7 +38,7 @@ fn m_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); let (_, mut output) = - map_collect_into(DefaultOrchestrator::default(), params, iter, map, output); + map_collect_into(DefaultRunner::default(), params, iter, map, output); if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { expected.sort(); diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs index b42da93..1daf76a 100644 --- a/src/computational_variants/tests/map/find.rs +++ b/src/computational_variants/tests/map/find.rs @@ -1,5 +1,5 @@ use crate::{ - Params, default_fns::map_self, executor::parallel_compute, runner::DefaultOrchestrator, + Params, default_fns::map_self, executor::parallel_compute, runner::DefaultRunner, }; use alloc::format; use alloc::string::{String, ToString}; @@ -26,7 +26,7 @@ fn m_find(n: usize, nt: usize, chunk: usize) { let iter = input.into_con_iter(); let output = - parallel_compute::next::m(DefaultOrchestrator::default(), params, iter, map_self).1; + parallel_compute::next::m(DefaultRunner::default(), params, iter, map_self).1; assert_eq!(expected, output); 
} @@ -43,7 +43,7 @@ fn m_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let output = parallel_compute::next::m(DefaultOrchestrator::default(), params, iter, map).1; + let output = parallel_compute::next::m(DefaultRunner::default(), params, iter, map).1; assert_eq!(expected, output); } diff --git a/src/computational_variants/tests/map/reduce.rs b/src/computational_variants/tests/map/reduce.rs index 79d9646..523a493 100644 --- a/src/computational_variants/tests/map/reduce.rs +++ b/src/computational_variants/tests/map/reduce.rs @@ -1,4 +1,4 @@ -use crate::{Params, default_fns::map_self, runner::DefaultOrchestrator, executor::parallel_compute}; +use crate::{Params, default_fns::map_self, runner::DefaultRunner, executor::parallel_compute}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; @@ -27,7 +27,7 @@ fn m_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); let (_, output) = parallel_compute::reduce::m( - DefaultOrchestrator::default(), + DefaultRunner::default(), params, iter, map_self, @@ -55,7 +55,7 @@ fn m_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); let (_, output) = - parallel_compute::reduce::m(DefaultOrchestrator::default(), params, iter, map, reduce); + parallel_compute::reduce::m(DefaultRunner::default(), params, iter, map, reduce); assert_eq!(expected, output); } diff --git a/src/computational_variants/tests/xap/collect.rs b/src/computational_variants/tests/xap/collect.rs index d30ab69..c08ba4a 100644 --- a/src/computational_variants/tests/xap/collect.rs +++ b/src/computational_variants/tests/xap/collect.rs @@ -1,7 +1,7 @@ use crate::ParIter; use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use crate::runner::DefaultOrchestrator; +use crate::runner::DefaultRunner; use crate::{IterationOrder, Params}; use alloc::format; use alloc::string::{String, ToString}; @@ -44,7 +44,7 @@ fn todo_panic_at_con_bag_new() { let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); + let x = ParXap::new(DefaultRunner::default(), params, iter, xmap); let mut output = x.collect_into(output); @@ -83,7 +83,7 @@ fn x_flat_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrde let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); + let x = ParXap::new(DefaultRunner::default(), params, iter, xmap); let mut output = x.collect_into(output); @@ -122,7 +122,7 @@ fn x_filter_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOr let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); + let x = ParXap::new(DefaultRunner::default(), params, iter, xmap); let mut output = x.collect_into(output); diff --git a/src/computational_variants/tests/xap/find.rs b/src/computational_variants/tests/xap/find.rs index d7643b3..7fbb5c0 100644 --- a/src/computational_variants/tests/xap/find.rs +++ b/src/computational_variants/tests/xap/find.rs @@ -2,7 +2,7 @@ use crate::ParIter; use crate::Params; use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use 
crate::runner::DefaultOrchestrator; +use crate::runner::DefaultRunner; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; @@ -28,7 +28,7 @@ fn x_flat_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); + let x = ParXap::new(DefaultRunner::default(), params, iter, xmap); let output = x.first(); @@ -49,7 +49,7 @@ fn x_filter_map_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); + let x = ParXap::new(DefaultRunner::default(), params, iter, xmap); let output = x.first(); diff --git a/src/computational_variants/tests/xap/reduce.rs b/src/computational_variants/tests/xap/reduce.rs index 463fa42..bf15ae1 100644 --- a/src/computational_variants/tests/xap/reduce.rs +++ b/src/computational_variants/tests/xap/reduce.rs @@ -2,7 +2,7 @@ use crate::ParIter; use crate::Params; use crate::computational_variants::ParXap; use crate::generic_values::Vector; -use crate::runner::DefaultOrchestrator; +use crate::runner::DefaultRunner; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; @@ -32,7 +32,7 @@ fn x_flat_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); + let x = ParXap::new(DefaultRunner::default(), params, iter, xmap); let output = x.reduce(reduce); @@ -57,7 +57,7 @@ fn x_filter_map_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let x = ParXap::new(DefaultOrchestrator::default(), params, iter, xmap); + let x = ParXap::new(DefaultRunner::default(), params, iter, xmap); let output = x.reduce(reduce); diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index bcad192..ac49b6f 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -1,7 +1,7 @@ use crate::computational_variants::fallible_result::ParXapResult; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::IntoResult; use crate::executor::parallel_compute as prc; use crate::using::{UParXap, UsingClone, UsingFun}; @@ -12,7 +12,7 @@ use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator that xaps inputs. /// /// *xap* is a generalization of one-to-one map, filter-map and flat-map operations. 
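///
/// A minimal sketch of what xap unifies, expressed via the public `ParIter`
/// combinators (assuming `filter_map`, `flat_map` and `collect` behave as their
/// `Iterator` counterparts; both calls below are represented by a `ParXap` internally):
///
/// ```
/// use orx_parallel::*;
///
/// // 0-or-1 outputs per input, as in filter-map
/// let evens: Vec<_> = (0..10).par().filter_map(|x| (x % 2 == 0).then_some(x)).collect();
/// assert_eq!(evens, [0, 2, 4, 6, 8]);
///
/// // 0-or-more outputs per input, as in flat-map
/// let pairs: Vec<_> = (0..3).par().flat_map(|x| [x, 10 * x]).collect();
/// assert_eq!(pairs, [0, 0, 1, 10, 2, 20]);
/// ```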
-pub struct ParXap +pub struct ParXap where R: ParallelRunner, I: ConcurrentIter, diff --git a/src/into_par_iter.rs b/src/into_par_iter.rs index b5486a1..f545f50 100644 --- a/src/into_par_iter.rs +++ b/src/into_par_iter.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, runner::DefaultOrchestrator}; +use crate::{Params, computational_variants::Par, runner::DefaultRunner}; use orx_concurrent_iter::{ConcurrentIter, IntoConcurrentIter}; /// Trait to convert a source (collection or generator) into a parallel iterator; i.e., [`ParIter`], @@ -47,19 +47,19 @@ pub trait IntoParIter: IntoConcurrentIter { /// let range = 1..5; /// assert_eq!(range.into_par().max(), Some(4)); /// ``` - fn into_par(self) -> Par; + fn into_par(self) -> Par; } impl IntoParIter for I where I: IntoConcurrentIter, { - fn into_par(self) -> Par { + fn into_par(self) -> Par { Par::new(Default::default(), Params::default(), self.into_con_iter()) } } -impl IntoConcurrentIter for Par { +impl IntoConcurrentIter for Par { type Item = I::Item; type IntoIter = I; diff --git a/src/iter/special_iterators.rs b/src/iter/special_iterators.rs index 028ce6e..5970372 100644 --- a/src/iter/special_iterators.rs +++ b/src/iter/special_iterators.rs @@ -1,8 +1,8 @@ -use crate::{computational_variants::Par, runner::DefaultOrchestrator}; +use crate::{computational_variants::Par, runner::DefaultRunner}; use orx_concurrent_iter::implementations::ConIterEmpty; /// An empty parallel iterator which does not yield any elements. -pub type ParEmpty = Par, R>; +pub type ParEmpty = Par, R>; /// Creates an empty parallel iterator which does not yield any elements. pub fn empty() -> ParEmpty { diff --git a/src/iter_into_par_iter.rs b/src/iter_into_par_iter.rs index c13f2a3..42211d5 100644 --- a/src/iter_into_par_iter.rs +++ b/src/iter_into_par_iter.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, runner::DefaultOrchestrator}; +use crate::{Params, computational_variants::Par, runner::DefaultRunner}; use orx_concurrent_iter::{IterIntoConcurrentIter, implementations::ConIterOfIter}; /// Any regular iterator implements [`IterIntoParIter`] trait allowing them to be used @@ -116,7 +116,7 @@ pub trait IterIntoParIter: Iterator { /// /// assert_eq!(sum_evens, 3782); /// ``` - fn iter_into_par(self) -> Par, DefaultOrchestrator> + fn iter_into_par(self) -> Par, DefaultRunner> where Self: Sized, Self::Item: Send; @@ -127,7 +127,7 @@ where I: Iterator, I::Item: Send + Sync, { - fn iter_into_par(self) -> Par, DefaultOrchestrator> { + fn iter_into_par(self) -> Par, DefaultRunner> { Par::new( Default::default(), Params::default(), diff --git a/src/par_iter.rs b/src/par_iter.rs index 3497dce..1b5d455 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -1,6 +1,6 @@ use crate::ParIterResult; use crate::computational_variants::fallible_option::ParOption; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_option::{IntoOption, ParIterOption}; use crate::par_iter_result::IntoResult; use crate::using::{UsingClone, UsingFun}; @@ -15,7 +15,7 @@ use core::cmp::Ordering; use orx_concurrent_iter::ConcurrentIter; /// Parallel iterator. 
-pub trait ParIter: Sized + Send + Sync +pub trait ParIter: Sized + Send + Sync where R: ParallelRunner, { diff --git a/src/par_iter_option.rs b/src/par_iter_option.rs index 3100b51..c948941 100644 --- a/src/par_iter_option.rs +++ b/src/par_iter_option.rs @@ -1,5 +1,5 @@ use crate::default_fns::{map_count, reduce_sum, reduce_unit}; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Sum}; use core::cmp::Ordering; @@ -121,7 +121,7 @@ use core::cmp::Ordering; /// ``` /// /// [`ParIter`]: crate::ParIter -pub trait ParIterOption +pub trait ParIterOption where R: ParallelRunner, { diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs index 6be9cc3..35b2a63 100644 --- a/src/par_iter_result.rs +++ b/src/par_iter_result.rs @@ -1,5 +1,5 @@ use crate::default_fns::{map_count, reduce_sum, reduce_unit}; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::{ChunkSize, IterationOrder, NumThreads, Sum}; use crate::{ParCollectInto, ParIter, generic_values::fallible_iterators::ResultOfIter}; use core::cmp::Ordering; @@ -129,7 +129,7 @@ use core::cmp::Ordering; /// ``` /// /// [`ParIter`]: crate::ParIter -pub trait ParIterResult +pub trait ParIterResult where R: ParallelRunner, { diff --git a/src/parallel_drainable.rs b/src/parallel_drainable.rs index d291733..d889f83 100644 --- a/src/parallel_drainable.rs +++ b/src/parallel_drainable.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, runner::DefaultOrchestrator}; +use crate::{Params, computational_variants::Par, runner::DefaultRunner}; use core::ops::RangeBounds; use orx_concurrent_iter::ConcurrentDrainableOverSlice; @@ -46,7 +46,7 @@ pub trait ParallelDrainableOverSlice: ConcurrentDrainableOverSlice { fn par_drain( &mut self, range: R, - ) -> Par<::DrainingIter<'_>, DefaultOrchestrator> + ) -> Par<::DrainingIter<'_>, DefaultRunner> where R: RangeBounds, { diff --git a/src/parallelizable.rs b/src/parallelizable.rs index 0c4f381..47d1d65 100644 --- a/src/parallelizable.rs +++ b/src/parallelizable.rs @@ -1,4 +1,4 @@ -use crate::{computational_variants::Par, runner::DefaultOrchestrator, parameters::Params}; +use crate::{computational_variants::Par, parameters::Params, runner::DefaultRunner}; use orx_concurrent_iter::ConcurrentIterable; /// `Parallelizable` types are those from which parallel iterators can be created @@ -61,7 +61,7 @@ pub trait Parallelizable: ConcurrentIterable { /// assert_eq!(range.par().sum(), 10); /// assert_eq!(range.par().max(), Some(4)); /// ``` - fn par(&self) -> Par<::Iter, DefaultOrchestrator> { + fn par(&self) -> Par<::Iter, DefaultRunner> { Par::new(Default::default(), Params::default(), self.con_iter()) } } diff --git a/src/parallelizable_collection.rs b/src/parallelizable_collection.rs index 754d3bd..82992d2 100644 --- a/src/parallelizable_collection.rs +++ b/src/parallelizable_collection.rs @@ -1,4 +1,4 @@ -use crate::{Params, computational_variants::Par, runner::DefaultOrchestrator}; +use crate::{Params, computational_variants::Par, runner::DefaultRunner}; use orx_concurrent_iter::{ConcurrentCollection, ConcurrentIterable}; /// A type implementing [`ParallelizableCollection`] is a collection owning the elements such that @@ -75,7 +75,7 @@ pub trait ParallelizableCollection: ConcurrentCollection { &self, ) -> Par< <::Iterable<'_> as ConcurrentIterable>::Iter, - DefaultOrchestrator, + DefaultRunner, > { 
Par::new(Default::default(), Params::default(), self.con_iter()) } diff --git a/src/parallelizable_collection_mut.rs b/src/parallelizable_collection_mut.rs index 2225836..2c6a664 100644 --- a/src/parallelizable_collection_mut.rs +++ b/src/parallelizable_collection_mut.rs @@ -1,6 +1,6 @@ use crate::{ ParIter, ParallelizableCollection, Params, computational_variants::Par, - runner::DefaultOrchestrator, + runner::DefaultRunner, }; use orx_concurrent_iter::ConcurrentCollectionMut; @@ -60,7 +60,7 @@ pub trait ParallelizableCollectionMut: ConcurrentCollectionMut + ParallelizableC /// /// assert_eq!(&vec, &[1, 2, 13, 14]); /// ``` - fn par_mut(&mut self) -> impl ParIter { + fn par_mut(&mut self) -> impl ParIter { Par::new(Default::default(), Params::default(), self.con_iter_mut()) } } diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs index 7be1def..2bf60c3 100644 --- a/src/runner/implementations/mod.rs +++ b/src/runner/implementations/mod.rs @@ -2,9 +2,9 @@ mod tests; #[cfg(feature = "std")] -mod default_std_orchestrator; +mod std_runner; #[cfg(feature = "std")] -pub use default_std_orchestrator::DefaultStdOrchestrator; +pub use std_runner::StdRunner; #[cfg(feature = "rayon")] mod rayon; diff --git a/src/runner/implementations/rayon.rs b/src/runner/implementations/rayon.rs index 4fcc001..940e28c 100644 --- a/src/runner/implementations/rayon.rs +++ b/src/runner/implementations/rayon.rs @@ -65,7 +65,7 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { } } -// ORCH +// RUNNER pub struct RayonOrchestrator where diff --git a/src/runner/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs index d106d8f..fcad047 100644 --- a/src/runner/implementations/scoped_threadpool.rs +++ b/src/runner/implementations/scoped_threadpool.rs @@ -65,7 +65,7 @@ impl<'a> ParThreadPool for &'a mut Pool { } } -// ORCH +// RUNNER pub struct ScopedThreadPoolOrchestrator where diff --git a/src/runner/implementations/default_std_orchestrator.rs b/src/runner/implementations/std_runner.rs similarity index 77% rename from src/runner/implementations/default_std_orchestrator.rs rename to src/runner/implementations/std_runner.rs index a65d4cb..918f149 100644 --- a/src/runner/implementations/default_std_orchestrator.rs +++ b/src/runner/implementations/std_runner.rs @@ -1,5 +1,7 @@ +use crate::ParallelExecutor; use crate::par_thread_pool::ParThreadPool; use crate::{DefaultExecutor, runner::ParallelRunner}; +use core::marker::PhantomData; use core::num::NonZeroUsize; // POOL @@ -56,21 +58,32 @@ impl ParThreadPool for StdDefaultPool { } } -// ORCH +// RUNNER -#[derive(Default)] -pub struct DefaultStdOrchestrator(StdDefaultPool); +pub struct StdRunner { + pool: StdDefaultPool, + executor: PhantomData, +} + +impl Default for StdRunner { + fn default() -> Self { + Self { + pool: Default::default(), + executor: PhantomData, + } + } +} -impl ParallelRunner for DefaultStdOrchestrator { - type Runner = DefaultExecutor; +impl ParallelRunner for StdRunner { + type Runner = E; type ThreadPool = StdDefaultPool; fn thread_pool(&self) -> &Self::ThreadPool { - &self.0 + &self.pool } fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { - &mut self.0 + &mut self.pool } } diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 2d16498..4a0e1e6 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -5,9 +5,9 @@ mod parallel_runner; pub(crate) use parallel_runner::{SharedStateOf, ThreadRunnerOf}; -pub use crate::runner::implementations::DefaultStdOrchestrator; +pub use 
crate::runner::implementations::StdRunner; pub use computation_kind::ComputationKind; pub use num_spawned::NumSpawned; pub use parallel_runner::ParallelRunner; -pub type DefaultOrchestrator = DefaultStdOrchestrator; +pub type DefaultRunner = StdRunner; diff --git a/src/using/computational_variants/u_map.rs b/src/using/computational_variants/u_map.rs index a78b641..5eacb80 100644 --- a/src/using/computational_variants/u_map.rs +++ b/src/using/computational_variants/u_map.rs @@ -1,6 +1,6 @@ use crate::ParIterUsing; use crate::generic_values::Vector; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::using::computational_variants::u_xap::UParXap; use crate::using::executor::parallel_compute as prc; use crate::using::using_variants::Using; @@ -8,7 +8,7 @@ use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator that maps inputs. -pub struct UParMap +pub struct UParMap where U: Using, R: ParallelRunner, diff --git a/src/using/computational_variants/u_par.rs b/src/using/computational_variants/u_par.rs index d5328cd..d9d1232 100644 --- a/src/using/computational_variants/u_par.rs +++ b/src/using/computational_variants/u_par.rs @@ -1,7 +1,7 @@ use crate::ParIterUsing; use crate::default_fns::u_map_self; use crate::generic_values::Vector; -use crate::runner::{DefaultOrchestrator, ParallelRunner}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::using::computational_variants::u_map::UParMap; use crate::using::computational_variants::u_xap::UParXap; use crate::using::executor::parallel_compute as prc; @@ -10,7 +10,7 @@ use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params}; use orx_concurrent_iter::ConcurrentIter; /// A parallel iterator. -pub struct UPar +pub struct UPar where U: Using, R: ParallelRunner, diff --git a/src/using/computational_variants/u_xap.rs b/src/using/computational_variants/u_xap.rs index 6924302..c390683 100644 --- a/src/using/computational_variants/u_xap.rs +++ b/src/using/computational_variants/u_xap.rs @@ -2,13 +2,13 @@ use crate::using::executor::parallel_compute as prc; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterUsing, Params, generic_values::{TransformableValues, runner_results::Infallible}, - runner::{DefaultOrchestrator, ParallelRunner}, + runner::{DefaultRunner, ParallelRunner}, using::using_variants::Using, }; use orx_concurrent_iter::ConcurrentIter; // use crate::runner::parallel_runner_compute as prc; -pub struct UParXap +pub struct UParXap where U: Using, R: ParallelRunner, diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs index 2f7746b..66a222e 100644 --- a/src/using/u_par_iter.rs +++ b/src/using/u_par_iter.rs @@ -1,7 +1,7 @@ use crate::default_fns::*; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum, - runner::{DefaultOrchestrator, ParallelRunner}, + runner::{DefaultRunner, ParallelRunner}, using::using_variants::Using, }; use core::cmp::Ordering; @@ -10,7 +10,7 @@ use orx_concurrent_iter::ConcurrentIter; /// Parallel iterator which allows mutable access to a variable of type `U` within its iterator methods. /// /// Note that one variable will be created per thread used by the parallel computation. 
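///
/// A hypothetical sketch of the intended usage (the `using` constructor and the
/// closure shapes are assumptions for illustration; per the executor signatures,
/// closures of using-iterators receive the per-thread value as a `&mut` first argument):
///
/// ```ignore
/// use orx_parallel::*;
///
/// let sum = (0..1000)
///     .par()
///     .using(|_thread_idx| String::new()) // one value created per thread
///     .map(|scratch, x| {
///         scratch.push('x'); // mutable access within iterator methods
///         x
///     })
///     .sum();
/// assert_eq!(sum, 1000 * 999 / 2);
/// ```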
-pub trait ParIterUsing: Sized + Send + Sync +pub trait ParIterUsing: Sized + Send + Sync where R: ParallelRunner, U: Using, From 5abecdd4c835fcac07584718fe04fa96fdd7ca91 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 15:23:38 +0200 Subject: [PATCH 203/264] renaming --- src/runner/implementations/mod.rs | 4 ++-- src/runner/implementations/rayon.rs | 8 ++++---- src/runner/implementations/scoped_threadpool.rs | 8 ++++---- src/runner/implementations/tests/rayon.rs | 4 ++-- src/runner/implementations/tests/scoped_threadpool.rs | 4 ++-- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs index 2bf60c3..13c6d46 100644 --- a/src/runner/implementations/mod.rs +++ b/src/runner/implementations/mod.rs @@ -9,9 +9,9 @@ pub use std_runner::StdRunner; #[cfg(feature = "rayon")] mod rayon; #[cfg(feature = "rayon")] -pub use rayon::RayonOrchestrator; +pub use rayon::RunnerWithRayonPool; #[cfg(feature = "scoped_threadpool")] mod scoped_threadpool; #[cfg(feature = "scoped_threadpool")] -pub use scoped_threadpool::ScopedThreadPoolOrchestrator; +pub use scoped_threadpool::RunnerWithScopedThreadPool; diff --git a/src/runner/implementations/rayon.rs b/src/runner/implementations/rayon.rs index 940e28c..5851226 100644 --- a/src/runner/implementations/rayon.rs +++ b/src/runner/implementations/rayon.rs @@ -67,7 +67,7 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { // RUNNER -pub struct RayonOrchestrator +pub struct RunnerWithRayonPool where R: ParallelExecutor, P: SoR + ParThreadPool, @@ -76,7 +76,7 @@ where runner: PhantomData, } -impl From for RayonOrchestrator +impl From for RunnerWithRayonPool where R: ParallelExecutor, { @@ -88,7 +88,7 @@ where } } -impl<'a, R> From<&'a ThreadPool> for RayonOrchestrator<&'a ThreadPool, R> +impl<'a, R> From<&'a ThreadPool> for RunnerWithRayonPool<&'a ThreadPool, R> where R: ParallelExecutor, { @@ -100,7 +100,7 @@ where } } -impl ParallelRunner for RayonOrchestrator +impl ParallelRunner for RunnerWithRayonPool where R: ParallelExecutor, P: SoR + ParThreadPool, diff --git a/src/runner/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs index fcad047..13ad2e4 100644 --- a/src/runner/implementations/scoped_threadpool.rs +++ b/src/runner/implementations/scoped_threadpool.rs @@ -67,7 +67,7 @@ impl<'a> ParThreadPool for &'a mut Pool { // RUNNER -pub struct ScopedThreadPoolOrchestrator +pub struct RunnerWithScopedThreadPool where R: ParallelExecutor, P: SoM + ParThreadPool, @@ -76,7 +76,7 @@ where runner: PhantomData, } -impl From for ScopedThreadPoolOrchestrator +impl From for RunnerWithScopedThreadPool where R: ParallelExecutor, { @@ -88,7 +88,7 @@ where } } -impl<'a, R> From<&'a mut Pool> for ScopedThreadPoolOrchestrator<&'a mut Pool, R> +impl<'a, R> From<&'a mut Pool> for RunnerWithScopedThreadPool<&'a mut Pool, R> where R: ParallelExecutor, { @@ -100,7 +100,7 @@ where } } -impl ParallelRunner for ScopedThreadPoolOrchestrator +impl ParallelRunner for RunnerWithScopedThreadPool where R: ParallelExecutor, P: SoM + ParThreadPool, diff --git a/src/runner/implementations/tests/rayon.rs b/src/runner/implementations/tests/rayon.rs index 7475df6..ebca673 100644 --- a/src/runner/implementations/tests/rayon.rs +++ b/src/runner/implementations/tests/rayon.rs @@ -1,5 +1,5 @@ use super::run_map; -use crate::{IterationOrder, runner::implementations::RayonOrchestrator}; +use crate::{IterationOrder, runner::implementations::RunnerWithRayonPool}; use 
test_case::test_matrix;

#[cfg(miri)]
@@ -18,6 +18,6 @@ fn pool_rayon_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) {
         .num_threads(nt)
         .build()
         .unwrap();
-    let orch: RayonOrchestrator<_> = (&pool).into();
+    let orch: RunnerWithRayonPool<_> = (&pool).into();
     run_map(n, chunk, ordering, orch);
 }
diff --git a/src/runner/implementations/tests/scoped_threadpool.rs b/src/runner/implementations/tests/scoped_threadpool.rs
index 5817a27..f2a492c 100644
--- a/src/runner/implementations/tests/scoped_threadpool.rs
+++ b/src/runner/implementations/tests/scoped_threadpool.rs
@@ -1,5 +1,5 @@
 use super::run_map;
-use crate::{IterationOrder, runner::implementations::ScopedThreadPoolOrchestrator};
+use crate::{IterationOrder, runner::implementations::RunnerWithScopedThreadPool};
 use scoped_threadpool::Pool;
 use test_case::test_matrix;
@@ -16,6 +16,6 @@ const N: [usize; 2] = [1025, 4735];
 ]
 fn pool_scoped_threadpool_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) {
     let mut pool = Pool::new(nt as u32);
-    let orch: ScopedThreadPoolOrchestrator<_> = (&mut pool).into();
+    let orch: RunnerWithScopedThreadPool<_> = (&mut pool).into();
     run_map(n, chunk, ordering, orch);
 }

From 383c09acbc37a015c8a4d7a1426c8b69a139af82 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Wed, 17 Sep 2025 15:56:41 +0200
Subject: [PATCH 204/264] document ParThreadPool

---
 Cargo.toml | 2 +-
 src/lib.rs | 8 ++
 src/par_thread_pool.rs | 98 ++++++++++++++++---
 src/runner/implementations/rayon.rs | 10 +-
 .../implementations/scoped_threadpool.rs | 10 +-
 src/runner/mod.rs | 10 +-
 6 files changed, 107 insertions(+), 31 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 9a8973b..4e2540e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -41,7 +41,7 @@ test-case = "3.3.1"
 name = "find_iter_into_par"
 harness = false

-[package.metadata."docs.rs"]
+[package.metadata.docs.rs]
 all-features = true

 [features]
diff --git a/src/lib.rs b/src/lib.rs
index 4402fc9..45e4f57 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -72,3 +72,11 @@ pub use parallelizable_collection_mut::ParallelizableCollectionMut;
 pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params};
 pub use special_type_sets::Sum;
 pub use using::ParIterUsing;
+
+pub use runner::DefaultRunner;
+#[cfg(feature = "rayon")]
+pub use runner::RunnerWithRayonPool;
+#[cfg(feature = "scoped_threadpool")]
+pub use runner::RunnerWithScopedThreadPool;
+#[cfg(feature = "std")]
+pub use runner::StdRunner;
diff --git a/src/par_thread_pool.rs b/src/par_thread_pool.rs
index 105ad41..a99b96f 100644
--- a/src/par_thread_pool.rs
+++ b/src/par_thread_pool.rs
@@ -5,27 +5,112 @@ use orx_concurrent_bag::ConcurrentBag;

 /// A thread pool that can be used for parallel computation.
 ///
+/// orx-parallel abstracts away the thread pool and can work with different
+/// thread pool implementations.
+///
+/// Parallel computation will not use any threads outside the pool.
+/// The default std thread pool assumes all OS threads are available to the pool.
+///
 /// # Examples
 ///
+/// ## Default std pool
+///
+/// **requires std feature**
+///
+/// The default parallel runner spawns scoped threads using `std::thread::scope`.
+///
+/// ```
+/// use orx_parallel::*;
+///
+/// let sum = (0..1000).par().sum();
+/// assert_eq!(sum, 1000 * 999 / 2);
+///
+/// // this is equivalent to
+/// let sum = (0..1000).par().with_runner(DefaultRunner::default()).sum();
+/// assert_eq!(sum, 1000 * 999 / 2);
+/// ```
+///
+/// ## Rayon thread pool
+///
+/// **requires rayon feature**
+///
+/// The following example demonstrates using a rayon thread pool as the thread provider of
+/// the parallel computation.
+///
+/// ```
+/// use orx_parallel::*;
+///
+/// #[cfg(feature = "rayon")]
+/// {
+///     let pool = rayon::ThreadPoolBuilder::new()
+///         .num_threads(4)
+///         .build()
+///         .unwrap();
+///
+///     // creating a runner for the computation
+///     let runner = RunnerWithRayonPool::from(&pool);
+///     let sum = (0..1000).par().with_runner(runner).sum();
+///     assert_eq!(sum, 1000 * 999 / 2);
+///
+///     // or reuse a runner multiple times (identical under the hood)
+///     let mut runner = RunnerWithRayonPool::from(&pool);
+///     let sum = (0..1000).par().with_runner(&mut runner).sum();
+///     assert_eq!(sum, 1000 * 999 / 2);
+/// }
+/// ```
+///
+/// Note that since rayon::ThreadPool::scope only requires a shared reference `&self`,
+/// we can create as many runners as we want from the same thread pool and use them concurrently.
+///
+/// ## Scoped thread pool
+///
+/// **requires scoped_threadpool feature**
+///
+/// The following example demonstrates using a scoped_threadpool pool as the thread provider of
+/// the parallel computation.
+///
+/// ```
+/// use orx_parallel::*;
+///
+/// #[cfg(feature = "scoped_threadpool")]
+/// {
+///     // creating a runner for the computation
+///     let mut pool = scoped_threadpool::Pool::new(4);
+///     let runner = RunnerWithScopedThreadPool::from(&mut pool);
+///     let sum = (0..1000).par().with_runner(runner).sum();
+///     assert_eq!(sum, 1000 * 999 / 2);
+///
+///     // or reuse a runner multiple times (identical under the hood)
+///     let mut pool = scoped_threadpool::Pool::new(4);
+///     let mut runner = RunnerWithScopedThreadPool::from(&mut pool);
+///     let sum = (0..1000).par().with_runner(&mut runner).sum();
+///     assert_eq!(sum, 1000 * 999 / 2);
+/// }
+/// ```
+///
+/// Since scoped_threadpool::Pool::scoped requires an exclusive reference `&mut self`,
+/// we can create only one runner from a pool at a time; note the use of `&mut pool` in runner creation.
 pub trait ParThreadPool {
+    /// Scope type of the thread pool.
     type ScopeRef<'s, 'env, 'scope>
     where
         'scope: 's,
         'env: 'scope + 's;

+    /// Executes the `work` within scope `s`.
     fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
     where
         'scope: 's,
         'env: 'scope + 's,
         W: Fn() + Send + 'scope + 'env;

+    /// Executes the scoped computation `f`.
     fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
     where
         'env: 'scope,
         for<'s> F: FnOnce(Self::ScopeRef<'s, 'env, 'scope>) + Send;

+    /// Returns the maximum number of threads available in the pool.
fn max_num_threads(&self) -> NonZeroUsize; } @@ -86,16 +171,3 @@ pub trait ParThreadPoolCompute: ParThreadPool { } impl ParThreadPoolCompute for X {} - -#[cfg(test)] -mod tsts { - use crate::*; - - #[test] - fn abc() { - let pool = rayon::ThreadPoolBuilder::new() - .num_threads(4) - .build() - .unwrap(); - } -} diff --git a/src/runner/implementations/rayon.rs b/src/runner/implementations/rayon.rs index 5851226..bcc776f 100644 --- a/src/runner/implementations/rayon.rs +++ b/src/runner/implementations/rayon.rs @@ -76,10 +76,7 @@ where runner: PhantomData, } -impl From for RunnerWithRayonPool -where - R: ParallelExecutor, -{ +impl From for RunnerWithRayonPool { fn from(pool: ThreadPool) -> Self { Self { pool, @@ -88,10 +85,7 @@ where } } -impl<'a, R> From<&'a ThreadPool> for RunnerWithRayonPool<&'a ThreadPool, R> -where - R: ParallelExecutor, -{ +impl<'a> From<&'a ThreadPool> for RunnerWithRayonPool<&'a ThreadPool, DefaultExecutor> { fn from(pool: &'a ThreadPool) -> Self { Self { pool, diff --git a/src/runner/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs index 13ad2e4..ef83ac6 100644 --- a/src/runner/implementations/scoped_threadpool.rs +++ b/src/runner/implementations/scoped_threadpool.rs @@ -76,10 +76,7 @@ where runner: PhantomData, } -impl From for RunnerWithScopedThreadPool -where - R: ParallelExecutor, -{ +impl From for RunnerWithScopedThreadPool { fn from(pool: Pool) -> Self { Self { pool, @@ -88,10 +85,7 @@ where } } -impl<'a, R> From<&'a mut Pool> for RunnerWithScopedThreadPool<&'a mut Pool, R> -where - R: ParallelExecutor, -{ +impl<'a> From<&'a mut Pool> for RunnerWithScopedThreadPool<&'a mut Pool, DefaultExecutor> { fn from(pool: &'a mut Pool) -> Self { Self { pool, diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 4a0e1e6..fe0d2c3 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -5,9 +5,17 @@ mod parallel_runner; pub(crate) use parallel_runner::{SharedStateOf, ThreadRunnerOf}; -pub use crate::runner::implementations::StdRunner; pub use computation_kind::ComputationKind; pub use num_spawned::NumSpawned; pub use parallel_runner::ParallelRunner; pub type DefaultRunner = StdRunner; + +#[cfg(feature = "std")] +pub use implementations::StdRunner; + +#[cfg(feature = "rayon")] +pub use implementations::RunnerWithRayonPool; + +#[cfg(feature = "scoped_threadpool")] +pub use implementations::RunnerWithScopedThreadPool; From 5d2033033d6953f7b75dbfd8606521fca3bda577 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 20:59:08 +0200 Subject: [PATCH 205/264] documentation --- src/runner/implementations/rayon.rs | 3 +- .../implementations/scoped_threadpool.rs | 3 +- src/runner/implementations/std_runner.rs | 3 +- src/runner/mod.rs | 8 +++-- src/runner/parallel_runner.rs | 30 +++++++++++++------ 5 files changed, 33 insertions(+), 14 deletions(-) diff --git a/src/runner/implementations/rayon.rs b/src/runner/implementations/rayon.rs index bcc776f..0f658e9 100644 --- a/src/runner/implementations/rayon.rs +++ b/src/runner/implementations/rayon.rs @@ -67,6 +67,7 @@ impl<'a> ParThreadPool for &'a rayon::ThreadPool { // RUNNER +/// Parallel runner using threads provided by rayon thread pool. 
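+///
+/// A usage sketch, mirroring the `ParThreadPool` documentation:
+///
+/// ```
+/// use orx_parallel::*;
+///
+/// #[cfg(feature = "rayon")]
+/// {
+///     let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build().unwrap();
+///     let runner = RunnerWithRayonPool::from(&pool);
+///     let sum = (0..1000).par().with_runner(runner).sum();
+///     assert_eq!(sum, 1000 * 999 / 2);
+/// }
+/// ```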
pub struct RunnerWithRayonPool where R: ParallelExecutor, @@ -99,7 +100,7 @@ where R: ParallelExecutor, P: SoR + ParThreadPool, { - type Runner = R; + type Executor = R; type ThreadPool = P; diff --git a/src/runner/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs index ef83ac6..938273f 100644 --- a/src/runner/implementations/scoped_threadpool.rs +++ b/src/runner/implementations/scoped_threadpool.rs @@ -67,6 +67,7 @@ impl<'a> ParThreadPool for &'a mut Pool { // RUNNER +/// Parallel runner using threads provided by scoped_threadpool. pub struct RunnerWithScopedThreadPool where R: ParallelExecutor, @@ -99,7 +100,7 @@ where R: ParallelExecutor, P: SoM + ParThreadPool, { - type Runner = R; + type Executor = R; type ThreadPool = P; diff --git a/src/runner/implementations/std_runner.rs b/src/runner/implementations/std_runner.rs index 918f149..686452c 100644 --- a/src/runner/implementations/std_runner.rs +++ b/src/runner/implementations/std_runner.rs @@ -60,6 +60,7 @@ impl ParThreadPool for StdDefaultPool { // RUNNER +/// Parallel runner using std threads. pub struct StdRunner { pool: StdDefaultPool, executor: PhantomData, @@ -75,7 +76,7 @@ impl Default for StdRunner { } impl ParallelRunner for StdRunner { - type Runner = E; + type Executor = E; type ThreadPool = StdDefaultPool; diff --git a/src/runner/mod.rs b/src/runner/mod.rs index fe0d2c3..5e2346b 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -9,8 +9,6 @@ pub use computation_kind::ComputationKind; pub use num_spawned::NumSpawned; pub use parallel_runner::ParallelRunner; -pub type DefaultRunner = StdRunner; - #[cfg(feature = "std")] pub use implementations::StdRunner; @@ -19,3 +17,9 @@ pub use implementations::RunnerWithRayonPool; #[cfg(feature = "scoped_threadpool")] pub use implementations::RunnerWithScopedThreadPool; + +/// Default runner used by orx-parallel computations: +/// +/// * [`StdRunner`] when "std" feature is enabled, +/// * TODO otherwise. +pub type DefaultRunner = StdRunner; diff --git a/src/runner/parallel_runner.rs b/src/runner/parallel_runner.rs index 5604055..eb92399 100644 --- a/src/runner/parallel_runner.rs +++ b/src/runner/parallel_runner.rs @@ -8,27 +8,34 @@ use alloc::vec::Vec; use core::num::NonZeroUsize; use orx_concurrent_iter::ConcurrentIter; +/// Parallel runner defining how the threads must be spawned and job must be distributed. pub trait ParallelRunner { - type Runner: ParallelExecutor; + /// Parallel executor responsible for distribution of tasks to the threads. + type Executor: ParallelExecutor; + /// Thread pool responsible for providing threads to the parallel computation. type ThreadPool: ParThreadPool; - fn new_runner( + /// Creates a new parallel executor for a parallel computation. + fn new_executor( &self, kind: ComputationKind, params: Params, initial_input_len: Option, - ) -> Self::Runner { + ) -> Self::Executor { let max_num_threads = self.max_num_threads_for_computation(params, initial_input_len); - ::new(kind, params, initial_input_len, max_num_threads) + ::new(kind, params, initial_input_len, max_num_threads) } + /// Reference to the underlying thread pool. fn thread_pool(&self) -> &Self::ThreadPool; + /// Mutable reference to the underlying thread pool. fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool; // derived + /// Runs `thread_do` using threads provided by the thread pool. 
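+    ///
+    /// As implemented below: a new executor and its shared state are created for
+    /// the computation, and the pool keeps spawning workers while the executor's
+    /// `do_spawn_new` criterion holds, each worker running `thread_do` with its
+    /// own thread executor.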
fn run_all( &mut self, params: Params, @@ -40,7 +47,7 @@ pub trait ParallelRunner { I: ConcurrentIter, F: Fn(NumSpawned, &I, &SharedStateOf, ThreadRunnerOf) + Sync, { - let runner = self.new_runner(kind, params, iter.try_get_len()); + let runner = self.new_executor(kind, params, iter.try_get_len()); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = |num_spawned| { @@ -54,6 +61,7 @@ pub trait ParallelRunner { self.thread_pool_mut().run_in_pool(do_spawn, work) } + /// Runs `thread_map` using threads provided by the thread pool. fn map_all( &mut self, params: Params, @@ -70,7 +78,7 @@ pub trait ParallelRunner { F::Error: Send, { let iter_len = iter.try_get_len(); - let runner = self.new_runner(kind, params, iter_len); + let runner = self.new_executor(kind, params, iter_len); let state = runner.new_shared_state(); let do_spawn = |num_spawned| runner.do_spawn_new(num_spawned, &state, &iter); let work = |nt| thread_map(nt, &iter, &state, runner.new_thread_executor(&state)); @@ -79,6 +87,7 @@ pub trait ParallelRunner { .map_in_pool::(do_spawn, work, max_num_threads) } + /// Runs infallible `thread_map` using threads provided by the thread pool. fn map_infallible( &mut self, params: Params, @@ -95,6 +104,8 @@ pub trait ParallelRunner { self.map_all::(params, iter, kind, thread_map) } + /// Returns the maximum number of threads that can be used for the computation defined by + /// the `params` and input `iter_len`. fn max_num_threads_for_computation( &self, params: Params, @@ -115,9 +126,10 @@ pub trait ParallelRunner { } } -pub(crate) type SharedStateOf = <::Runner as ParallelExecutor>::SharedState; +pub(crate) type SharedStateOf = + <::Executor as ParallelExecutor>::SharedState; pub(crate) type ThreadRunnerOf = - <::Runner as ParallelExecutor>::ThreadExecutor; + <::Executor as ParallelExecutor>::ThreadExecutor; // auto impl for &mut pool @@ -125,7 +137,7 @@ impl<'a, O> ParallelRunner for &'a mut O where O: ParallelRunner, { - type Runner = O::Runner; + type Executor = O::Executor; type ThreadPool = O::ThreadPool; From 3c76424c314c1eca6ee256b28a5a84ea711ccb87 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 21:00:30 +0200 Subject: [PATCH 206/264] documentation --- src/using/collect_into/mod.rs | 2 +- src/using/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/using/collect_into/mod.rs b/src/using/collect_into/mod.rs index 1a85260..322d927 100644 --- a/src/using/collect_into/mod.rs +++ b/src/using/collect_into/mod.rs @@ -4,4 +4,4 @@ mod split_vec; mod u_par_collect_into; mod vec; -pub use u_par_collect_into::UParCollectIntoCore; +pub(crate) use u_par_collect_into::UParCollectIntoCore; diff --git a/src/using/mod.rs b/src/using/mod.rs index f5a7fd2..9e261ba 100644 --- a/src/using/mod.rs +++ b/src/using/mod.rs @@ -4,7 +4,7 @@ mod executor; mod u_par_iter; mod using_variants; -pub use collect_into::UParCollectIntoCore; -pub use computational_variants::{UPar, UParMap, UParXap}; +pub(crate) use collect_into::UParCollectIntoCore; +pub(crate) use computational_variants::{UPar, UParMap, UParXap}; pub use u_par_iter::ParIterUsing; pub use using_variants::{Using, UsingClone, UsingFun}; From 0ddb6530b1bee17bf3bc23c262c085dfe93de92e Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 21:38:57 +0200 Subject: [PATCH 207/264] revise features and exports --- Cargo.toml | 1 + src/lib.rs | 1 + src/runner/implementations/mod.rs | 3 + src/runner/implementations/sequential.rs | 63 
+++++++++++++++++++
 src/runner/implementations/std_runner.rs | 2 +-
 src/runner/implementations/tests/mod.rs | 6 ++
 .../implementations/tests/sequential.rs | 19 ++++++
 src/runner/implementations/tests/std.rs | 19 ++++++
 src/runner/mod.rs | 11 +++-
 9 files changed, 123 insertions(+), 2 deletions(-)
 create mode 100644 src/runner/implementations/sequential.rs
 create mode 100644 src/runner/implementations/tests/sequential.rs
 create mode 100644 src/runner/implementations/tests/std.rs

diff --git a/Cargo.toml b/Cargo.toml
index 4e2540e..d88d28b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -45,6 +45,7 @@ harness = false
 all-features = true

 [features]
+# default = []
 # default = ["std"]
 default = ["std", "scoped_threadpool", "rayon", "generic_iterator"]
 std = []
 generic_iterator = ["rayon"]
diff --git a/src/lib.rs b/src/lib.rs
index 45e4f57..b6156f7 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -78,5 +78,6 @@ pub use runner::DefaultRunner;
 pub use runner::RunnerWithRayonPool;
 #[cfg(feature = "scoped_threadpool")]
 pub use runner::RunnerWithScopedThreadPool;
+pub use runner::SequentialRunner;
 #[cfg(feature = "std")]
 pub use runner::StdRunner;
diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs
index 13c6d46..142d26e 100644
--- a/src/runner/implementations/mod.rs
+++ b/src/runner/implementations/mod.rs
@@ -1,6 +1,9 @@
 #[cfg(test)]
 mod tests;

+mod sequential;
+pub use sequential::SequentialRunner;
+
 #[cfg(feature = "std")]
 mod std_runner;
 #[cfg(feature = "std")]
diff --git a/src/runner/implementations/sequential.rs b/src/runner/implementations/sequential.rs
new file mode 100644
index 0000000..84014e6
--- /dev/null
+++ b/src/runner/implementations/sequential.rs
@@ -0,0 +1,63 @@
+use crate::{DefaultExecutor, ParThreadPool, runner::ParallelRunner};
+use core::num::NonZeroUsize;
+
+// POOL
+
+#[derive(Default)]
+pub struct SequentialPool;
+
+impl ParThreadPool for SequentialPool {
+    type ScopeRef<'s, 'env, 'scope>
+        = ()
+    where
+        'scope: 's,
+        'env: 'scope + 's;
+
+    fn run_in_scope<'s, 'env, 'scope, W>(_: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
+    where
+        'scope: 's,
+        'env: 'scope + 's,
+        W: Fn() + Send + 'scope + 'env,
+    {
+        work()
+    }
+
+    fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
+    where
+        'env: 'scope,
+        for<'s> F: FnOnce(()) + Send,
+    {
+        f(())
+    }
+
+    fn max_num_threads(&self) -> NonZeroUsize {
+        NonZeroUsize::new(1).expect(">0")
+    }
+}
+
+// RUNNER
+
+/// Sequential runner using the main thread.
+///
+/// This is the default runner when the "std" feature is not enabled.
+///
+/// Parallelization can be achieved by providing a parallel runner
+/// using the [`with_runner`] method of parallel iterators.
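+///
+/// A minimal sketch; the computation below runs entirely on the main thread
+/// (`SequentialRunner` is selected explicitly here, while without the "std"
+/// feature it is already the default):
+///
+/// ```
+/// use orx_parallel::*;
+///
+/// let sum = (0..1000).par().with_runner(SequentialRunner::default()).sum();
+/// assert_eq!(sum, 1000 * 999 / 2);
+/// ```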
+///
+/// [`with_runner`]: crate::ParIter::with_runner
+#[derive(Default)]
+pub struct SequentialRunner(SequentialPool);
+
+impl ParallelRunner for SequentialRunner {
+    type Executor = DefaultExecutor;
+
+    type ThreadPool = SequentialPool;
+
+    fn thread_pool(&self) -> &Self::ThreadPool {
+        &self.0
+    }
+
+    fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool {
+        &mut self.0
+    }
+}
diff --git a/src/runner/implementations/std_runner.rs b/src/runner/implementations/std_runner.rs
index 686452c..4021f2a 100644
--- a/src/runner/implementations/std_runner.rs
+++ b/src/runner/implementations/std_runner.rs
@@ -66,7 +66,7 @@ pub struct StdRunner {
     executor: PhantomData,
 }

-impl Default for StdRunner {
+impl Default for StdRunner {
     fn default() -> Self {
         Self {
             pool: Default::default(),
diff --git a/src/runner/implementations/tests/mod.rs b/src/runner/implementations/tests/mod.rs
index fabb867..e779f13 100644
--- a/src/runner/implementations/tests/mod.rs
+++ b/src/runner/implementations/tests/mod.rs
@@ -4,5 +4,11 @@ mod rayon;
 #[cfg(feature = "scoped_threadpool")]
 mod scoped_threadpool;

+#[cfg(feature = "std")]
+mod std;
+
+mod sequential;
+
 mod utils;
+
 use utils::run_map;
diff --git a/src/runner/implementations/tests/sequential.rs b/src/runner/implementations/tests/sequential.rs
new file mode 100644
index 0000000..62492ba
--- /dev/null
+++ b/src/runner/implementations/tests/sequential.rs
@@ -0,0 +1,19 @@
+use super::run_map;
+use crate::{IterationOrder, runner::implementations::SequentialRunner};
+use test_case::test_matrix;
+
+#[cfg(miri)]
+const N: [usize; 2] = [37, 125];
+#[cfg(not(miri))]
+const N: [usize; 2] = [1025, 4735];
+
+#[test_matrix(
+    [0, 1, N[0], N[1]],
+    [1, 4],
+    [1, 64],
+    [IterationOrder::Ordered, IterationOrder::Arbitrary])
+]
+fn sequential_map(n: usize, _: usize, chunk: usize, ordering: IterationOrder) {
+    let orch = SequentialRunner::default();
+    run_map(n, chunk, ordering, orch);
+}
diff --git a/src/runner/implementations/tests/std.rs b/src/runner/implementations/tests/std.rs
new file mode 100644
index 0000000..5bc174d
--- /dev/null
+++ b/src/runner/implementations/tests/std.rs
@@ -0,0 +1,19 @@
+use super::run_map;
+use crate::{IterationOrder, StdRunner};
+use test_case::test_matrix;
+
+#[cfg(miri)]
+const N: [usize; 2] = [37, 125];
+#[cfg(not(miri))]
+const N: [usize; 2] = [1025, 4735];
+
+#[test_matrix(
+    [0, 1, N[0], N[1]],
+    [1, 4],
+    [1, 64],
+    [IterationOrder::Ordered, IterationOrder::Arbitrary])
+]
+fn std_runner_map(n: usize, _: usize, chunk: usize, ordering: IterationOrder) {
+    let orch = StdRunner::default();
+    run_map(n, chunk, ordering, orch);
+}
diff --git a/src/runner/mod.rs b/src/runner/mod.rs
index 5e2346b..0ba776c 100644
--- a/src/runner/mod.rs
+++ b/src/runner/mod.rs
@@ -9,6 +9,8 @@ pub use computation_kind::ComputationKind;
 pub use num_spawned::NumSpawned;
 pub use parallel_runner::ParallelRunner;

+pub use implementations::SequentialRunner;
+
 #[cfg(feature = "std")]
 pub use implementations::StdRunner;

@@ -21,5 +23,12 @@ pub use implementations::RunnerWithScopedThreadPool;

 /// Default runner used by orx-parallel computations:
 ///
 /// * [`StdRunner`] when "std" feature is enabled,
-/// * TODO otherwise.
+/// * `SequentialRunner` otherwise.
+#[cfg(feature = "std")]
 pub type DefaultRunner = StdRunner;
+/// Default runner used by orx-parallel computations:
+///
+/// * `StdRunner` when "std" feature is enabled,
+/// * [`SequentialRunner`] otherwise.
+#[cfg(not(feature = "std"))] +pub type DefaultRunner = SequentialRunner; From ab9b0cababb3eefc5bbd5697e30e0c8629834797 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 21:39:55 +0200 Subject: [PATCH 208/264] std feature --- Cargo.toml | 4 ++-- src/env.rs | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d88d28b..b84d8e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,8 +45,8 @@ harness = false all-features = true [features] -# default = [] +default = [] # default = ["std"] -default = ["std", "scoped_threadpool", "rayon", "generic_iterator"] +# default = ["std", "scoped_threadpool", "rayon", "generic_iterator"] std = [] generic_iterator = ["rayon"] diff --git a/src/env.rs b/src/env.rs index 02fb12a..24635f7 100644 --- a/src/env.rs +++ b/src/env.rs @@ -1,5 +1,6 @@ use core::num::NonZeroUsize; +#[cfg(feature = "std")] const MAX_NUM_THREADS_ENV_VARIABLE: &str = "ORX_PARALLEL_MAX_NUM_THREADS"; pub fn max_num_threads_by_env_variable() -> Option { From 709ab2f9db6709443174ed9d2e8467409c07ea06 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 21:40:52 +0200 Subject: [PATCH 209/264] set default features as std --- Cargo.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b84d8e3..7af1184 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,8 +45,6 @@ harness = false all-features = true [features] -default = [] -# default = ["std"] -# default = ["std", "scoped_threadpool", "rayon", "generic_iterator"] +default = ["std"] std = [] generic_iterator = ["rayon"] From be1628caf31033b1e568ac6d1c21ada0bc0da602 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 21:45:35 +0200 Subject: [PATCH 210/264] clippy fixes --- src/executor/parallel_compute/next.rs | 4 ++-- src/executor/parallel_compute/next_any.rs | 4 ++-- src/executor/parallel_compute/reduce.rs | 4 ++-- src/runner/implementations/std_runner.rs | 6 +++--- src/runner/parallel_runner.rs | 2 +- src/using/executor/parallel_compute/next.rs | 4 ++-- src/using/executor/parallel_compute/next_any.rs | 4 ++-- src/using/executor/parallel_compute/reduce.rs | 4 ++-- src/using/u_par_iter.rs | 2 +- 9 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/executor/parallel_compute/next.rs b/src/executor/parallel_compute/next.rs index 9fc77a1..5653175 100644 --- a/src/executor/parallel_compute/next.rs +++ b/src/executor/parallel_compute/next.rs @@ -26,7 +26,7 @@ where let next = match result { Ok(results) => results .into_iter() - .filter_map(|x| x) + .flatten() .min_by_key(|x| x.0) .map(|x| x.1), }; @@ -67,6 +67,6 @@ where ComputationKind::Collect, thread_map, ); - let next = result.map(|results| NextSuccess::reduce(results.into_iter().filter_map(|x| x))); + let next = result.map(|results| NextSuccess::reduce(results.into_iter().flatten())); (num_spawned, next) } diff --git a/src/executor/parallel_compute/next_any.rs b/src/executor/parallel_compute/next_any.rs index 49dae38..c3d5d3c 100644 --- a/src/executor/parallel_compute/next_any.rs +++ b/src/executor/parallel_compute/next_any.rs @@ -24,7 +24,7 @@ where orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); let next = match result { - Ok(results) => results.into_iter().filter_map(|x| x).next(), + Ok(results) => results.into_iter().flatten().next(), }; (num_spawned, next) } @@ -54,6 +54,6 @@ where ComputationKind::Collect, thread_map, ); - let next = result.map(|results| results.into_iter().filter_map(|x| x).next()); + let next = result.map(|results| 
results.into_iter().flatten().next()); (num_spawned, next) } diff --git a/src/executor/parallel_compute/reduce.rs b/src/executor/parallel_compute/reduce.rs index ba9c0ba..663b1c9 100644 --- a/src/executor/parallel_compute/reduce.rs +++ b/src/executor/parallel_compute/reduce.rs @@ -26,7 +26,7 @@ where orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); let acc = match result { - Ok(results) => results.into_iter().filter_map(|x| x).reduce(reduce), + Ok(results) => results.into_iter().flatten().reduce(reduce), }; (num_spawned, acc) @@ -59,6 +59,6 @@ where ComputationKind::Collect, thread_map, ); - let acc = result.map(|results| results.into_iter().filter_map(|x| x).reduce(reduce)); + let acc = result.map(|results| results.into_iter().flatten().reduce(reduce)); (num_spawned, acc) } diff --git a/src/runner/implementations/std_runner.rs b/src/runner/implementations/std_runner.rs index 4021f2a..4830839 100644 --- a/src/runner/implementations/std_runner.rs +++ b/src/runner/implementations/std_runner.rs @@ -6,7 +6,7 @@ use core::num::NonZeroUsize; // POOL -const MAX_UNSET_NUM_THREADS: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(8) }; +const MAX_UNSET_NUM_THREADS: NonZeroUsize = NonZeroUsize::new(8).expect(">0"); pub struct StdDefaultPool { max_num_threads: NonZeroUsize, @@ -45,7 +45,7 @@ impl ParThreadPool for StdDefaultPool { 'env: 'scope, for<'s> F: FnOnce(&'s std::thread::Scope<'s, 'env>) + Send, { - std::thread::scope(|s| f(&s)) + std::thread::scope(f) } fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) @@ -54,7 +54,7 @@ impl ParThreadPool for StdDefaultPool { 'env: 'scope + 's, W: Fn() + Send + 'scope + 'env, { - s.spawn(move || work()); + s.spawn(work); } } diff --git a/src/runner/parallel_runner.rs b/src/runner/parallel_runner.rs index eb92399..b3b8f09 100644 --- a/src/runner/parallel_runner.rs +++ b/src/runner/parallel_runner.rs @@ -133,7 +133,7 @@ pub(crate) type ThreadRunnerOf = // auto impl for &mut pool -impl<'a, O> ParallelRunner for &'a mut O +impl ParallelRunner for &'_ mut O where O: ParallelRunner, { diff --git a/src/using/executor/parallel_compute/next.rs b/src/using/executor/parallel_compute/next.rs index 7d44845..434b3e7 100644 --- a/src/using/executor/parallel_compute/next.rs +++ b/src/using/executor/parallel_compute/next.rs @@ -30,7 +30,7 @@ where let next = match result { Ok(results) => results .into_iter() - .filter_map(|x| x) + .flatten() .min_by_key(|x| x.0) .map(|x| x.1), }; @@ -74,6 +74,6 @@ where ComputationKind::Collect, thread_map, ); - let next = result.map(|results| NextSuccess::reduce(results.into_iter().filter_map(|x| x))); + let next = result.map(|results| NextSuccess::reduce(results.into_iter().flatten())); (num_spawned, next) } diff --git a/src/using/executor/parallel_compute/next_any.rs b/src/using/executor/parallel_compute/next_any.rs index 7ff51bf..2ff8389 100644 --- a/src/using/executor/parallel_compute/next_any.rs +++ b/src/using/executor/parallel_compute/next_any.rs @@ -28,7 +28,7 @@ where orchestrator.map_infallible(params, iter, ComputationKind::Collect, thread_map); let next = match result { - Ok(results) => results.into_iter().filter_map(|x| x).next(), + Ok(results) => results.into_iter().flatten().next(), }; (num_spawned, next) } @@ -61,6 +61,6 @@ where ComputationKind::Collect, thread_map, ); - let next = result.map(|results| results.into_iter().filter_map(|x| x).next()); + let next = result.map(|results| results.into_iter().flatten().next()); (num_spawned, next) } diff --git 
a/src/using/executor/parallel_compute/reduce.rs b/src/using/executor/parallel_compute/reduce.rs index de20823..4f3d90b 100644 --- a/src/using/executor/parallel_compute/reduce.rs +++ b/src/using/executor/parallel_compute/reduce.rs @@ -34,7 +34,7 @@ where let acc = match result { Ok(results) => results .into_iter() - .filter_map(|x| x) + .flatten() .reduce(|a, b| reduce(&mut u, a, b)), }; @@ -76,7 +76,7 @@ where let acc = result.map(|results| { results .into_iter() - .filter_map(|x| x) + .flatten() .reduce(|a, b| reduce(&mut u, a, b)) }); (num_spawned, acc) diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs index 66a222e..6e67ebc 100644 --- a/src/using/u_par_iter.rs +++ b/src/using/u_par_iter.rs @@ -56,7 +56,7 @@ where /// See [crate::ParIter::iteration_order] for details. fn iteration_order(self, collect: IterationOrder) -> Self; - /// Rather than the [`DefaultOrchestrator`], uses the parallel runner `Q` which implements [`ParallelRunner`]. + /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`]. /// /// See [crate::ParIter::with_runner] for details. fn with_runner( From d69f803c0b2004dbd0874b41c47b8cabbb76f1f8 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 21:46:32 +0200 Subject: [PATCH 211/264] clippy fixes --- src/runner/implementations/rayon.rs | 2 +- src/runner/implementations/scoped_threadpool.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/runner/implementations/rayon.rs b/src/runner/implementations/rayon.rs index 0f658e9..5e01f68 100644 --- a/src/runner/implementations/rayon.rs +++ b/src/runner/implementations/rayon.rs @@ -36,7 +36,7 @@ impl ParThreadPool for ThreadPool { } } -impl<'a> ParThreadPool for &'a rayon::ThreadPool { +impl ParThreadPool for &rayon::ThreadPool { type ScopeRef<'s, 'env, 'scope> = &'s rayon::Scope<'scope> where diff --git a/src/runner/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs index 938273f..b9d6bb1 100644 --- a/src/runner/implementations/scoped_threadpool.rs +++ b/src/runner/implementations/scoped_threadpool.rs @@ -36,7 +36,7 @@ impl ParThreadPool for Pool { } } -impl<'a> ParThreadPool for &'a mut Pool { +impl ParThreadPool for &mut Pool { type ScopeRef<'s, 'env, 'scope> = &'s scoped_threadpool::Scope<'env, 'scope> where From ccc47c1337a69590d8b6240af41e33bc20dcd1b9 Mon Sep 17 00:00:00 2001 From: orxfun Date: Wed, 17 Sep 2025 21:47:08 +0200 Subject: [PATCH 212/264] fmt --- src/collect_into/collect.rs | 2 +- src/computational_variants/fallible_option.rs | 2 +- .../fallible_result/map_result.rs | 4 ++-- .../fallible_result/xap_result.rs | 4 ++-- src/computational_variants/map.rs | 4 ++-- src/computational_variants/par.rs | 4 ++-- src/computational_variants/tests/map/collect.rs | 3 +-- src/computational_variants/tests/map/find.rs | 7 ++----- src/computational_variants/tests/map/reduce.rs | 11 +++-------- src/computational_variants/xap.rs | 4 ++-- src/executor/parallel_executor.rs | 2 +- src/par_iter.rs | 2 +- src/parallelizable_collection_mut.rs | 3 +-- tests/whilst/collect.rs | 2 +- tests/whilst/find.rs | 2 +- tests/whilst/reduce.rs | 2 +- 16 files changed, 24 insertions(+), 34 deletions(-) diff --git a/src/collect_into/collect.rs b/src/collect_into/collect.rs index da7a585..a33c810 100644 --- a/src/collect_into/collect.rs +++ b/src/collect_into/collect.rs @@ -1,9 +1,9 @@ use crate::Params; +use crate::executor::parallel_compute as prc; use crate::generic_values::runner_results::{ Fallibility, Infallible, 
ParallelCollect, ParallelCollectArbitrary, Stop, }; use crate::runner::{NumSpawned, ParallelRunner}; -use crate::executor::parallel_compute as prc; use crate::{IterationOrder, generic_values::Values}; use orx_concurrent_iter::ConcurrentIter; use orx_fixed_vec::IntoConcurrentPinnedVec; diff --git a/src/computational_variants/fallible_option.rs b/src/computational_variants/fallible_option.rs index 3ea5179..7fb1a15 100644 --- a/src/computational_variants/fallible_option.rs +++ b/src/computational_variants/fallible_option.rs @@ -1,7 +1,7 @@ use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIterResult, - runner::{DefaultRunner, ParallelRunner}, par_iter_option::{ParIterOption, ResultIntoOption}, + runner::{DefaultRunner, ParallelRunner}, }; use core::marker::PhantomData; diff --git a/src/computational_variants/fallible_result/map_result.rs b/src/computational_variants/fallible_result/map_result.rs index 99089f2..2ed891a 100644 --- a/src/computational_variants/fallible_result/map_result.rs +++ b/src/computational_variants/fallible_result/map_result.rs @@ -1,7 +1,7 @@ use crate::computational_variants::ParMap; -use crate::runner::{DefaultRunner, ParallelRunner}; -use crate::par_iter_result::{IntoResult, ParIterResult}; use crate::executor::parallel_compute as prc; +use crate::par_iter_result::{IntoResult, ParIterResult}; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::{IterationOrder, ParCollectInto, ParIter}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/computational_variants/fallible_result/xap_result.rs b/src/computational_variants/fallible_result/xap_result.rs index cc8035f..a88a7fa 100644 --- a/src/computational_variants/fallible_result/xap_result.rs +++ b/src/computational_variants/fallible_result/xap_result.rs @@ -1,9 +1,9 @@ use crate::computational_variants::ParXap; +use crate::executor::parallel_compute as prc; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; -use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::{IntoResult, ParIterResult}; -use crate::executor::parallel_compute as prc; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::{IterationOrder, ParCollectInto, Params}; use core::marker::PhantomData; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/computational_variants/map.rs b/src/computational_variants/map.rs index 410f699..ecbf231 100644 --- a/src/computational_variants/map.rs +++ b/src/computational_variants/map.rs @@ -1,9 +1,9 @@ use super::xap::ParXap; use crate::computational_variants::fallible_result::ParMapResult; +use crate::executor::parallel_compute as prc; use crate::generic_values::{Vector, WhilstAtom}; -use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::IntoResult; -use crate::executor::parallel_compute as prc; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::using::{UParMap, UsingClone, UsingFun}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; use crate::{ParIterResult, ParIterUsing}; diff --git a/src/computational_variants/par.rs b/src/computational_variants/par.rs index 47e1baa..669ee75 100644 --- a/src/computational_variants/par.rs +++ b/src/computational_variants/par.rs @@ -1,9 +1,9 @@ use super::{map::ParMap, xap::ParXap}; use crate::computational_variants::fallible_result::ParResult; +use crate::executor::parallel_compute as prc; use crate::generic_values::{Vector, WhilstAtom}; -use 
crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::IntoResult; -use crate::executor::parallel_compute as prc; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::using::{UPar, UsingClone, UsingFun}; use crate::{ ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params, default_fns::map_self, diff --git a/src/computational_variants/tests/map/collect.rs b/src/computational_variants/tests/map/collect.rs index 2eb13d9..3928e33 100644 --- a/src/computational_variants/tests/map/collect.rs +++ b/src/computational_variants/tests/map/collect.rs @@ -37,8 +37,7 @@ fn m_map_collect(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { let params = Params::new(nt, chunk, ordering); let iter = input.into_con_iter(); - let (_, mut output) = - map_collect_into(DefaultRunner::default(), params, iter, map, output); + let (_, mut output) = map_collect_into(DefaultRunner::default(), params, iter, map, output); if !params.is_sequential() && matches!(params.iteration_order, IterationOrder::Arbitrary) { expected.sort(); diff --git a/src/computational_variants/tests/map/find.rs b/src/computational_variants/tests/map/find.rs index 1daf76a..ac61306 100644 --- a/src/computational_variants/tests/map/find.rs +++ b/src/computational_variants/tests/map/find.rs @@ -1,6 +1,4 @@ -use crate::{ - Params, default_fns::map_self, executor::parallel_compute, runner::DefaultRunner, -}; +use crate::{Params, default_fns::map_self, executor::parallel_compute, runner::DefaultRunner}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; @@ -25,8 +23,7 @@ fn m_find(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let output = - parallel_compute::next::m(DefaultRunner::default(), params, iter, map_self).1; + let output = parallel_compute::next::m(DefaultRunner::default(), params, iter, map_self).1; assert_eq!(expected, output); } diff --git a/src/computational_variants/tests/map/reduce.rs b/src/computational_variants/tests/map/reduce.rs index 523a493..f798534 100644 --- a/src/computational_variants/tests/map/reduce.rs +++ b/src/computational_variants/tests/map/reduce.rs @@ -1,4 +1,4 @@ -use crate::{Params, default_fns::map_self, runner::DefaultRunner, executor::parallel_compute}; +use crate::{Params, default_fns::map_self, executor::parallel_compute, runner::DefaultRunner}; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; @@ -26,13 +26,8 @@ fn m_reduce(n: usize, nt: usize, chunk: usize) { let params = Params::new(nt, chunk, Default::default()); let iter = input.into_con_iter(); - let (_, output) = parallel_compute::reduce::m( - DefaultRunner::default(), - params, - iter, - map_self, - reduce, - ); + let (_, output) = + parallel_compute::reduce::m(DefaultRunner::default(), params, iter, map_self, reduce); assert_eq!(expected, output); } diff --git a/src/computational_variants/xap.rs b/src/computational_variants/xap.rs index ac49b6f..f8bc1ae 100644 --- a/src/computational_variants/xap.rs +++ b/src/computational_variants/xap.rs @@ -1,9 +1,9 @@ use crate::computational_variants::fallible_result::ParXapResult; +use crate::executor::parallel_compute as prc; use crate::generic_values::TransformableValues; use crate::generic_values::runner_results::Infallible; -use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_result::IntoResult; -use crate::executor::parallel_compute as prc; +use crate::runner::{DefaultRunner, ParallelRunner}; 
use crate::using::{UParXap, UsingClone, UsingFun}; use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParIter, Params}; use crate::{ParIterResult, ParIterUsing}; diff --git a/src/executor/parallel_executor.rs b/src/executor/parallel_executor.rs index abdbf83..abf434d 100644 --- a/src/executor/parallel_executor.rs +++ b/src/executor/parallel_executor.rs @@ -1,7 +1,7 @@ use super::thread_executor::ThreadExecutor; use crate::{ - runner::{ComputationKind, NumSpawned}, parameters::Params, + runner::{ComputationKind, NumSpawned}, }; use core::num::NonZeroUsize; use orx_concurrent_iter::ConcurrentIter; diff --git a/src/par_iter.rs b/src/par_iter.rs index 1b5d455..57cfe49 100644 --- a/src/par_iter.rs +++ b/src/par_iter.rs @@ -1,8 +1,8 @@ use crate::ParIterResult; use crate::computational_variants::fallible_option::ParOption; -use crate::runner::{DefaultRunner, ParallelRunner}; use crate::par_iter_option::{IntoOption, ParIterOption}; use crate::par_iter_result::IntoResult; +use crate::runner::{DefaultRunner, ParallelRunner}; use crate::using::{UsingClone, UsingFun}; use crate::{ ParIterUsing, Params, diff --git a/src/parallelizable_collection_mut.rs b/src/parallelizable_collection_mut.rs index 2c6a664..864b37c 100644 --- a/src/parallelizable_collection_mut.rs +++ b/src/parallelizable_collection_mut.rs @@ -1,6 +1,5 @@ use crate::{ - ParIter, ParallelizableCollection, Params, computational_variants::Par, - runner::DefaultRunner, + ParIter, ParallelizableCollection, Params, computational_variants::Par, runner::DefaultRunner, }; use orx_concurrent_iter::ConcurrentCollectionMut; diff --git a/tests/whilst/collect.rs b/tests/whilst/collect.rs index 03858c6..32c544b 100644 --- a/tests/whilst/collect.rs +++ b/tests/whilst/collect.rs @@ -1,6 +1,6 @@ use crate::fibonacci; -use std::hint::black_box; use orx_parallel::*; +use std::hint::black_box; use test_case::test_case; #[test_case(0, 0, 0, 0, "0")] diff --git a/tests/whilst/find.rs b/tests/whilst/find.rs index 06ddbe7..95e32dc 100644 --- a/tests/whilst/find.rs +++ b/tests/whilst/find.rs @@ -1,6 +1,6 @@ use crate::fibonacci; -use std::hint::black_box; use orx_parallel::*; +use std::hint::black_box; use test_case::test_case; #[test_case(511, 0, 0, &[333], &[333], None)] diff --git a/tests/whilst/reduce.rs b/tests/whilst/reduce.rs index 2f46f8d..7b8cd10 100644 --- a/tests/whilst/reduce.rs +++ b/tests/whilst/reduce.rs @@ -1,6 +1,6 @@ use crate::fibonacci; -use std::hint::black_box; use orx_parallel::*; +use std::hint::black_box; use test_case::test_case; #[test_case(511, 0, 0, 333, 332*331/2)] From 22d20da6af3f84ae5051f8d891211653825c478e Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 07:33:09 +0200 Subject: [PATCH 213/264] exclude rayon pool from miri tests --- src/runner/implementations/tests/rayon.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/runner/implementations/tests/rayon.rs b/src/runner/implementations/tests/rayon.rs index ebca673..3f1825a 100644 --- a/src/runner/implementations/tests/rayon.rs +++ b/src/runner/implementations/tests/rayon.rs @@ -7,6 +7,8 @@ const N: [usize; 2] = [37, 125]; #[cfg(not(miri))] const N: [usize; 2] = [1025, 4735]; +// TODO: rayon pool fails the miri test (integer-to-pointer cast crossbeam-epoch-0.9.18/src/atomic.rs:204:11) +#[cfg(not(miri))] #[test_matrix( [0, 1, N[0], N[1]], [1, 4], From 50add43c79f66a5d2cb2e9eae56151cafa5ab949 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 09:57:13 +0200 Subject: [PATCH 214/264] update benches script --- .scripts/run_benchmark.sh | 2 
+- .scripts/run_benchmarks.sh | 22 ++++++++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/.scripts/run_benchmark.sh b/.scripts/run_benchmark.sh index 743fa5a..eb5b54f 100755 --- a/.scripts/run_benchmark.sh +++ b/.scripts/run_benchmark.sh @@ -1,4 +1,4 @@ -original_bench=find +original_bench=find_iter_into_par bench=$1 sed -i "s/$original_bench/$bench/g" Cargo.toml diff --git a/.scripts/run_benchmarks.sh b/.scripts/run_benchmarks.sh index 2700081..b883762 100755 --- a/.scripts/run_benchmarks.sh +++ b/.scripts/run_benchmarks.sh @@ -1,36 +1,38 @@ -# allBenches=(collect_filter sum) - allBenches=( + chain_collect_map + chain3_collect_map + chain4_collect_map collect_filter collect_filtermap collect_flatmap collect_iter_into_par collect_long_chain - collect_map - collect_map_filter collect_map_filter_hash_set - collect_result + collect_map_filter + collect_map count_filtermap count_flatmap - count_map count_map_filter + count_map drain_vec_collect_map_filter - find find_any find_flatmap find_iter_into_par find_map_filter + find mut_for_each_iter mut_for_each_slice - reduce reduce_iter_into_par reduce_long_chain - reduce_map reduce_map_filter - sum + reduce_map + reduce + result_collect_map + result_reduce_map sum_filtermap sum_flatmap sum_map_filter + sum vec_deque_collect_map_filter vec_deque_collect_map_filter_owned ) From 8cc43e72a4a0cbf7dcffa23ba3ee84e710c824bb Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 10:19:09 +0200 Subject: [PATCH 215/264] implement RunnerWithScopedPool --- Cargo.toml | 3 +- src/runner/implementations/mod.rs | 5 + src/runner/implementations/scoped_pool.rs | 111 ++++++++++++++++++++++ 3 files changed, 118 insertions(+), 1 deletion(-) create mode 100644 src/runner/implementations/scoped_pool.rs diff --git a/Cargo.toml b/Cargo.toml index 7af1184..5cd959e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ orx-self-or = { version = "1.2.0" } # optional thread pool dependencies rayon = { version = "1.11.0", optional = true } scoped_threadpool = { version = "0.1.9", optional = true } +scoped-pool = { version = "1.0.0", optional = true } [dev-dependencies] chrono = "0.4.42" @@ -45,6 +46,6 @@ harness = false all-features = true [features] -default = ["std"] +default = ["std", "scoped-pool"] std = [] generic_iterator = ["rayon"] diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs index 142d26e..8721ef5 100644 --- a/src/runner/implementations/mod.rs +++ b/src/runner/implementations/mod.rs @@ -18,3 +18,8 @@ pub use rayon::RunnerWithRayonPool; mod scoped_threadpool; #[cfg(feature = "scoped_threadpool")] pub use scoped_threadpool::RunnerWithScopedThreadPool; + +#[cfg(feature = "scoped-pool")] +mod scoped_pool; +#[cfg(feature = "scoped-pool")] +pub use scoped_pool::RunnerWithScopedPool; diff --git a/src/runner/implementations/scoped_pool.rs b/src/runner/implementations/scoped_pool.rs new file mode 100644 index 0000000..222b262 --- /dev/null +++ b/src/runner/implementations/scoped_pool.rs @@ -0,0 +1,111 @@ +use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner}; +use core::{marker::PhantomData, num::NonZeroUsize}; +use orx_self_or::SoR; +use scoped_pool::{Pool, Scope}; + +// POOL + +impl ParThreadPool for Pool { + type ScopeRef<'s, 'env, 'scope> + = &'s Scope<'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, 
+ { + s.execute(work); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&Scope<'scope>) + Send, + { + self.scoped(f) + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new(self.workers().max(1)).expect(">0") + } +} + +impl ParThreadPool for &Pool { + type ScopeRef<'s, 'env, 'scope> + = &'s Scope<'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, + { + s.execute(work); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&Scope<'scope>) + Send, + { + self.scoped(f) + } + + fn max_num_threads(&self) -> core::num::NonZeroUsize { + NonZeroUsize::new(self.workers().max(1)).expect(">0") + } +} + +// RUNNER + +pub struct RunnerWithScopedPool +where + R: ParallelExecutor, + P: SoR + ParThreadPool, +{ + pool: P, + runner: PhantomData, +} + +impl From for RunnerWithScopedPool { + fn from(pool: Pool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl<'a> From<&'a Pool> for RunnerWithScopedPool<&'a Pool, DefaultExecutor> { + fn from(pool: &'a Pool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl ParallelRunner for RunnerWithScopedPool +where + R: ParallelExecutor, + P: SoR + ParThreadPool, +{ + type Executor = R; + + type ThreadPool = P; + + fn thread_pool(&self) -> &Self::ThreadPool { + &self.pool + } + + fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { + &mut self.pool + } +} From 21cf80d403aff5e07f8efbeeaff12918cd229297 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 10:21:27 +0200 Subject: [PATCH 216/264] expose RunnerWithScopedPool --- src/lib.rs | 2 ++ src/runner/implementations/rayon.rs | 2 +- src/runner/implementations/scoped_pool.rs | 1 + src/runner/implementations/scoped_threadpool.rs | 2 +- src/runner/mod.rs | 5 +++++ 5 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index b6156f7..8edd61e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -76,6 +76,8 @@ pub use using::ParIterUsing; pub use runner::DefaultRunner; #[cfg(feature = "rayon")] pub use runner::RunnerWithRayonPool; +#[cfg(feature = "scoped-pool")] +pub use runner::RunnerWithScopedPool; #[cfg(feature = "scoped_threadpool")] pub use runner::RunnerWithScopedThreadPool; pub use runner::SequentialRunner; diff --git a/src/runner/implementations/rayon.rs b/src/runner/implementations/rayon.rs index 5e01f68..611688f 100644 --- a/src/runner/implementations/rayon.rs +++ b/src/runner/implementations/rayon.rs @@ -67,7 +67,7 @@ impl ParThreadPool for &rayon::ThreadPool { // RUNNER -/// Parallel runner using threads provided by rayon thread pool. +/// Parallel runner using threads provided by rayon::ThreadPool. pub struct RunnerWithRayonPool where R: ParallelExecutor, diff --git a/src/runner/implementations/scoped_pool.rs b/src/runner/implementations/scoped_pool.rs index 222b262..d7299c0 100644 --- a/src/runner/implementations/scoped_pool.rs +++ b/src/runner/implementations/scoped_pool.rs @@ -65,6 +65,7 @@ impl ParThreadPool for &Pool { // RUNNER +/// Parallel runner using threads provided by scoped_pool::Pool. 
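+///
+/// A minimal usage sketch; it assumes the "scoped-pool" feature is enabled,
+/// and the pool size and input below are illustrative:
+///
+/// ```ignore
+/// use orx_parallel::*;
+/// use scoped_pool::Pool;
+///
+/// // a scoped-pool with 4 workers; an existing pool can also be borrowed
+/// let pool = Pool::new(4);
+/// let mut runner: RunnerWithScopedPool<_> = (&pool).into();
+///
+/// let input: Vec<usize> = (0..1024).collect();
+/// // the computation borrows the pool's threads rather than spawning new ones
+/// let doubled: Vec<usize> = input.par().with_runner(&mut runner).map(|x| x * 2).collect();
+/// ```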
pub struct RunnerWithScopedPool where R: ParallelExecutor, diff --git a/src/runner/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs index b9d6bb1..286ff91 100644 --- a/src/runner/implementations/scoped_threadpool.rs +++ b/src/runner/implementations/scoped_threadpool.rs @@ -67,7 +67,7 @@ impl ParThreadPool for &mut Pool { // RUNNER -/// Parallel runner using threads provided by scoped_threadpool. +/// Parallel runner using threads provided by scoped_threadpool::Pool. pub struct RunnerWithScopedThreadPool where R: ParallelExecutor, diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 0ba776c..f6f5467 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -20,6 +20,11 @@ pub use implementations::RunnerWithRayonPool; #[cfg(feature = "scoped_threadpool")] pub use implementations::RunnerWithScopedThreadPool; +#[cfg(feature = "scoped-pool")] +pub use implementations::RunnerWithScopedPool; + +// DEFAULT + /// Default runner used by orx-parallel computations: /// /// * [`StdRunner`] when "std" feature is enabled, From 114746a3bccca31c357f016278394652bd56512d Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 10:22:54 +0200 Subject: [PATCH 217/264] RunnerWithScopedPool tests added --- src/runner/implementations/tests/mod.rs | 3 +++ .../implementations/tests/scoped_pool.rs | 21 +++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 src/runner/implementations/tests/scoped_pool.rs diff --git a/src/runner/implementations/tests/mod.rs b/src/runner/implementations/tests/mod.rs index e779f13..92a2725 100644 --- a/src/runner/implementations/tests/mod.rs +++ b/src/runner/implementations/tests/mod.rs @@ -4,6 +4,9 @@ mod rayon; #[cfg(feature = "scoped_threadpool")] mod scoped_threadpool; +#[cfg(feature = "scoped-pool")] +mod scoped_pool; + #[cfg(feature = "std")] mod std; diff --git a/src/runner/implementations/tests/scoped_pool.rs b/src/runner/implementations/tests/scoped_pool.rs new file mode 100644 index 0000000..f353980 --- /dev/null +++ b/src/runner/implementations/tests/scoped_pool.rs @@ -0,0 +1,21 @@ +use super::run_map; +use crate::{IterationOrder, runner::implementations::RunnerWithScopedPool}; +use scoped_pool::Pool; +use test_case::test_matrix; + +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64], + [IterationOrder::Ordered, IterationOrder::Arbitrary]) +] +fn pool_scoped_pool_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { + let pool = Pool::new(nt); + let orch: RunnerWithScopedPool<_> = (&pool).into(); + run_map(n, chunk, ordering, orch); +} From 26a69dc115627d99824463de1f89446a4144ba02 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 10:46:28 +0200 Subject: [PATCH 218/264] implement runner with poolite pool --- Cargo.toml | 3 +- src/lib.rs | 2 + src/runner/implementations/mod.rs | 15 ++-- src/runner/implementations/poolite.rs | 112 ++++++++++++++++++++++++++ src/runner/mod.rs | 9 ++- 5 files changed, 132 insertions(+), 9 deletions(-) create mode 100644 src/runner/implementations/poolite.rs diff --git a/Cargo.toml b/Cargo.toml index 5cd959e..ad091cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ orx-self-or = { version = "1.2.0" } rayon = { version = "1.11.0", optional = true } scoped_threadpool = { version = "0.1.9", optional = true } scoped-pool = { version = "1.0.0", optional = true } +poolite = { version = "0.7.1", optional = true } [dev-dependencies] chrono = "0.4.42" 
@@ -46,6 +47,6 @@ harness = false all-features = true [features] -default = ["std", "scoped-pool"] +default = ["std", "poolite"] std = [] generic_iterator = ["rayon"] diff --git a/src/lib.rs b/src/lib.rs index 8edd61e..997e624 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -74,6 +74,8 @@ pub use special_type_sets::Sum; pub use using::ParIterUsing; pub use runner::DefaultRunner; +#[cfg(feature = "poolite")] +pub use runner::RunnerWithPoolitePool; #[cfg(feature = "rayon")] pub use runner::RunnerWithRayonPool; #[cfg(feature = "scoped-pool")] diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs index 8721ef5..e1cd965 100644 --- a/src/runner/implementations/mod.rs +++ b/src/runner/implementations/mod.rs @@ -9,17 +9,22 @@ mod std_runner; #[cfg(feature = "std")] pub use std_runner::StdRunner; +#[cfg(feature = "poolite")] +mod poolite; +#[cfg(feature = "poolite")] +pub use poolite::RunnerWithPoolitePool; + #[cfg(feature = "rayon")] mod rayon; #[cfg(feature = "rayon")] pub use rayon::RunnerWithRayonPool; -#[cfg(feature = "scoped_threadpool")] -mod scoped_threadpool; -#[cfg(feature = "scoped_threadpool")] -pub use scoped_threadpool::RunnerWithScopedThreadPool; - #[cfg(feature = "scoped-pool")] mod scoped_pool; #[cfg(feature = "scoped-pool")] pub use scoped_pool::RunnerWithScopedPool; + +#[cfg(feature = "scoped_threadpool")] +mod scoped_threadpool; +#[cfg(feature = "scoped_threadpool")] +pub use scoped_threadpool::RunnerWithScopedThreadPool; diff --git a/src/runner/implementations/poolite.rs b/src/runner/implementations/poolite.rs new file mode 100644 index 0000000..68821ad --- /dev/null +++ b/src/runner/implementations/poolite.rs @@ -0,0 +1,112 @@ +use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner}; +use core::{marker::PhantomData, num::NonZeroUsize}; +use orx_self_or::SoR; +use poolite::{Pool, Scoped}; + +// POOL + +impl ParThreadPool for Pool { + type ScopeRef<'s, 'env, 'scope> + = &'s Scoped<'env, 'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, + { + s.push(work); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s Scoped<'env, 'scope>) + Send, + { + self.scoped(f); + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new(self.threads_future().max(1)).expect(">0") + } +} + +impl ParThreadPool for &Pool { + type ScopeRef<'s, 'env, 'scope> + = &'s Scoped<'env, 'scope> + where + 'scope: 's, + 'env: 'scope + 's; + + fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W) + where + 'scope: 's, + 'env: 'scope + 's, + W: Fn() + Send + 'scope + 'env, + { + s.push(work); + } + + fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) + where + 'env: 'scope, + for<'s> F: FnOnce(&'s Scoped<'env, 'scope>) + Send, + { + self.scoped(f); + } + + fn max_num_threads(&self) -> NonZeroUsize { + NonZeroUsize::new(self.threads_future().max(1)).expect(">0") + } +} + +// RUNNER + +/// Parallel runner using threads provided by poolite::Pool. 
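+///
+/// A minimal usage sketch; it assumes the "poolite" feature is enabled,
+/// and the builder settings below are illustrative:
+///
+/// ```ignore
+/// use orx_parallel::*;
+///
+/// // a poolite pool with exactly 4 threads
+/// let pool = poolite::Pool::with_builder(poolite::Builder::new().min(4).max(4)).unwrap();
+/// let mut runner: RunnerWithPoolitePool<_> = (&pool).into();
+///
+/// let input: Vec<usize> = (0..1024).collect();
+/// let strings: Vec<String> = input.par().with_runner(&mut runner).map(|x| x.to_string()).collect();
+/// ```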
+pub struct RunnerWithPoolitePool +where + R: ParallelExecutor, + P: SoR + ParThreadPool, +{ + pool: P, + runner: PhantomData, +} + +impl From for RunnerWithPoolitePool { + fn from(pool: Pool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl<'a> From<&'a Pool> for RunnerWithPoolitePool<&'a Pool, DefaultExecutor> { + fn from(pool: &'a Pool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl ParallelRunner for RunnerWithPoolitePool +where + R: ParallelExecutor, + P: SoR + ParThreadPool, +{ + type Executor = R; + + type ThreadPool = P; + + fn thread_pool(&self) -> &Self::ThreadPool { + &self.pool + } + + fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { + &mut self.pool + } +} diff --git a/src/runner/mod.rs b/src/runner/mod.rs index f6f5467..17252a4 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -14,15 +14,18 @@ pub use implementations::SequentialRunner; #[cfg(feature = "std")] pub use implementations::StdRunner; +#[cfg(feature = "poolite")] +pub use implementations::RunnerWithPoolitePool; + #[cfg(feature = "rayon")] pub use implementations::RunnerWithRayonPool; -#[cfg(feature = "scoped_threadpool")] -pub use implementations::RunnerWithScopedThreadPool; - #[cfg(feature = "scoped-pool")] pub use implementations::RunnerWithScopedPool; +#[cfg(feature = "scoped_threadpool")] +pub use implementations::RunnerWithScopedThreadPool; + // DEFAULT /// Default runner used by orx-parallel computations: From 6a00f4908621814c0e8c6462a76da8a6122d8482 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 10:48:24 +0200 Subject: [PATCH 219/264] poolite runner tests added --- src/runner/implementations/tests/mod.rs | 9 ++++++--- src/runner/implementations/tests/poolite.rs | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 src/runner/implementations/tests/poolite.rs diff --git a/src/runner/implementations/tests/mod.rs b/src/runner/implementations/tests/mod.rs index 92a2725..0d1fef0 100644 --- a/src/runner/implementations/tests/mod.rs +++ b/src/runner/implementations/tests/mod.rs @@ -1,12 +1,15 @@ +#[cfg(feature = "poolite")] +mod poolite; + #[cfg(feature = "rayon")] mod rayon; -#[cfg(feature = "scoped_threadpool")] -mod scoped_threadpool; - #[cfg(feature = "scoped-pool")] mod scoped_pool; +#[cfg(feature = "scoped_threadpool")] +mod scoped_threadpool; + #[cfg(feature = "std")] mod std; diff --git a/src/runner/implementations/tests/poolite.rs b/src/runner/implementations/tests/poolite.rs new file mode 100644 index 0000000..85431fc --- /dev/null +++ b/src/runner/implementations/tests/poolite.rs @@ -0,0 +1,21 @@ +use super::run_map; +use crate::{IterationOrder, runner::implementations::RunnerWithPoolitePool}; +use poolite::{Builder, Pool}; +use test_case::test_matrix; + +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64], + [IterationOrder::Ordered, IterationOrder::Arbitrary]) +] +fn pool_poolite_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { + let pool = Pool::with_builder(Builder::new().max(nt).min(nt)).unwrap(); + let orch: RunnerWithPoolitePool<_> = (&pool).into(); + run_map(n, chunk, ordering, orch); +} From 5974c17957e3257a2f602d3b6f64a7ed4bfb8d35 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 10:59:38 +0200 Subject: [PATCH 220/264] RunnerWithYastlPool is implemented --- Cargo.toml | 3 +- src/runner/implementations/mod.rs | 5 + 
 src/runner/implementations/yastl.rs | 140 ++++++++++++++++++++++++++++
 3 files changed, 147 insertions(+), 1 deletion(-)
 create mode 100644 src/runner/implementations/yastl.rs

diff --git a/Cargo.toml b/Cargo.toml
index ad091cb..9c866ec 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -27,6 +27,7 @@ rayon = { version = "1.11.0", optional = true }
 scoped_threadpool = { version = "0.1.9", optional = true }
 scoped-pool = { version = "1.0.0", optional = true }
 poolite = { version = "0.7.1", optional = true }
+yastl = { version = "0.1.2", optional = true }

 [dev-dependencies]
 chrono = "0.4.42"
@@ -47,6 +48,6 @@ harness = false
 all-features = true

 [features]
-default = ["std", "poolite"]
+default = ["std", "yastl"]
 std = []
 generic_iterator = ["rayon"]
diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs
index e1cd965..108e789 100644
--- a/src/runner/implementations/mod.rs
+++ b/src/runner/implementations/mod.rs
@@ -28,3 +28,8 @@ mod scoped_threadpool;
 #[cfg(feature = "scoped_threadpool")]
 pub use scoped_threadpool::RunnerWithScopedThreadPool;
+
+#[cfg(feature = "yastl")]
+mod yastl;
+#[cfg(feature = "yastl")]
+pub use yastl::RunnerWithYastlPool;
diff --git a/src/runner/implementations/yastl.rs b/src/runner/implementations/yastl.rs
new file mode 100644
index 0000000..7dfbbf2
--- /dev/null
+++ b/src/runner/implementations/yastl.rs
@@ -0,0 +1,140 @@
+use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner};
+use core::{marker::PhantomData, num::NonZeroUsize};
+use orx_self_or::SoR;
+use yastl::{Pool, Scope, ThreadConfig};
+
+// POOL
+
+pub struct YastlPool(Pool, NonZeroUsize);
+
+impl YastlPool {
+    pub fn new(num_threads: usize) -> Self {
+        // clamp to at least one thread; `max(1)` so that NonZeroUsize below cannot fail
+        let num_threads = num_threads.max(1);
+        let pool = Pool::new(num_threads);
+        Self(pool, NonZeroUsize::new(num_threads).expect(">0"))
+    }
+
+    pub fn with_config(num_threads: usize, config: ThreadConfig) -> Self {
+        let num_threads = num_threads.max(1);
+        let pool = Pool::with_config(num_threads, config);
+        Self(pool, NonZeroUsize::new(num_threads).expect(">0"))
+    }
+
+    pub fn inner(&self) -> &Pool {
+        &self.0
+    }
+
+    pub fn inner_mut(&mut self) -> &mut Pool {
+        &mut self.0
+    }
+
+    pub fn into_inner(self) -> Pool {
+        self.0
+    }
+}
+
+impl ParThreadPool for YastlPool {
+    type ScopeRef<'s, 'env, 'scope>
+        = &'s Scope<'scope>
+    where
+        'scope: 's,
+        'env: 'scope + 's;
+
+    fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
+    where
+        'scope: 's,
+        'env: 'scope + 's,
+        W: Fn() + Send + 'scope + 'env,
+    {
+        s.execute(work);
+    }
+
+    fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
+    where
+        'env: 'scope,
+        for<'s> F: FnOnce(&'s Scope<'scope>) + Send,
+    {
+        self.0.scoped(f)
+    }
+
+    fn max_num_threads(&self) -> NonZeroUsize {
+        self.1
+    }
+}
+
+impl ParThreadPool for &YastlPool {
+    type ScopeRef<'s, 'env, 'scope>
+        = &'s Scope<'scope>
+    where
+        'scope: 's,
+        'env: 'scope + 's;
+
+    fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
+    where
+        'scope: 's,
+        'env: 'scope + 's,
+        W: Fn() + Send + 'scope + 'env,
+    {
+        s.execute(work);
+    }
+
+    fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
+    where
+        'env: 'scope,
+        for<'s> F: FnOnce(&'s Scope<'scope>) + Send,
+    {
+        self.0.scoped(f)
+    }
+
+    fn max_num_threads(&self) -> NonZeroUsize {
+        self.1
+    }
+}
+
+// RUNNER
+
+/// Parallel runner using threads provided by yastl::Pool.
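+///
+/// A minimal usage sketch; it assumes the "yastl" feature is enabled,
+/// and the pool size and input below are illustrative:
+///
+/// ```ignore
+/// use orx_parallel::*;
+///
+/// // YastlPool wraps yastl::Pool together with the number of threads it was built with
+/// let pool = YastlPool::new(4);
+/// let mut runner: RunnerWithYastlPool<_> = (&pool).into();
+///
+/// let input: Vec<usize> = (0..1024).collect();
+/// let sum = input.par().with_runner(&mut runner).map(|x| x + 1).reduce(|a, b| a + b);
+/// ```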
+pub struct RunnerWithYastlPool<P, R = DefaultExecutor>
+where
+    R: ParallelExecutor,
+    P: SoR<YastlPool> + ParThreadPool,
+{
+    pool: P,
+    runner: PhantomData<R>,
+}
+
+impl From<YastlPool> for RunnerWithYastlPool<YastlPool, DefaultExecutor> {
+    fn from(pool: YastlPool) -> Self {
+        Self {
+            pool,
+            runner: PhantomData,
+        }
+    }
+}
+
+impl<'a> From<&'a YastlPool> for RunnerWithYastlPool<&'a YastlPool, DefaultExecutor> {
+    fn from(pool: &'a YastlPool) -> Self {
+        Self {
+            pool,
+            runner: PhantomData,
+        }
+    }
+}
+
+impl<P, R> ParallelRunner for RunnerWithYastlPool<P, R>
+where
+    R: ParallelExecutor,
+    P: SoR<YastlPool> + ParThreadPool,
+{
+    type Executor = R;
+
+    type ThreadPool = P;
+
+    fn thread_pool(&self) -> &Self::ThreadPool {
+        &self.pool
+    }
+
+    fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool {
+        &mut self.pool
+    }
+}

From 3d061021ea83cfa1baed276b5a137a6eb0733e6b Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 11:05:27 +0200
Subject: [PATCH 221/264] yastl documentation

---
 src/lib.rs                          |  2 ++
 src/runner/implementations/mod.rs   |  2 +-
 src/runner/implementations/yastl.rs | 15 +++++++++++++++
 src/runner/mod.rs                   |  3 +++
 4 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/src/lib.rs b/src/lib.rs
index 997e624..1e2400e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -85,3 +85,5 @@ pub use runner::RunnerWithScopedThreadPool;
 pub use runner::SequentialRunner;
 #[cfg(feature = "std")]
 pub use runner::StdRunner;
+#[cfg(feature = "yastl")]
+pub use runner::{RunnerWithYastlPool, YastlPool};
diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs
index 108e789..1e57950 100644
--- a/src/runner/implementations/mod.rs
+++ b/src/runner/implementations/mod.rs
@@ -32,4 +32,4 @@
 #[cfg(feature = "yastl")]
 mod yastl;
 #[cfg(feature = "yastl")]
-pub use yastl::RunnerWithYastlPool;
+pub use yastl::{RunnerWithYastlPool, YastlPool};
diff --git a/src/runner/implementations/yastl.rs b/src/runner/implementations/yastl.rs
index 7dfbbf2..3f3c37f 100644
--- a/src/runner/implementations/yastl.rs
+++ b/src/runner/implementations/yastl.rs
@@ -5,29 +5,44 @@ use yastl::{Pool, Scope, ThreadConfig};

 // POOL

+/// A wrapper for `yastl::Pool` and number of threads it was built with.
+///
+/// NOTE: The reason why `yastl::Pool` does not directly implement `ParThreadPool`
+/// is simply to be able to provide `max_num_threads` which is the argument used
+/// to create the pool with.
+///
+/// Two constructors of the `yastl::Pool` are made available to `YastlPool` as well:
+/// * [`YastlPool::new`]
+/// * [`YastlPool::with_config`]
 pub struct YastlPool(Pool, NonZeroUsize);

 impl YastlPool {
+    /// Create a new Pool that will execute its tasks on `num_threads` worker threads.
     pub fn new(num_threads: usize) -> Self {
         let num_threads = num_threads.max(1);
         let pool = Pool::new(num_threads);
         Self(pool, NonZeroUsize::new(num_threads).expect(">0"))
     }

+    /// Create a new Pool that will execute its tasks on `num_threads` worker threads and
+    /// spawn them using the given `config`.
     pub fn with_config(num_threads: usize, config: ThreadConfig) -> Self {
         let num_threads = num_threads.max(1);
         let pool = Pool::with_config(num_threads, config);
         Self(pool, NonZeroUsize::new(num_threads).expect(">0"))
     }

+    /// Reference to wrapped `yastl::Pool`.
     pub fn inner(&self) -> &Pool {
         &self.0
     }

+    /// Mutable reference to wrapped `yastl::Pool`.
     pub fn inner_mut(&mut self) -> &mut Pool {
         &mut self.0
     }

+    /// Returns the wrapped `yastl::Pool`.
pub fn into_inner(self) -> Pool { self.0 } diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 17252a4..fd7088d 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -26,6 +26,9 @@ pub use implementations::RunnerWithScopedPool; #[cfg(feature = "scoped_threadpool")] pub use implementations::RunnerWithScopedThreadPool; +#[cfg(feature = "yastl")] +pub use implementations::{RunnerWithYastlPool, YastlPool}; + // DEFAULT /// Default runner used by orx-parallel computations: From 7160da2e5251fa2bc7df7ae093d5ad3c431d0a1c Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 11:07:03 +0200 Subject: [PATCH 222/264] yastl tests added --- src/runner/implementations/tests/mod.rs | 3 +++ src/runner/implementations/tests/yastl.rs | 28 +++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 src/runner/implementations/tests/yastl.rs diff --git a/src/runner/implementations/tests/mod.rs b/src/runner/implementations/tests/mod.rs index 0d1fef0..50ce6c5 100644 --- a/src/runner/implementations/tests/mod.rs +++ b/src/runner/implementations/tests/mod.rs @@ -13,6 +13,9 @@ mod scoped_threadpool; #[cfg(feature = "std")] mod std; +#[cfg(feature = "yastl")] +mod yastl; + mod sequential; mod utils; diff --git a/src/runner/implementations/tests/yastl.rs b/src/runner/implementations/tests/yastl.rs new file mode 100644 index 0000000..604aaa4 --- /dev/null +++ b/src/runner/implementations/tests/yastl.rs @@ -0,0 +1,28 @@ +use super::run_map; +use crate::{ + IterationOrder, + runner::implementations::{RunnerWithYastlPool, YastlPool}, +}; +use test_case::test_matrix; +use yastl::ThreadConfig; + +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64], + [IterationOrder::Ordered, IterationOrder::Arbitrary]) +] +fn pool_yastl_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { + let pool = YastlPool::new(nt); + let orch: RunnerWithYastlPool<_> = (&pool).into(); + run_map(n, chunk, ordering, orch); + + let pool = YastlPool::with_config(nt, ThreadConfig::new()); + let orch: RunnerWithYastlPool<_> = (&pool).into(); + run_map(n, chunk, ordering, orch); +} From aaa7968d2baab5b0b3a99d8f5dd96d95b2fea7d5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 11:21:19 +0200 Subject: [PATCH 223/264] runner with pond pool is implemented --- Cargo.toml | 3 +- src/lib.rs | 2 + src/runner/implementations/mod.rs | 5 + src/runner/implementations/pond.rs | 147 ++++++++++++++++++++++++++++ src/runner/implementations/yastl.rs | 2 +- src/runner/mod.rs | 3 + 6 files changed, 160 insertions(+), 2 deletions(-) create mode 100644 src/runner/implementations/pond.rs diff --git a/Cargo.toml b/Cargo.toml index 9c866ec..37ec235 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,7 @@ scoped_threadpool = { version = "0.1.9", optional = true } scoped-pool = { version = "1.0.0", optional = true } poolite = { version = "0.7.1", optional = true } yastl = { version = "0.1.2", optional = true } +pond = { version = "0.3.1", optional = true } [dev-dependencies] chrono = "0.4.42" @@ -48,6 +49,6 @@ harness = false all-features = true [features] -default = ["std", "yastl"] +default = ["std", "pond"] std = [] generic_iterator = ["rayon"] diff --git a/src/lib.rs b/src/lib.rs index 1e2400e..1034da5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -85,5 +85,7 @@ pub use runner::RunnerWithScopedThreadPool; pub use runner::SequentialRunner; #[cfg(feature = "std")] pub use runner::StdRunner; +#[cfg(feature = "pond")] 
+pub use runner::{PondPool, RunnerWithPondPool};
 #[cfg(feature = "yastl")]
 pub use runner::{RunnerWithYastlPool, YastlPool};
diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs
index 1e57950..3ff456c 100644
--- a/src/runner/implementations/mod.rs
+++ b/src/runner/implementations/mod.rs
@@ -9,6 +9,11 @@ mod std_runner;
 #[cfg(feature = "std")]
 pub use std_runner::StdRunner;

+#[cfg(feature = "pond")]
+mod pond;
+#[cfg(feature = "pond")]
+pub use pond::{PondPool, RunnerWithPondPool};
+
 #[cfg(feature = "poolite")]
 mod poolite;
 #[cfg(feature = "poolite")]
diff --git a/src/runner/implementations/pond.rs b/src/runner/implementations/pond.rs
new file mode 100644
index 0000000..c4068e8
--- /dev/null
+++ b/src/runner/implementations/pond.rs
@@ -0,0 +1,147 @@
+use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner};
+use core::{marker::PhantomData, num::NonZeroUsize};
+use orx_self_or::SoM;
+use pond::{Pool, Scope};
+
+// POOL
+
+/// A wrapper for `pond::Pool` and number of threads it was built with.
+///
+/// NOTE: The reason why `pond::Pool` does not directly implement `ParThreadPool`
+/// is simply to be able to provide `max_num_threads` which is the argument used
+/// to create the pool with.
+///
+/// The following constructor of the `pond::Pool` is made available to `PondPool`:
+/// * [`PondPool::new_threads_unbounded`]
+pub struct PondPool(Pool, NonZeroUsize);
+
+impl PondPool {
+    /// Spawn a number of threads with an unbounded backlog of pending jobs.
+    pub fn new_threads_unbounded(num_threads: usize) -> Self {
+        // clamp to at least one thread; `max(1)` so that NonZeroUsize below cannot fail
+        let num_threads = num_threads.max(1);
+        let pool = Pool::new_threads_unbounded(num_threads);
+        Self(pool, NonZeroUsize::new(num_threads).expect(">0"))
+    }
+
+    /// Reference to wrapped `pond::Pool`.
+    pub fn inner(&self) -> &Pool {
+        &self.0
+    }
+
+    /// Mutable reference to wrapped `pond::Pool`.
+    pub fn inner_mut(&mut self) -> &mut Pool {
+        &mut self.0
+    }
+
+    /// Returns the wrapped `pond::Pool`.
+    pub fn into_inner(self) -> Pool {
+        self.0
+    }
+}
+
+impl ParThreadPool for PondPool {
+    type ScopeRef<'s, 'env, 'scope>
+        = Scope<'env, 'scope>
+    where
+        'scope: 's,
+        'env: 'scope + 's;
+
+    fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
+    where
+        'scope: 's,
+        'env: 'scope + 's,
+        W: Fn() + Send + 'scope + 'env,
+    {
+        s.execute(work);
+    }
+
+    fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
+    where
+        'env: 'scope,
+        for<'s> F: FnOnce(Scope<'env, 'scope>) + Send,
+    {
+        self.0.scoped(f)
+    }
+
+    fn max_num_threads(&self) -> NonZeroUsize {
+        self.1
+    }
+}
+
+impl ParThreadPool for &mut PondPool {
+    type ScopeRef<'s, 'env, 'scope>
+        = Scope<'env, 'scope>
+    where
+        'scope: 's,
+        'env: 'scope + 's;
+
+    fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
+    where
+        'scope: 's,
+        'env: 'scope + 's,
+        W: Fn() + Send + 'scope + 'env,
+    {
+        s.execute(work);
+    }
+
+    fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
+    where
+        'env: 'scope,
+        for<'s> F: FnOnce(Scope<'env, 'scope>) + Send,
+    {
+        self.0.scoped(f)
+    }
+
+    fn max_num_threads(&self) -> NonZeroUsize {
+        self.1
+    }
+}
+
+// RUNNER
+
+/// Parallel runner using threads provided by pond::Pool.
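+///
+/// A minimal usage sketch; it assumes the "pond" feature is enabled,
+/// and the pool size and input below are illustrative:
+///
+/// ```ignore
+/// use orx_parallel::*;
+///
+/// // pond's scoped API requires mutable access, hence the &mut conversions
+/// let mut pool = PondPool::new_threads_unbounded(4);
+/// let mut runner: RunnerWithPondPool<_> = (&mut pool).into();
+///
+/// let input: Vec<usize> = (0..1024).collect();
+/// let strings: Vec<String> = input
+///     .par()
+///     .with_runner(&mut runner)
+///     .map(|x| x.to_string())
+///     .filter(|x| !x.starts_with('1'))
+///     .collect();
+/// ```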
+pub struct RunnerWithPondPool +where + R: ParallelExecutor, + P: SoM + ParThreadPool, +{ + pool: P, + runner: PhantomData, +} + +impl From for RunnerWithPondPool { + fn from(pool: PondPool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl<'a> From<&'a mut PondPool> for RunnerWithPondPool<&'a mut PondPool, DefaultExecutor> { + fn from(pool: &'a mut PondPool) -> Self { + Self { + pool, + runner: PhantomData, + } + } +} + +impl ParallelRunner for RunnerWithPondPool +where + R: ParallelExecutor, + P: SoM + ParThreadPool, +{ + type Executor = R; + + type ThreadPool = P; + + fn thread_pool(&self) -> &Self::ThreadPool { + &self.pool + } + + fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { + &mut self.pool + } +} diff --git a/src/runner/implementations/yastl.rs b/src/runner/implementations/yastl.rs index 3f3c37f..8617cec 100644 --- a/src/runner/implementations/yastl.rs +++ b/src/runner/implementations/yastl.rs @@ -11,7 +11,7 @@ use yastl::{Pool, Scope, ThreadConfig}; /// is simply to be able to provide `max_num_threads` which is the argument used /// to create the pool with. /// -/// Two constructors of the `yastl::Pool` are made available to `YastlPool` as well: +/// Two constructors of the `yastl::Pool` are made available to `YastlPool`: /// * [`YastlPool::new`] /// * [`YastlPool::with_config`] pub struct YastlPool(Pool, NonZeroUsize); diff --git a/src/runner/mod.rs b/src/runner/mod.rs index fd7088d..37db96d 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -14,6 +14,9 @@ pub use implementations::SequentialRunner; #[cfg(feature = "std")] pub use implementations::StdRunner; +#[cfg(feature = "pond")] +pub use implementations::{PondPool, RunnerWithPondPool}; + #[cfg(feature = "poolite")] pub use implementations::RunnerWithPoolitePool; From e34ed43d85f1ce11716d82cfc6b79e0f7314d878 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 11:22:36 +0200 Subject: [PATCH 224/264] pond tests are added --- src/runner/implementations/tests/mod.rs | 5 +++++ src/runner/implementations/tests/pond.rs | 23 +++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 src/runner/implementations/tests/pond.rs diff --git a/src/runner/implementations/tests/mod.rs b/src/runner/implementations/tests/mod.rs index 50ce6c5..27a3827 100644 --- a/src/runner/implementations/tests/mod.rs +++ b/src/runner/implementations/tests/mod.rs @@ -1,5 +1,10 @@ +#[cfg(feature = "pond")] +mod pond; + #[cfg(feature = "poolite")] mod poolite; +#[cfg(feature = "yastl")] +mod yastl; #[cfg(feature = "rayon")] mod rayon; diff --git a/src/runner/implementations/tests/pond.rs b/src/runner/implementations/tests/pond.rs new file mode 100644 index 0000000..d58d49b --- /dev/null +++ b/src/runner/implementations/tests/pond.rs @@ -0,0 +1,23 @@ +use super::run_map; +use crate::{ + IterationOrder, + runner::implementations::{PondPool, RunnerWithPondPool}, +}; +use test_case::test_matrix; + +#[cfg(miri)] +const N: [usize; 2] = [37, 125]; +#[cfg(not(miri))] +const N: [usize; 2] = [1025, 4735]; + +#[test_matrix( + [0, 1, N[0], N[1]], + [1, 4], + [1, 64], + [IterationOrder::Ordered, IterationOrder::Arbitrary]) +] +fn pool_pond_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { + let mut pool = PondPool::new_threads_unbounded(nt); + let orch: RunnerWithPondPool<_> = (&mut pool).into(); + run_map(n, chunk, ordering, orch); +} From 2500106b9ee7e7d24a16438c231f96a0c53cfc2d Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 11:36:45 +0200 Subject: [PATCH 225/264] pool dependency 
requires rayon-core, not rayon --- Cargo.toml | 14 +++++++++----- src/lib.rs | 2 +- src/runner/implementations/mod.rs | 8 ++++---- .../implementations/{rayon.rs => rayon_core.rs} | 14 +++++++------- src/runner/implementations/tests/mod.rs | 4 ++-- .../tests/{rayon.rs => rayon_core.rs} | 4 ++-- src/runner/mod.rs | 2 +- 7 files changed, 26 insertions(+), 22 deletions(-) rename src/runner/implementations/{rayon.rs => rayon_core.rs} (86%) rename src/runner/implementations/tests/{rayon.rs => rayon_core.rs} (78%) diff --git a/Cargo.toml b/Cargo.toml index 37ec235..7cd6891 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,13 +22,17 @@ orx-pinned-concurrent-col = { version = "2.15.0", default-features = false } orx-priority-queue = { version = "1.7.0", default-features = false } orx-pseudo-default = { version = "2.1.0", default-features = false } orx-self-or = { version = "1.2.0" } -# optional thread pool dependencies + +# optional: generic iterator rayon = { version = "1.11.0", optional = true } -scoped_threadpool = { version = "0.1.9", optional = true } -scoped-pool = { version = "1.0.0", optional = true } + +# optional: thread pool +pond = { version = "0.3.1", optional = true } poolite = { version = "0.7.1", optional = true } +rayon-core = { version = "1.13.0", optional = true } +scoped-pool = { version = "1.0.0", optional = true } +scoped_threadpool = { version = "0.1.9", optional = true } yastl = { version = "0.1.2", optional = true } -pond = { version = "0.3.1", optional = true } [dev-dependencies] chrono = "0.4.42" @@ -49,6 +53,6 @@ harness = false all-features = true [features] -default = ["std", "pond"] +default = ["std", "rayon-core"] std = [] generic_iterator = ["rayon"] diff --git a/src/lib.rs b/src/lib.rs index 1034da5..ebb0887 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -76,7 +76,7 @@ pub use using::ParIterUsing; pub use runner::DefaultRunner; #[cfg(feature = "poolite")] pub use runner::RunnerWithPoolitePool; -#[cfg(feature = "rayon")] +#[cfg(feature = "rayon-core")] pub use runner::RunnerWithRayonPool; #[cfg(feature = "scoped-pool")] pub use runner::RunnerWithScopedPool; diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs index 3ff456c..d9b66cb 100644 --- a/src/runner/implementations/mod.rs +++ b/src/runner/implementations/mod.rs @@ -19,10 +19,10 @@ mod poolite; #[cfg(feature = "poolite")] pub use poolite::RunnerWithPoolitePool; -#[cfg(feature = "rayon")] -mod rayon; -#[cfg(feature = "rayon")] -pub use rayon::RunnerWithRayonPool; +#[cfg(feature = "rayon-core")] +mod rayon_core; +#[cfg(feature = "rayon-core")] +pub use rayon_core::RunnerWithRayonPool; #[cfg(feature = "scoped-pool")] mod scoped_pool; diff --git a/src/runner/implementations/rayon.rs b/src/runner/implementations/rayon_core.rs similarity index 86% rename from src/runner/implementations/rayon.rs rename to src/runner/implementations/rayon_core.rs index 611688f..a49f1b4 100644 --- a/src/runner/implementations/rayon.rs +++ b/src/runner/implementations/rayon_core.rs @@ -3,13 +3,13 @@ use crate::{ }; use core::{marker::PhantomData, num::NonZeroUsize}; use orx_self_or::SoR; -use rayon::ThreadPool; +use rayon_core::ThreadPool; // POOL impl ParThreadPool for ThreadPool { type ScopeRef<'s, 'env, 'scope> - = &'s rayon::Scope<'scope> + = &'s rayon_core::Scope<'scope> where 'scope: 's, 'env: 'scope + 's; @@ -26,7 +26,7 @@ impl ParThreadPool for ThreadPool { fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, - for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, + for<'s> F: 
FnOnce(&'s rayon_core::Scope<'scope>) + Send, { self.scope(f) } @@ -36,9 +36,9 @@ impl ParThreadPool for ThreadPool { } } -impl ParThreadPool for &rayon::ThreadPool { +impl ParThreadPool for &rayon_core::ThreadPool { type ScopeRef<'s, 'env, 'scope> - = &'s rayon::Scope<'scope> + = &'s rayon_core::Scope<'scope> where 'scope: 's, 'env: 'scope + 's; @@ -55,7 +55,7 @@ impl ParThreadPool for &rayon::ThreadPool { fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F) where 'env: 'scope, - for<'s> F: FnOnce(&'s rayon::Scope<'scope>) + Send, + for<'s> F: FnOnce(&'s rayon_core::Scope<'scope>) + Send, { self.scope(f) } @@ -67,7 +67,7 @@ impl ParThreadPool for &rayon::ThreadPool { // RUNNER -/// Parallel runner using threads provided by rayon::ThreadPool. +/// Parallel runner using threads provided by rayon_core::ThreadPool. pub struct RunnerWithRayonPool where R: ParallelExecutor, diff --git a/src/runner/implementations/tests/mod.rs b/src/runner/implementations/tests/mod.rs index 27a3827..571a9b9 100644 --- a/src/runner/implementations/tests/mod.rs +++ b/src/runner/implementations/tests/mod.rs @@ -6,8 +6,8 @@ mod poolite; #[cfg(feature = "yastl")] mod yastl; -#[cfg(feature = "rayon")] -mod rayon; +#[cfg(feature = "rayon-core")] +mod rayon_core; #[cfg(feature = "scoped-pool")] mod scoped_pool; diff --git a/src/runner/implementations/tests/rayon.rs b/src/runner/implementations/tests/rayon_core.rs similarity index 78% rename from src/runner/implementations/tests/rayon.rs rename to src/runner/implementations/tests/rayon_core.rs index 3f1825a..2654589 100644 --- a/src/runner/implementations/tests/rayon.rs +++ b/src/runner/implementations/tests/rayon_core.rs @@ -7,7 +7,7 @@ const N: [usize; 2] = [37, 125]; #[cfg(not(miri))] const N: [usize; 2] = [1025, 4735]; -// TODO: rayon pool fails the miri test (integer-to-pointer cast crossbeam-epoch-0.9.18/src/atomic.rs:204:11) +// TODO: rayon_core pool fails the miri test (integer-to-pointer cast crossbeam-epoch-0.9.18/src/atomic.rs:204:11) #[cfg(not(miri))] #[test_matrix( [0, 1, N[0], N[1]], @@ -16,7 +16,7 @@ const N: [usize; 2] = [1025, 4735]; [IterationOrder::Ordered, IterationOrder::Arbitrary]) ] fn pool_rayon_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { - let pool = rayon::ThreadPoolBuilder::new() + let pool = rayon_core::ThreadPoolBuilder::new() .num_threads(nt) .build() .unwrap(); diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 37db96d..894df54 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -20,7 +20,7 @@ pub use implementations::{PondPool, RunnerWithPondPool}; #[cfg(feature = "poolite")] pub use implementations::RunnerWithPoolitePool; -#[cfg(feature = "rayon")] +#[cfg(feature = "rayon-core")] pub use implementations::RunnerWithRayonPool; #[cfg(feature = "scoped-pool")] From 0e2d384188cb97b88d50ff837723b8f3df060210 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 11:42:37 +0200 Subject: [PATCH 226/264] fix double mod definition --- src/runner/implementations/tests/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/runner/implementations/tests/mod.rs b/src/runner/implementations/tests/mod.rs index 571a9b9..16a91f0 100644 --- a/src/runner/implementations/tests/mod.rs +++ b/src/runner/implementations/tests/mod.rs @@ -3,8 +3,6 @@ mod pond; #[cfg(feature = "poolite")] mod poolite; -#[cfg(feature = "yastl")] -mod yastl; #[cfg(feature = "rayon-core")] mod rayon_core; From aab9820fe982185216aa170a6f5c997965b4c7c9 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 14:37:52 +0200 
Subject: [PATCH 227/264] example benchmark for pools --- Cargo.toml | 10 +- examples/using_pools.rs | 231 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 examples/using_pools.rs diff --git a/Cargo.toml b/Cargo.toml index 7cd6891..bd63a43 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,6 +53,14 @@ harness = false all-features = true [features] -default = ["std", "rayon-core"] +default = [ + "std", + "pond", + "poolite", + "rayon-core", + "scoped-pool", + "scoped_threadpool", + "yastl", +] std = [] generic_iterator = ["rayon"] diff --git a/examples/using_pools.rs b/examples/using_pools.rs new file mode 100644 index 0000000..a73741c --- /dev/null +++ b/examples/using_pools.rs @@ -0,0 +1,231 @@ +mod utils; + +// cargo run --all-features --release --example using_pools +// to run with all options + +// cargo run --all-features --release --example using_pools -- --pool-type scoped-pool +// to run only using scoped-pool + +// cargo run --all-features --release --example using_pools -- --pool-type scoped-thread-pool --len 100 --num-repetitions 100000 +// to run only using scoped_threadpool, with 100000 repetitions for input size of 100 + +// cargo run --all-features --release --example using_pools -- --pool-type sequential +// 11.02s + +// cargo run --all-features --release --example using_pools -- --pool-type std +// 3.66s + +// cargo run --all-features --release --example using_pools -- --pool-type pond +// 11.49s + +// cargo run --all-features --release --example using_pools -- --pool-type poolite +// 12.27s + +// cargo run --all-features --release --example using_pools -- --pool-type rayon-core +// 3.76s + +// cargo run --all-features --release --example using_pools -- --pool-type scoped_threadpool +// 3.70s + +// cargo run --all-features --release --example using_pools -- --pool-type yastl +// 11.47s + +fn main() { + #[cfg(feature = "std")] + #[cfg(feature = "pond")] + #[cfg(feature = "poolite")] + #[cfg(feature = "rayon-core")] + #[cfg(feature = "scoped-pool")] + #[cfg(feature = "scoped_threadpool")] + #[cfg(feature = "yastl")] + { + use clap::Parser; + use orx_parallel::runner::ParallelRunner; + use orx_parallel::*; + use std::hint::black_box; + use std::num::NonZeroUsize; + use std::time::SystemTime; + + #[derive(Parser, Debug)] + struct Args { + /// Type of the thread pool to be used for computations. + #[arg(long, default_value_t, value_enum)] + pool_type: PoolType, + /// Number of threads. + #[arg(long, default_value_t = NonZeroUsize::new(16).unwrap())] + num_threads: NonZeroUsize, + /// Number of items in the input iterator. + #[arg(long, default_value_t = 100000)] + len: usize, + /// Number of repetitions to measure time; total time will be reported. 
+ #[arg(long, default_value_t = 1000)] + num_repetitions: usize, + } + + #[derive(clap::ValueEnum, Clone, Copy, Default, Debug)] + enum PoolType { + Std, + Sequential, + Pond, + Poolite, + RayonCore, + ScopedPool, + ScopedThreadPool, + Yastl, + #[default] + All, + } + + impl PoolType { + fn run_single(self, nt: usize, reps: usize, input: &[usize], expected: &[String]) { + let now = SystemTime::now(); + let result = match self { + Self::Std => run_std(nt, reps, input), + Self::Sequential => run_sequential(nt, reps, input), + Self::Pond => run_pond(nt, reps, input), + Self::Poolite => run_poolite(nt, reps, input), + Self::RayonCore => run_rayon_core(nt, reps, input), + Self::ScopedPool => run_scoped_pool(nt, reps, input), + Self::ScopedThreadPool => run_scoped_threadpool(nt, reps, input), + Self::Yastl => run_yastl(nt, reps, input), + Self::All => panic!("all is handled by run_all"), + }; + let elapsed = now.elapsed().unwrap(); + println!("\n{self:?} => {elapsed:?}"); + assert_eq!(expected, result); + } + + fn run_all(nt: usize, reps: usize, input: &[usize], expected: &[String]) { + Self::Std.run_single(nt, reps, input, expected); + Self::Sequential.run_single(nt, reps, input, expected); + Self::Pond.run_single(nt, reps, input, expected); + Self::Poolite.run_single(nt, reps, input, expected); + Self::RayonCore.run_single(nt, reps, input, expected); + Self::ScopedPool.run_single(nt, reps, input, expected); + Self::ScopedThreadPool.run_single(nt, reps, input, expected); + Self::Yastl.run_single(nt, reps, input, expected); + } + + fn run(self, nt: usize, reps: usize, input: &[usize], expected: &[String]) { + match self { + Self::All => Self::run_all(nt, reps, input, expected), + _ => self.run_single(nt, reps, input, expected), + } + } + } + + fn run_with_runner( + mut runner: R, + num_threads: usize, + num_repetitions: usize, + input: &[usize], + ) -> Vec { + let mut result = vec![]; + for _ in 0..num_repetitions { + result = black_box( + input + .par() + .num_threads(num_threads) + .with_runner(&mut runner) + .map(|x| x.to_string()) + .filter_map(|x| (!x.starts_with('1')).then_some(x)) + .flat_map(|x| [format!("{}!", &x), x]) + .filter(|x| !x.starts_with('2')) + .filter_map(|x| x.parse::().ok()) + .map(|x| x.to_string()) + .collect(), + ); + } + result + } + + fn run_std(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { + let mut runner = DefaultRunner::default(); // StdRunner + run_with_runner(&mut runner, num_threads, num_repetitions, input) + } + + fn run_sequential( + num_threads: usize, + num_repetitions: usize, + input: &[usize], + ) -> Vec { + let mut runner = SequentialRunner::default(); + run_with_runner(&mut runner, num_threads, num_repetitions, input) + } + + fn run_pond(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { + let mut pond = PondPool::new_threads_unbounded(num_threads); + let mut runner = RunnerWithPondPool::from(&mut pond); + run_with_runner(&mut runner, num_threads, num_repetitions, input) + } + + fn run_poolite(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { + let pond = poolite::Pool::with_builder( + poolite::Builder::new().min(num_threads).max(num_threads), + ) + .unwrap(); + let mut runner = RunnerWithPoolitePool::from(&pond); + run_with_runner(&mut runner, num_threads, num_repetitions, input) + } + + fn run_rayon_core( + num_threads: usize, + num_repetitions: usize, + input: &[usize], + ) -> Vec { + let pond = rayon_core::ThreadPoolBuilder::new() + .num_threads(num_threads) + .build() + .unwrap(); + let 
mut runner = RunnerWithRayonPool::from(&pond); + run_with_runner(&mut runner, num_threads, num_repetitions, input) + } + + fn run_scoped_pool( + num_threads: usize, + num_repetitions: usize, + input: &[usize], + ) -> Vec { + let pond = scoped_pool::Pool::new(num_threads); + let mut runner = RunnerWithScopedPool::from(&pond); + run_with_runner(&mut runner, num_threads, num_repetitions, input) + } + + fn run_scoped_threadpool( + num_threads: usize, + num_repetitions: usize, + input: &[usize], + ) -> Vec { + let mut pond = scoped_threadpool::Pool::new(num_threads as u32); + let mut runner = RunnerWithScopedThreadPool::from(&mut pond); + run_with_runner(&mut runner, num_threads, num_repetitions, input) + } + + fn run_yastl(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { + let pond = YastlPool::new(num_threads); + let mut runner = RunnerWithYastlPool::from(&pond); + run_with_runner(&mut runner, num_threads, num_repetitions, input) + } + + let args = Args::parse(); + println!("\n{args:?}"); + + let input: Vec<_> = (0..args.len as usize).collect::>(); + let expected: Vec<_> = input + .iter() + .map(|x| x.to_string()) + .filter_map(|x| (!x.starts_with('1')).then_some(x)) + .flat_map(|x| [format!("{}!", &x), x]) + .filter(|x| !x.starts_with('2')) + .filter_map(|x| x.parse::().ok()) + .map(|x| x.to_string()) + .collect(); + + args.pool_type.run( + args.num_threads.into(), + args.num_repetitions, + &input, + &expected, + ); + } +} From 3d7ad598d79ab2091f8b67b3bebb8461c2f0608d Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 14:50:29 +0200 Subject: [PATCH 228/264] revise example --- examples/using_pools.rs | 58 ++++++++++++++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 13 deletions(-) diff --git a/examples/using_pools.rs b/examples/using_pools.rs index a73741c..011d510 100644 --- a/examples/using_pools.rs +++ b/examples/using_pools.rs @@ -91,7 +91,7 @@ fn main() { Self::All => panic!("all is handled by run_all"), }; let elapsed = now.elapsed().unwrap(); - println!("\n{self:?} => {elapsed:?}"); + println!("{self:?} => {elapsed:?}"); assert_eq!(expected, result); } @@ -120,25 +120,50 @@ fn main() { num_repetitions: usize, input: &[usize], ) -> Vec { + let mut dummy = vec![]; let mut result = vec![]; - for _ in 0..num_repetitions { + for i in 0..num_repetitions { result = black_box( input .par() .num_threads(num_threads) .with_runner(&mut runner) - .map(|x| x.to_string()) - .filter_map(|x| (!x.starts_with('1')).then_some(x)) - .flat_map(|x| [format!("{}!", &x), x]) - .filter(|x| !x.starts_with('2')) - .filter_map(|x| x.parse::().ok()) + .flat_map(|x| { + [ + *x, + fib(x % 10), + fib(x % 21), + fib(x % 17), + fib(x % 33), + fib(x % 21), + ] + }) + .map(|x| 3 * x) + .filter(|x| !(100..150).contains(x)) .map(|x| x.to_string()) .collect(), ); + if i < num_repetitions.min(result.len()) { + dummy.push(result[i].clone()) + }; + } + for i in 0..dummy.len() { + assert_eq!(&dummy[i], &result[i]); } result } + fn fib(n: usize) -> usize { + let mut a = 0; + let mut b = 1; + for _ in 0..n { + let c = a + b; + a = b; + b = c; + } + a + } + fn run_std(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { let mut runner = DefaultRunner::default(); // StdRunner run_with_runner(&mut runner, num_threads, num_repetitions, input) @@ -208,16 +233,23 @@ fn main() { } let args = Args::parse(); - println!("\n{args:?}"); + println!("{args:?}"); let input: Vec<_> = (0..args.len as usize).collect::>(); let expected: Vec<_> = input .iter() - .map(|x| x.to_string()) - 
.filter_map(|x| (!x.starts_with('1')).then_some(x)) - .flat_map(|x| [format!("{}!", &x), x]) - .filter(|x| !x.starts_with('2')) - .filter_map(|x| x.parse::().ok()) + .flat_map(|x| { + [ + *x, + fib(x % 10), + fib(x % 21), + fib(x % 17), + fib(x % 33), + fib(x % 21), + ] + }) + .map(|x| 3 * x) + .filter(|x| !(100..150).contains(x)) .map(|x| x.to_string()) .collect(); From 87167682eb89239f3a676f197b3510b268bff326 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 19:16:25 +0200 Subject: [PATCH 229/264] revise pools example --- examples/using_pools.rs | 95 +++++++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 47 deletions(-) diff --git a/examples/using_pools.rs b/examples/using_pools.rs index 011d510..4f781f2 100644 --- a/examples/using_pools.rs +++ b/examples/using_pools.rs @@ -1,34 +1,35 @@ mod utils; // cargo run --all-features --release --example using_pools -// to run with all options +// to run with all options: +// +// output: +// +// Args { pool_type: All, num_threads: 16, len: 100000, num_repetitions: 1000 } +// Std => 15.912437916s +// Sequential => 46.194610858s +// Pond => 42.560279289s +// Poolite => 21.422590826s +// RayonCore => 16.227641997s +// ScopedPool => 15.958834105s +// ScopedThreadPool => 17.228307255s +// Yastl => 43.914882593s // cargo run --all-features --release --example using_pools -- --pool-type scoped-pool // to run only using scoped-pool +// +// output: +// +// Args { pool_type: ScopedPool, num_threads: 16, len: 100000, num_repetitions: 1000 } +// ScopedPool => 16.640308686s -// cargo run --all-features --release --example using_pools -- --pool-type scoped-thread-pool --len 100 --num-repetitions 100000 -// to run only using scoped_threadpool, with 100000 repetitions for input size of 100 - -// cargo run --all-features --release --example using_pools -- --pool-type sequential -// 11.02s - -// cargo run --all-features --release --example using_pools -- --pool-type std -// 3.66s - -// cargo run --all-features --release --example using_pools -- --pool-type pond -// 11.49s - -// cargo run --all-features --release --example using_pools -- --pool-type poolite -// 12.27s - -// cargo run --all-features --release --example using_pools -- --pool-type rayon-core -// 3.76s - -// cargo run --all-features --release --example using_pools -- --pool-type scoped_threadpool -// 3.70s - -// cargo run --all-features --release --example using_pools -- --pool-type yastl -// 11.47s +// cargo run --all-features --release --example using_pools -- --pool-type rayon-core --len 1000 --num-repetitions 10000 +// to run only using rayon-core ThreadPool, with 10000 repetitions for input size of 1000 +// +// output: +// +// Args { pool_type: RayonCore, num_threads: 16, len: 1000, num_repetitions: 10000 } +// RayonCore => 6.950370104s fn main() { #[cfg(feature = "std")] @@ -131,11 +132,11 @@ fn main() { .flat_map(|x| { [ *x, - fib(x % 10), - fib(x % 21), - fib(x % 17), - fib(x % 33), - fib(x % 21), + fibonacci(x % 10), + fibonacci(x % 21), + fibonacci(x % 17), + fibonacci(x % 33), + fibonacci(x % 21), ] }) .map(|x| 3 * x) @@ -153,7 +154,7 @@ fn main() { result } - fn fib(n: usize) -> usize { + fn fibonacci(n: usize) -> usize { let mut a = 0; let mut b = 1; for _ in 0..n { @@ -179,17 +180,17 @@ fn main() { } fn run_pond(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { - let mut pond = PondPool::new_threads_unbounded(num_threads); - let mut runner = RunnerWithPondPool::from(&mut pond); + let mut pool = 
PondPool::new_threads_unbounded(num_threads); + let mut runner = RunnerWithPondPool::from(&mut pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } fn run_poolite(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { - let pond = poolite::Pool::with_builder( + let pool = poolite::Pool::with_builder( poolite::Builder::new().min(num_threads).max(num_threads), ) .unwrap(); - let mut runner = RunnerWithPoolitePool::from(&pond); + let mut runner = RunnerWithPoolitePool::from(&pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } @@ -198,11 +199,11 @@ fn main() { num_repetitions: usize, input: &[usize], ) -> Vec { - let pond = rayon_core::ThreadPoolBuilder::new() + let pool = rayon_core::ThreadPoolBuilder::new() .num_threads(num_threads) .build() .unwrap(); - let mut runner = RunnerWithRayonPool::from(&pond); + let mut runner = RunnerWithRayonPool::from(&pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } @@ -211,8 +212,8 @@ fn main() { num_repetitions: usize, input: &[usize], ) -> Vec { - let pond = scoped_pool::Pool::new(num_threads); - let mut runner = RunnerWithScopedPool::from(&pond); + let pool = scoped_pool::Pool::new(num_threads); + let mut runner = RunnerWithScopedPool::from(&pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } @@ -221,14 +222,14 @@ fn main() { num_repetitions: usize, input: &[usize], ) -> Vec { - let mut pond = scoped_threadpool::Pool::new(num_threads as u32); - let mut runner = RunnerWithScopedThreadPool::from(&mut pond); + let mut pool = scoped_threadpool::Pool::new(num_threads as u32); + let mut runner = RunnerWithScopedThreadPool::from(&mut pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } fn run_yastl(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { - let pond = YastlPool::new(num_threads); - let mut runner = RunnerWithYastlPool::from(&pond); + let pool = YastlPool::new(num_threads); + let mut runner = RunnerWithYastlPool::from(&pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } @@ -241,11 +242,11 @@ fn main() { .flat_map(|x| { [ *x, - fib(x % 10), - fib(x % 21), - fib(x % 17), - fib(x % 33), - fib(x % 21), + fibonacci(x % 10), + fibonacci(x % 21), + fibonacci(x % 17), + fibonacci(x % 33), + fibonacci(x % 21), ] }) .map(|x| 3 * x) From da2bf756eeaa41857c4f16e6ef3479c55d301898 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 19:24:21 +0200 Subject: [PATCH 230/264] generic RunnerWithPool struct is introduced --- src/lib.rs | 3 +- src/runner/implementations/mod.rs | 3 ++ .../implementations/runner_with_pool.rs | 48 +++++++++++++++++++ src/runner/implementations/std_runner.rs | 29 +++++++++++ src/runner/implementations/tests/pond.rs | 4 +- src/runner/implementations/tests/poolite.rs | 4 +- .../implementations/tests/rayon_core.rs | 4 +- .../implementations/tests/scoped_pool.rs | 4 +- .../tests/scoped_threadpool.rs | 4 +- src/runner/implementations/tests/std.rs | 8 +++- src/runner/implementations/tests/yastl.rs | 6 +-- src/runner/mod.rs | 2 +- 12 files changed, 103 insertions(+), 16 deletions(-) create mode 100644 src/runner/implementations/runner_with_pool.rs diff --git a/src/lib.rs b/src/lib.rs index ebb0887..b739113 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -73,7 +73,8 @@ pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params}; pub use special_type_sets::Sum; pub use using::ParIterUsing; -pub use runner::DefaultRunner; +pub use runner::{DefaultRunner, RunnerWithPool}; + 
 #[cfg(feature = "poolite")]
 pub use runner::RunnerWithPoolitePool;
 #[cfg(feature = "rayon-core")]
diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs
index d9b66cb..726d4c2 100644
--- a/src/runner/implementations/mod.rs
+++ b/src/runner/implementations/mod.rs
@@ -1,6 +1,9 @@
 #[cfg(test)]
 mod tests;
 
+mod runner_with_pool;
+pub use runner_with_pool::RunnerWithPool;
+
 mod sequential;
 pub use sequential::SequentialRunner;
 
diff --git a/src/runner/implementations/runner_with_pool.rs b/src/runner/implementations/runner_with_pool.rs
new file mode 100644
index 0000000..7b1f16c
--- /dev/null
+++ b/src/runner/implementations/runner_with_pool.rs
@@ -0,0 +1,48 @@
+use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner};
+use core::marker::PhantomData;
+
+pub struct RunnerWithPool<P, R = DefaultExecutor>
+where
+    P: ParThreadPool,
+    R: ParallelExecutor,
+{
+    pool: P,
+    runner: PhantomData<R>,
+}
+
+impl<P: ParThreadPool> From<P> for RunnerWithPool<P, DefaultExecutor> {
+    fn from(pool: P) -> Self {
+        Self {
+            pool,
+            runner: PhantomData,
+        }
+    }
+}
+
+impl<P, R> RunnerWithPool<P, R>
+where
+    P: ParThreadPool,
+    R: ParallelExecutor,
+{
+    pub fn into_inner_pool(self) -> P {
+        self.pool
+    }
+}
+
+impl<P, R> ParallelRunner for RunnerWithPool<P, R>
+where
+    P: ParThreadPool,
+    R: ParallelExecutor,
+{
+    type Executor = R;
+
+    type ThreadPool = P;
+
+    fn thread_pool(&self) -> &Self::ThreadPool {
+        &self.pool
+    }
+
+    fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool {
+        &mut self.pool
+    }
+}
diff --git a/src/runner/implementations/std_runner.rs b/src/runner/implementations/std_runner.rs
index 4830839..934be08 100644
--- a/src/runner/implementations/std_runner.rs
+++ b/src/runner/implementations/std_runner.rs
@@ -58,6 +58,35 @@ impl ParThreadPool for StdDefaultPool {
     }
 }
 
+impl ParThreadPool for &StdDefaultPool {
+    type ScopeRef<'s, 'env, 'scope>
+        = &'s std::thread::Scope<'s, 'env>
+    where
+        'scope: 's,
+        'env: 'scope + 's;
+
+    fn max_num_threads(&self) -> NonZeroUsize {
+        self.max_num_threads
+    }
+
+    fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
+    where
+        'env: 'scope,
+        for<'s> F: FnOnce(&'s std::thread::Scope<'s, 'env>) + Send,
+    {
+        std::thread::scope(f)
+    }
+
+    fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
+    where
+        'scope: 's,
+        'env: 'scope + 's,
+        W: Fn() + Send + 'scope + 'env,
+    {
+        s.spawn(work);
+    }
+}
+
 // RUNNER
 
 /// Parallel runner using std threads.
diff --git a/src/runner/implementations/tests/pond.rs b/src/runner/implementations/tests/pond.rs
index d58d49b..4727956 100644
--- a/src/runner/implementations/tests/pond.rs
+++ b/src/runner/implementations/tests/pond.rs
@@ -1,7 +1,7 @@
 use super::run_map;
 use crate::{
     IterationOrder,
-    runner::implementations::{PondPool, RunnerWithPondPool},
+    runner::implementations::{PondPool, RunnerWithPool},
 };
 use test_case::test_matrix;
 
@@ -18,6 +18,6 @@ const N: [usize; 2] = [1025, 4735];
 ]
 fn pool_pond_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) {
     let mut pool = PondPool::new_threads_unbounded(nt);
-    let orch: RunnerWithPondPool<_> = (&mut pool).into();
+    let orch: RunnerWithPool<_> = (&mut pool).into();
     run_map(n, chunk, ordering, orch);
 }
diff --git a/src/runner/implementations/tests/poolite.rs b/src/runner/implementations/tests/poolite.rs
index 85431fc..67122a6 100644
--- a/src/runner/implementations/tests/poolite.rs
+++ b/src/runner/implementations/tests/poolite.rs
@@ -1,5 +1,5 @@
 use super::run_map;
-use crate::{IterationOrder, runner::implementations::RunnerWithPoolitePool};
+use crate::{IterationOrder, runner::implementations::RunnerWithPool};
 use poolite::{Builder, Pool};
 use test_case::test_matrix;
 
@@ -16,6 +16,6 @@ const N: [usize; 2] = [1025, 4735];
 ]
 fn pool_poolite_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) {
     let pool = Pool::with_builder(Builder::new().max(nt).min(nt)).unwrap();
-    let orch: RunnerWithPoolitePool<_> = (&pool).into();
+    let orch: RunnerWithPool<_> = (&pool).into();
     run_map(n, chunk, ordering, orch);
 }
diff --git a/src/runner/implementations/tests/rayon_core.rs b/src/runner/implementations/tests/rayon_core.rs
index 2654589..6ffb018 100644
--- a/src/runner/implementations/tests/rayon_core.rs
+++ b/src/runner/implementations/tests/rayon_core.rs
@@ -1,5 +1,5 @@
 use super::run_map;
-use crate::{IterationOrder, runner::implementations::RunnerWithRayonPool};
+use crate::{IterationOrder, runner::implementations::RunnerWithPool};
 use test_case::test_matrix;
 
 #[cfg(miri)]
@@ -20,6 +20,6 @@ fn
pool_rayon_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { .num_threads(nt) .build() .unwrap(); - let orch: RunnerWithRayonPool<_> = (&pool).into(); + let orch: RunnerWithPool<_> = (&pool).into(); run_map(n, chunk, ordering, orch); } diff --git a/src/runner/implementations/tests/scoped_pool.rs b/src/runner/implementations/tests/scoped_pool.rs index f353980..e8891cb 100644 --- a/src/runner/implementations/tests/scoped_pool.rs +++ b/src/runner/implementations/tests/scoped_pool.rs @@ -1,5 +1,5 @@ use super::run_map; -use crate::{IterationOrder, runner::implementations::RunnerWithScopedPool}; +use crate::{IterationOrder, runner::implementations::RunnerWithPool}; use scoped_pool::Pool; use test_case::test_matrix; @@ -16,6 +16,6 @@ const N: [usize; 2] = [1025, 4735]; ] fn pool_scoped_pool_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { let pool = Pool::new(nt); - let orch: RunnerWithScopedPool<_> = (&pool).into(); + let orch: RunnerWithPool<_> = (&pool).into(); run_map(n, chunk, ordering, orch); } diff --git a/src/runner/implementations/tests/scoped_threadpool.rs b/src/runner/implementations/tests/scoped_threadpool.rs index f2a492c..47f0b2b 100644 --- a/src/runner/implementations/tests/scoped_threadpool.rs +++ b/src/runner/implementations/tests/scoped_threadpool.rs @@ -1,5 +1,5 @@ use super::run_map; -use crate::{IterationOrder, runner::implementations::RunnerWithScopedThreadPool}; +use crate::{IterationOrder, runner::implementations::RunnerWithPool}; use scoped_threadpool::Pool; use test_case::test_matrix; @@ -16,6 +16,6 @@ const N: [usize; 2] = [1025, 4735]; ] fn pool_scoped_threadpool_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { let mut pool = Pool::new(nt as u32); - let orch: RunnerWithScopedThreadPool<_> = (&mut pool).into(); + let orch: RunnerWithPool<_> = (&mut pool).into(); run_map(n, chunk, ordering, orch); } diff --git a/src/runner/implementations/tests/std.rs b/src/runner/implementations/tests/std.rs index 5bc174d..a4d6a54 100644 --- a/src/runner/implementations/tests/std.rs +++ b/src/runner/implementations/tests/std.rs @@ -1,5 +1,7 @@ use super::run_map; -use crate::{IterationOrder, StdRunner}; +use crate::{ + IterationOrder, RunnerWithPool, StdRunner, runner::implementations::std_runner::StdDefaultPool, +}; use test_case::test_matrix; #[cfg(miri)] @@ -16,4 +18,8 @@ const N: [usize; 2] = [1025, 4735]; fn pool_scoped_threadpool_map(n: usize, _: usize, chunk: usize, ordering: IterationOrder) { let orch = StdRunner::default(); run_map(n, chunk, ordering, orch); + + let pool = StdDefaultPool::default(); + let orch: RunnerWithPool<_> = (&pool).into(); + run_map(n, chunk, ordering, orch); } diff --git a/src/runner/implementations/tests/yastl.rs b/src/runner/implementations/tests/yastl.rs index 604aaa4..2edc813 100644 --- a/src/runner/implementations/tests/yastl.rs +++ b/src/runner/implementations/tests/yastl.rs @@ -1,7 +1,7 @@ use super::run_map; use crate::{ IterationOrder, - runner::implementations::{RunnerWithYastlPool, YastlPool}, + runner::implementations::{RunnerWithPool, YastlPool}, }; use test_case::test_matrix; use yastl::ThreadConfig; @@ -19,10 +19,10 @@ const N: [usize; 2] = [1025, 4735]; ] fn pool_yastl_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder) { let pool = YastlPool::new(nt); - let orch: RunnerWithYastlPool<_> = (&pool).into(); + let orch: RunnerWithPool<_> = (&pool).into(); run_map(n, chunk, ordering, orch); let pool = YastlPool::with_config(nt, ThreadConfig::new()); - let orch: 
RunnerWithYastlPool<_> = (&pool).into(); + let orch: RunnerWithPool<_> = (&pool).into(); run_map(n, chunk, ordering, orch); } diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 894df54..b4ee856 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -9,7 +9,7 @@ pub use computation_kind::ComputationKind; pub use num_spawned::NumSpawned; pub use parallel_runner::ParallelRunner; -pub use implementations::SequentialRunner; +pub use implementations::{RunnerWithPool, SequentialRunner}; #[cfg(feature = "std")] pub use implementations::StdRunner; From 490f886f39663b2715ee47d7dc8fd04c428c7d4a Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 19:25:57 +0200 Subject: [PATCH 231/264] pools bench example uses generic RunnerWithPool --- .../{using_pools.rs => benchmark_pools.rs} | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) rename examples/{using_pools.rs => benchmark_pools.rs} (93%) diff --git a/examples/using_pools.rs b/examples/benchmark_pools.rs similarity index 93% rename from examples/using_pools.rs rename to examples/benchmark_pools.rs index 4f781f2..16c63ef 100644 --- a/examples/using_pools.rs +++ b/examples/benchmark_pools.rs @@ -1,6 +1,4 @@ -mod utils; - -// cargo run --all-features --release --example using_pools +// cargo run --all-features --release --example benchmark_pools // to run with all options: // // output: @@ -15,7 +13,7 @@ mod utils; // ScopedThreadPool => 17.228307255s // Yastl => 43.914882593s -// cargo run --all-features --release --example using_pools -- --pool-type scoped-pool +// cargo run --all-features --release --example benchmark_pools -- --pool-type scoped-pool // to run only using scoped-pool // // output: @@ -23,7 +21,7 @@ mod utils; // Args { pool_type: ScopedPool, num_threads: 16, len: 100000, num_repetitions: 1000 } // ScopedPool => 16.640308686s -// cargo run --all-features --release --example using_pools -- --pool-type rayon-core --len 1000 --num-repetitions 10000 +// cargo run --all-features --release --example benchmark_pools -- --pool-type rayon-core --len 1000 --num-repetitions 10000 // to run only using rayon-core ThreadPool, with 10000 repetitions for input size of 1000 // // output: @@ -31,6 +29,8 @@ mod utils; // Args { pool_type: RayonCore, num_threads: 16, len: 1000, num_repetitions: 10000 } // RayonCore => 6.950370104s +mod utils; + fn main() { #[cfg(feature = "std")] #[cfg(feature = "pond")] @@ -181,7 +181,7 @@ fn main() { fn run_pond(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { let mut pool = PondPool::new_threads_unbounded(num_threads); - let mut runner = RunnerWithPondPool::from(&mut pool); + let mut runner = RunnerWithPool::from(&mut pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } @@ -190,7 +190,7 @@ fn main() { poolite::Builder::new().min(num_threads).max(num_threads), ) .unwrap(); - let mut runner = RunnerWithPoolitePool::from(&pool); + let mut runner = RunnerWithPool::from(&pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } @@ -203,7 +203,7 @@ fn main() { .num_threads(num_threads) .build() .unwrap(); - let mut runner = RunnerWithRayonPool::from(&pool); + let mut runner = RunnerWithPool::from(&pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } @@ -213,7 +213,7 @@ fn main() { input: &[usize], ) -> Vec { let pool = scoped_pool::Pool::new(num_threads); - let mut runner = RunnerWithScopedPool::from(&pool); + let mut runner = RunnerWithPool::from(&pool); run_with_runner(&mut runner, num_threads, 
num_repetitions, input) } @@ -223,13 +223,13 @@ fn main() { input: &[usize], ) -> Vec { let mut pool = scoped_threadpool::Pool::new(num_threads as u32); - let mut runner = RunnerWithScopedThreadPool::from(&mut pool); + let mut runner = RunnerWithPool::from(&mut pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } fn run_yastl(num_threads: usize, num_repetitions: usize, input: &[usize]) -> Vec { let pool = YastlPool::new(num_threads); - let mut runner = RunnerWithYastlPool::from(&pool); + let mut runner = RunnerWithPool::from(&pool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } From 8248ea562d2235fac644ff8b6cba125555e8b286 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 19:37:03 +0200 Subject: [PATCH 232/264] refactoring to use a generic RunnerWithPool --- examples/benchmark_pools.rs | 2 +- src/lib.rs | 16 +----- src/runner/implementations/mod.rs | 16 ++---- src/runner/implementations/pond.rs | 55 +----------------- src/runner/implementations/poolite.rs | 55 +----------------- src/runner/implementations/rayon_core.rs | 57 +------------------ .../implementations/runner_with_pool.rs | 13 +++++ src/runner/implementations/scoped_pool.rs | 55 +----------------- .../implementations/scoped_threadpool.rs | 57 +------------------ src/runner/implementations/sequential.rs | 31 +--------- src/runner/implementations/std_runner.rs | 36 ------------ .../implementations/tests/sequential.rs | 4 +- src/runner/implementations/tests/std.rs | 5 +- src/runner/implementations/yastl.rs | 55 +----------------- src/runner/mod.rs | 35 ++++-------- 15 files changed, 50 insertions(+), 442 deletions(-) diff --git a/examples/benchmark_pools.rs b/examples/benchmark_pools.rs index 16c63ef..cd8a356 100644 --- a/examples/benchmark_pools.rs +++ b/examples/benchmark_pools.rs @@ -175,7 +175,7 @@ fn main() { num_repetitions: usize, input: &[usize], ) -> Vec { - let mut runner = SequentialRunner::default(); + let mut runner = RunnerWithPool::from(SequentialPool); run_with_runner(&mut runner, num_threads, num_repetitions, input) } diff --git a/src/lib.rs b/src/lib.rs index b739113..eb0c186 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -75,18 +75,8 @@ pub use using::ParIterUsing; pub use runner::{DefaultRunner, RunnerWithPool}; -#[cfg(feature = "poolite")] -pub use runner::RunnerWithPoolitePool; -#[cfg(feature = "rayon-core")] -pub use runner::RunnerWithRayonPool; -#[cfg(feature = "scoped-pool")] -pub use runner::RunnerWithScopedPool; -#[cfg(feature = "scoped_threadpool")] -pub use runner::RunnerWithScopedThreadPool; -pub use runner::SequentialRunner; -#[cfg(feature = "std")] -pub use runner::StdRunner; #[cfg(feature = "pond")] -pub use runner::{PondPool, RunnerWithPondPool}; +pub use runner::PondPool; +pub use runner::SequentialPool; #[cfg(feature = "yastl")] -pub use runner::{RunnerWithYastlPool, YastlPool}; +pub use runner::YastlPool; diff --git a/src/runner/implementations/mod.rs b/src/runner/implementations/mod.rs index 726d4c2..46ec352 100644 --- a/src/runner/implementations/mod.rs +++ b/src/runner/implementations/mod.rs @@ -5,39 +5,31 @@ mod runner_with_pool; pub use runner_with_pool::RunnerWithPool; mod sequential; -pub use sequential::SequentialRunner; +pub use sequential::SequentialPool; #[cfg(feature = "std")] mod std_runner; #[cfg(feature = "std")] -pub use std_runner::StdRunner; +pub use std_runner::StdDefaultPool; #[cfg(feature = "pond")] mod pond; #[cfg(feature = "pond")] -pub use pond::{PondPool, RunnerWithPondPool}; +pub use pond::PondPool; #[cfg(feature = 
"poolite")] mod poolite; -#[cfg(feature = "poolite")] -pub use poolite::RunnerWithPoolitePool; #[cfg(feature = "rayon-core")] mod rayon_core; -#[cfg(feature = "rayon-core")] -pub use rayon_core::RunnerWithRayonPool; #[cfg(feature = "scoped-pool")] mod scoped_pool; -#[cfg(feature = "scoped-pool")] -pub use scoped_pool::RunnerWithScopedPool; #[cfg(feature = "scoped_threadpool")] mod scoped_threadpool; -#[cfg(feature = "scoped_threadpool")] -pub use scoped_threadpool::RunnerWithScopedThreadPool; #[cfg(feature = "yastl")] mod yastl; #[cfg(feature = "yastl")] -pub use yastl::{RunnerWithYastlPool, YastlPool}; +pub use yastl::YastlPool; diff --git a/src/runner/implementations/pond.rs b/src/runner/implementations/pond.rs index c4068e8..ed7f53c 100644 --- a/src/runner/implementations/pond.rs +++ b/src/runner/implementations/pond.rs @@ -1,10 +1,7 @@ -use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner}; -use core::{marker::PhantomData, num::NonZeroUsize}; -use orx_self_or::SoM; +use crate::par_thread_pool::ParThreadPool; +use core::num::NonZeroUsize; use pond::{Pool, Scope}; -// POOL - /// A wrapper for `pond::Pool` and number of threads it was built with. /// /// NOTE: The reason why `pond::Pool` does not directly implement `ParThreadPool` @@ -97,51 +94,3 @@ impl ParThreadPool for &mut PondPool { self.1 } } - -// RUNNER - -/// Parallel runner using threads provided by pond::Pool. -pub struct RunnerWithPondPool -where - R: ParallelExecutor, - P: SoM + ParThreadPool, -{ - pool: P, - runner: PhantomData, -} - -impl From for RunnerWithPondPool { - fn from(pool: PondPool) -> Self { - Self { - pool, - runner: PhantomData, - } - } -} - -impl<'a> From<&'a mut PondPool> for RunnerWithPondPool<&'a mut PondPool, DefaultExecutor> { - fn from(pool: &'a mut PondPool) -> Self { - Self { - pool, - runner: PhantomData, - } - } -} - -impl ParallelRunner for RunnerWithPondPool -where - R: ParallelExecutor, - P: SoM + ParThreadPool, -{ - type Executor = R; - - type ThreadPool = P; - - fn thread_pool(&self) -> &Self::ThreadPool { - &self.pool - } - - fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { - &mut self.pool - } -} diff --git a/src/runner/implementations/poolite.rs b/src/runner/implementations/poolite.rs index 68821ad..92a9aed 100644 --- a/src/runner/implementations/poolite.rs +++ b/src/runner/implementations/poolite.rs @@ -1,10 +1,7 @@ -use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner}; -use core::{marker::PhantomData, num::NonZeroUsize}; -use orx_self_or::SoR; +use crate::par_thread_pool::ParThreadPool; +use core::num::NonZeroUsize; use poolite::{Pool, Scoped}; -// POOL - impl ParThreadPool for Pool { type ScopeRef<'s, 'env, 'scope> = &'s Scoped<'env, 'scope> @@ -62,51 +59,3 @@ impl ParThreadPool for &Pool { NonZeroUsize::new(self.threads_future().max(1)).expect(">0") } } - -// RUNNER - -/// Parallel runner using threads provided by poolite::Pool. 
-pub struct RunnerWithPoolitePool<P, R = DefaultExecutor>
-where
-    R: ParallelExecutor,
-    P: SoR<Pool> + ParThreadPool,
-{
-    pool: P,
-    runner: PhantomData<R>,
-}
-
-impl From<Pool> for RunnerWithPoolitePool<Pool, DefaultExecutor> {
-    fn from(pool: Pool) -> Self {
-        Self {
-            pool,
-            runner: PhantomData,
-        }
-    }
-}
-
-impl<'a> From<&'a Pool> for RunnerWithPoolitePool<&'a Pool, DefaultExecutor> {
-    fn from(pool: &'a Pool) -> Self {
-        Self {
-            pool,
-            runner: PhantomData,
-        }
-    }
-}
-
-impl<P, R> ParallelRunner for RunnerWithPoolitePool<P, R>
-where
-    R: ParallelExecutor,
-    P: SoR<Pool> + ParThreadPool,
-{
-    type Executor = R;
-
-    type ThreadPool = P;
-
-    fn thread_pool(&self) -> &Self::ThreadPool {
-        &self.pool
-    }
-
-    fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool {
-        &mut self.pool
-    }
-}
diff --git a/src/runner/implementations/rayon_core.rs b/src/runner/implementations/rayon_core.rs
index a49f1b4..062f948 100644
--- a/src/runner/implementations/rayon_core.rs
+++ b/src/runner/implementations/rayon_core.rs
@@ -1,12 +1,7 @@
-use crate::{
-    DefaultExecutor, ParallelExecutor, par_thread_pool::ParThreadPool, runner::ParallelRunner,
-};
-use core::{marker::PhantomData, num::NonZeroUsize};
-use orx_self_or::SoR;
+use crate::par_thread_pool::ParThreadPool;
+use core::num::NonZeroUsize;
 use rayon_core::ThreadPool;
 
-// POOL
-
 impl ParThreadPool for ThreadPool {
     type ScopeRef<'s, 'env, 'scope>
         = &'s rayon_core::Scope<'scope>
@@ -64,51 +59,3 @@ impl ParThreadPool for &rayon_core::ThreadPool {
         NonZeroUsize::new(self.current_num_threads().max(1)).expect(">0")
     }
 }
-
-// RUNNER
-
-/// Parallel runner using threads provided by rayon_core::ThreadPool.
-pub struct RunnerWithRayonPool<P, R = DefaultExecutor>
-where
-    R: ParallelExecutor,
-    P: SoR<ThreadPool> + ParThreadPool,
-{
-    pool: P,
-    runner: PhantomData<R>,
-}
-
-impl From<ThreadPool> for RunnerWithRayonPool<ThreadPool, DefaultExecutor> {
-    fn from(pool: ThreadPool) -> Self {
-        Self {
-            pool,
-            runner: PhantomData,
-        }
-    }
-}
-
-impl<'a> From<&'a ThreadPool> for RunnerWithRayonPool<&'a ThreadPool, DefaultExecutor> {
-    fn from(pool: &'a ThreadPool) -> Self {
-        Self {
-            pool,
-            runner: PhantomData,
-        }
-    }
-}
-
-impl<P, R> ParallelRunner for RunnerWithRayonPool<P, R>
-where
-    R: ParallelExecutor,
-    P: SoR<ThreadPool> + ParThreadPool,
-{
-    type Executor = R;
-
-    type ThreadPool = P;
-
-    fn thread_pool(&self) -> &Self::ThreadPool {
-        &self.pool
-    }
-
-    fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool {
-        &mut self.pool
-    }
-}
diff --git a/src/runner/implementations/runner_with_pool.rs b/src/runner/implementations/runner_with_pool.rs
index 7b1f16c..7c00807 100644
--- a/src/runner/implementations/runner_with_pool.rs
+++ b/src/runner/implementations/runner_with_pool.rs
@@ -10,6 +10,19 @@ where
     runner: PhantomData<R>,
 }
 
+impl<P, R> Default for RunnerWithPool<P, R>
+where
+    P: ParThreadPool + Default,
+    R: ParallelExecutor,
+{
+    fn default() -> Self {
+        Self {
+            pool: Default::default(),
+            runner: PhantomData,
+        }
+    }
+}
+
 impl<P: ParThreadPool> From<P> for RunnerWithPool<P, DefaultExecutor> {
     fn from(pool: P) -> Self {
         Self {
diff --git a/src/runner/implementations/scoped_pool.rs b/src/runner/implementations/scoped_pool.rs
index d7299c0..17e4cee 100644
--- a/src/runner/implementations/scoped_pool.rs
+++ b/src/runner/implementations/scoped_pool.rs
@@ -1,10 +1,7 @@
-use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner};
-use core::{marker::PhantomData, num::NonZeroUsize};
-use orx_self_or::SoR;
+use crate::par_thread_pool::ParThreadPool;
+use core::num::NonZeroUsize;
 use scoped_pool::{Pool, Scope};
 
-// POOL
-
 impl ParThreadPool for Pool {
     type ScopeRef<'s, 'env, 'scope>
         = &'s Scope<'scope>
@@ -62,51 +59,3 @@ impl ParThreadPool for &Pool {
         NonZeroUsize::new(self.workers().max(1)).expect(">0")
     }
 }
-
-// RUNNER
-
-/// Parallel runner using threads provided by scoped_pool::Pool.
-pub struct RunnerWithScopedPool<P, R = DefaultExecutor>
-where
-    R: ParallelExecutor,
-    P: SoR<Pool> + ParThreadPool,
-{
-    pool: P,
-    runner: PhantomData<R>,
-}
-
-impl From<Pool> for RunnerWithScopedPool<Pool, DefaultExecutor> {
-    fn from(pool: Pool) -> Self {
-        Self {
-            pool,
-            runner: PhantomData,
-        }
-    }
-}
-
-impl<'a> From<&'a Pool> for RunnerWithScopedPool<&'a Pool, DefaultExecutor> {
-    fn from(pool: &'a Pool) -> Self {
-        Self {
-            pool,
-            runner: PhantomData,
-        }
-    }
-}
-
-impl<P, R> ParallelRunner for RunnerWithScopedPool<P, R>
-where
-    R: ParallelExecutor,
-    P: SoR<Pool> + ParThreadPool,
-{
-    type Executor = R;
-
-    type ThreadPool = P;
-
-    fn thread_pool(&self) -> &Self::ThreadPool {
-        &self.pool
-    }
-
-    fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool {
-        &mut self.pool
-    }
-}
diff --git a/src/runner/implementations/scoped_threadpool.rs b/src/runner/implementations/scoped_threadpool.rs
index 286ff91..79852f0 100644
--- a/src/runner/implementations/scoped_threadpool.rs
+++ b/src/runner/implementations/scoped_threadpool.rs
@@ -1,12 +1,7 @@
-use crate::{
-    DefaultExecutor, ParallelExecutor, par_thread_pool::ParThreadPool, runner::ParallelRunner,
-};
-use core::{marker::PhantomData, num::NonZeroUsize};
-use orx_self_or::SoM;
+use crate::par_thread_pool::ParThreadPool;
+use core::num::NonZeroUsize;
 use scoped_threadpool::Pool;
 
-// POOL
-
 impl ParThreadPool for Pool {
     type ScopeRef<'s, 'env, 'scope>
         = &'s scoped_threadpool::Scope<'env, 'scope>
@@ -64,51 +59,3 @@ impl ParThreadPool for &mut Pool {
         NonZeroUsize::new((self.thread_count() as usize).max(1)).expect(">0")
     }
 }
-
-// RUNNER
-
-/// Parallel runner using threads provided by scoped_threadpool::Pool.
-pub struct RunnerWithScopedThreadPool -where - R: ParallelExecutor, - P: SoM + ParThreadPool, -{ - pool: P, - runner: PhantomData, -} - -impl From for RunnerWithScopedThreadPool { - fn from(pool: Pool) -> Self { - Self { - pool, - runner: PhantomData, - } - } -} - -impl<'a> From<&'a mut Pool> for RunnerWithScopedThreadPool<&'a mut Pool, DefaultExecutor> { - fn from(pool: &'a mut Pool) -> Self { - Self { - pool, - runner: PhantomData, - } - } -} - -impl ParallelRunner for RunnerWithScopedThreadPool -where - R: ParallelExecutor, - P: SoM + ParThreadPool, -{ - type Executor = R; - - type ThreadPool = P; - - fn thread_pool(&self) -> &Self::ThreadPool { - &self.pool - } - - fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { - &mut self.pool - } -} diff --git a/src/runner/implementations/sequential.rs b/src/runner/implementations/sequential.rs index 84014e6..6db7a7f 100644 --- a/src/runner/implementations/sequential.rs +++ b/src/runner/implementations/sequential.rs @@ -1,8 +1,6 @@ -use crate::{DefaultExecutor, ParThreadPool, runner::ParallelRunner}; +use crate::ParThreadPool; use core::num::NonZeroUsize; -// POOL - #[derive(Default)] pub struct SequentialPool; @@ -34,30 +32,3 @@ impl ParThreadPool for SequentialPool { NonZeroUsize::new(1).expect(">0") } } - -// RUNNER - -/// Sequential runner using using the main thread. -/// -/// This is the default runner when "std" feature is not enabled. -/// -/// Parallelization can be achieved by providing a parallel runner -/// using the [`with_runner`] method of parallel iterators. -/// -/// [`with_runner`]: crate::ParIter::with_runner -#[derive(Default)] -pub struct SequentialRunner(SequentialPool); - -impl ParallelRunner for SequentialRunner { - type Executor = DefaultExecutor; - - type ThreadPool = SequentialPool; - - fn thread_pool(&self) -> &Self::ThreadPool { - &self.0 - } - - fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { - &mut self.0 - } -} diff --git a/src/runner/implementations/std_runner.rs b/src/runner/implementations/std_runner.rs index 934be08..52a5745 100644 --- a/src/runner/implementations/std_runner.rs +++ b/src/runner/implementations/std_runner.rs @@ -1,11 +1,6 @@ -use crate::ParallelExecutor; use crate::par_thread_pool::ParThreadPool; -use crate::{DefaultExecutor, runner::ParallelRunner}; -use core::marker::PhantomData; use core::num::NonZeroUsize; -// POOL - const MAX_UNSET_NUM_THREADS: NonZeroUsize = NonZeroUsize::new(8).expect(">0"); pub struct StdDefaultPool { @@ -86,34 +81,3 @@ impl ParThreadPool for &StdDefaultPool { s.spawn(work); } } - -// RUNNER - -/// Parallel runner using std threads. 
-pub struct StdRunner { - pool: StdDefaultPool, - executor: PhantomData, -} - -impl Default for StdRunner { - fn default() -> Self { - Self { - pool: Default::default(), - executor: PhantomData, - } - } -} - -impl ParallelRunner for StdRunner { - type Executor = E; - - type ThreadPool = StdDefaultPool; - - fn thread_pool(&self) -> &Self::ThreadPool { - &self.pool - } - - fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool { - &mut self.pool - } -} diff --git a/src/runner/implementations/tests/sequential.rs b/src/runner/implementations/tests/sequential.rs index 62492ba..2dbbf13 100644 --- a/src/runner/implementations/tests/sequential.rs +++ b/src/runner/implementations/tests/sequential.rs @@ -1,5 +1,5 @@ use super::run_map; -use crate::{IterationOrder, runner::implementations::SequentialRunner}; +use crate::{IterationOrder, RunnerWithPool, runner::implementations::sequential::SequentialPool}; use test_case::test_matrix; #[cfg(miri)] @@ -14,6 +14,6 @@ const N: [usize; 2] = [1025, 4735]; [IterationOrder::Ordered, IterationOrder::Arbitrary]) ] fn pool_scoped_threadpool_map(n: usize, _: usize, chunk: usize, ordering: IterationOrder) { - let orch = SequentialRunner::default(); + let orch = RunnerWithPool::from(SequentialPool); run_map(n, chunk, ordering, orch); } diff --git a/src/runner/implementations/tests/std.rs b/src/runner/implementations/tests/std.rs index a4d6a54..5069343 100644 --- a/src/runner/implementations/tests/std.rs +++ b/src/runner/implementations/tests/std.rs @@ -1,6 +1,7 @@ use super::run_map; use crate::{ - IterationOrder, RunnerWithPool, StdRunner, runner::implementations::std_runner::StdDefaultPool, + DefaultRunner, IterationOrder, RunnerWithPool, + runner::implementations::std_runner::StdDefaultPool, }; use test_case::test_matrix; @@ -16,7 +17,7 @@ const N: [usize; 2] = [1025, 4735]; [IterationOrder::Ordered, IterationOrder::Arbitrary]) ] fn pool_scoped_threadpool_map(n: usize, _: usize, chunk: usize, ordering: IterationOrder) { - let orch = StdRunner::default(); + let orch = DefaultRunner::default(); run_map(n, chunk, ordering, orch); let pool = StdDefaultPool::default(); diff --git a/src/runner/implementations/yastl.rs b/src/runner/implementations/yastl.rs index 8617cec..558c23c 100644 --- a/src/runner/implementations/yastl.rs +++ b/src/runner/implementations/yastl.rs @@ -1,10 +1,7 @@ -use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner}; -use core::{marker::PhantomData, num::NonZeroUsize}; -use orx_self_or::SoR; +use crate::ParThreadPool; +use core::num::NonZeroUsize; use yastl::{Pool, Scope, ThreadConfig}; -// POOL - /// A wrapper for `yastl::Pool` and number of threads it was built with. /// /// NOTE: The reason why `yastl::Pool` does not directly implement `ParThreadPool` @@ -105,51 +102,3 @@ impl ParThreadPool for &YastlPool { self.1 } } - -// RUNNER - -/// Parallel runner using threads provided by yastl::Pool. 
-pub struct RunnerWithYastlPool<P, R = DefaultExecutor>
-where
-    R: ParallelExecutor,
-    P: SoR<YastlPool> + ParThreadPool,
-{
-    pool: P,
-    runner: PhantomData<R>,
-}
-
-impl From<YastlPool> for RunnerWithYastlPool<YastlPool, DefaultExecutor> {
-    fn from(pool: YastlPool) -> Self {
-        Self {
-            pool,
-            runner: PhantomData,
-        }
-    }
-}
-
-impl<'a> From<&'a YastlPool> for RunnerWithYastlPool<&'a YastlPool, DefaultExecutor> {
-    fn from(pool: &'a YastlPool) -> Self {
-        Self {
-            pool,
-            runner: PhantomData,
-        }
-    }
-}
-
-impl<P, R> ParallelRunner for RunnerWithYastlPool<P, R>
-where
-    R: ParallelExecutor,
-    P: SoR<YastlPool> + ParThreadPool,
-{
-    type Executor = R;
-
-    type ThreadPool = P;
-
-    fn thread_pool(&self) -> &Self::ThreadPool {
-        &self.pool
-    }
-
-    fn thread_pool_mut(&mut self) -> &mut Self::ThreadPool {
-        &mut self.pool
-    }
-}
diff --git a/src/runner/mod.rs b/src/runner/mod.rs
index b4ee856..29f0674 100644
--- a/src/runner/mod.rs
+++ b/src/runner/mod.rs
@@ -6,43 +6,30 @@ mod parallel_runner;
 pub(crate) use parallel_runner::{SharedStateOf, ThreadRunnerOf};
 
 pub use computation_kind::ComputationKind;
+pub use implementations::{RunnerWithPool, SequentialPool};
 pub use num_spawned::NumSpawned;
 pub use parallel_runner::ParallelRunner;
 
-pub use implementations::{RunnerWithPool, SequentialRunner};
-
-#[cfg(feature = "std")]
-pub use implementations::StdRunner;
-
 #[cfg(feature = "pond")]
-pub use implementations::{PondPool, RunnerWithPondPool};
-
-#[cfg(feature = "poolite")]
-pub use implementations::RunnerWithPoolitePool;
+pub use implementations::PondPool;
 
-#[cfg(feature = "rayon-core")]
-pub use implementations::RunnerWithRayonPool;
-
-#[cfg(feature = "scoped-pool")]
-pub use implementations::RunnerWithScopedPool;
-
-#[cfg(feature = "scoped_threadpool")]
-pub use implementations::RunnerWithScopedThreadPool;
+#[cfg(feature = "std")]
+pub use implementations::StdDefaultPool;
 
 #[cfg(feature = "yastl")]
-pub use implementations::{RunnerWithYastlPool, YastlPool};
+pub use implementations::YastlPool;
 
 // DEFAULT
 
 /// Default runner used by orx-parallel computations:
 ///
-/// * [`StdRunner`] when "std" feature is enabled,
-/// * `SequentialRunner` otherwise.
+/// * [`RunnerWithPool`] with [`StdDefaultPool`] when "std" feature is enabled,
+/// * [`RunnerWithPool`] with `SequentialPool` otherwise.
 #[cfg(feature = "std")]
-pub type DefaultRunner = StdRunner;
+pub type DefaultRunner = RunnerWithPool<StdDefaultPool>;
 
 /// Default runner used by orx-parallel computations:
 ///
-/// * `StdRunner` when "std" feature is enabled,
-/// * [`SequentialRunner`] otherwise.
+/// * [`RunnerWithPool`] with `StdDefaultPool` when "std" feature is enabled,
+/// * [`RunnerWithPool`] with [`SequentialPool`] otherwise.
 #[cfg(not(feature = "std"))]
-pub type DefaultRunner = SequentialRunner;
+pub type DefaultRunner = RunnerWithPool<SequentialPool>;

From 75dab833c12e3cca312c37464b1b18a0e1f894b6 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 19:40:26 +0200
Subject: [PATCH 233/264] fix pool doc-tests

---
 src/par_thread_pool.rs                          | 12 ++++++------
 src/runner/implementations/tests/scoped_pool.rs |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/par_thread_pool.rs b/src/par_thread_pool.rs
index a99b96f..b4b4eda 100644
--- a/src/par_thread_pool.rs
+++ b/src/par_thread_pool.rs
@@ -40,7 +40,7 @@ use orx_concurrent_bag::ConcurrentBag;
 /// ```
 /// use orx_parallel::*;
 ///
-/// #[cfg(feature = "rayon")]
+/// #[cfg(feature = "rayon-core")]
 /// {
 ///     let pool = rayon::ThreadPoolBuilder::new()
 ///         .num_threads(4)
 ///         .build()
 ///         .unwrap();
 ///
 ///     // creating a runner for the computation
-///     let runner = RunnerWithRayonPool::from(&pool);
+///     let runner = RunnerWithPool::from(&pool);
 ///     let sum = (0..1000).par().with_runner(runner).sum();
 ///     assert_eq!(sum, 1000 * 999 / 2);
 ///
 ///     // or reuse a runner multiple times (identical under the hood)
-///     let mut runner = RunnerWithRayonPool::from(&pool);
+///     let mut runner = RunnerWithPool::from(&pool);
 ///     let sum = (0..1000).par().with_runner(&mut runner).sum();
 ///     assert_eq!(sum, 1000 * 999 / 2);
 /// }
 /// ```
 ///
 /// Note that since rayon::ThreadPool::scope only requires a shared reference `&self`,
-/// we can create as many runners as we want from the same thread pool and use them concurrently.
+/// we can concurrently create and use as many runners as we want from the same thread pool.
 ///
 /// ## Scoped thread pool
 ///
@@ -76,13 +76,13 @@ use orx_concurrent_bag::ConcurrentBag;
 /// {
 ///     // creating a runner for the computation
 ///     let mut pool = scoped_threadpool::Pool::new(4);
-///     let runner = RunnerWithScopedThreadPool::from(&mut pool);
+///     let runner = RunnerWithPool::from(&mut pool);
 ///     let sum = (0..1000).par().with_runner(runner).sum();
 ///     assert_eq!(sum, 1000 * 999 / 2);
 ///
 ///     // or 
reuse a runner multiple times (identical under the hood) /// let mut pool = scoped_threadpool::Pool::new(4); -/// let runner = RunnerWithPool::from(&mut pool); +/// let mut runner = RunnerWithPool::from(&mut pool); /// let sum = (0..1000).par().with_runner(&mut runner).sum(); /// assert_eq!(sum, 1000 * 999 / 2); /// } diff --git a/src/runner/implementations/std_runner.rs b/src/runner/implementations/std_runner.rs index 52a5745..b0c41f1 100644 --- a/src/runner/implementations/std_runner.rs +++ b/src/runner/implementations/std_runner.rs @@ -3,6 +3,18 @@ use core::num::NonZeroUsize; const MAX_UNSET_NUM_THREADS: NonZeroUsize = NonZeroUsize::new(8).expect(">0"); +/// Native standard thread pool. +/// +/// This is the default thread pool used when "std" feature is enabled. +/// +/// Uses `std::thread::scope` and `scope.spawn(..)` to distribute work to threads. +/// +/// Its [`max_num_threads`] is determined as the minimum of: +/// +/// * the available parallelism of the host obtained via `std::thread::available_parallelism()`, and +/// * the upper bound set by the environment variable "ORX_PARALLEL_MAX_NUM_THREADS", only if set. +/// +/// [`max_num_threads`]: ParThreadPool::max_num_threads pub struct StdDefaultPool { max_num_threads: NonZeroUsize, } From ad8e76f9b6f08d33886a1bf4e253b7ff339d6df5 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 19:48:38 +0200 Subject: [PATCH 235/264] document SequentialPool --- src/runner/implementations/sequential.rs | 9 +++++++++ src/runner/implementations/std_runner.rs | 7 +++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/runner/implementations/sequential.rs b/src/runner/implementations/sequential.rs index 6db7a7f..14b0d56 100644 --- a/src/runner/implementations/sequential.rs +++ b/src/runner/implementations/sequential.rs @@ -1,6 +1,15 @@ use crate::ParThreadPool; use core::num::NonZeroUsize; +/// A fake thread pool with [`max_num_threads`] of 1. +/// All computations using this thread pool are executed sequentially by the main thread. +/// +/// This is the default thread pool used when "std" feature is disabled. +/// Note that the thread pool to be used for a parallel computation can be set by the +/// [`with_runner`] transformation separately for each parallel iterator. +/// +/// [`max_num_threads`]: ParThreadPool::max_num_threads +/// [`with_runner`]: crate::ParIter::with_runner #[derive(Default)] pub struct SequentialPool; diff --git a/src/runner/implementations/std_runner.rs b/src/runner/implementations/std_runner.rs index b0c41f1..d2c17ca 100644 --- a/src/runner/implementations/std_runner.rs +++ b/src/runner/implementations/std_runner.rs @@ -6,15 +6,18 @@ const MAX_UNSET_NUM_THREADS: NonZeroUsize = NonZeroUsize::new(8).expect(">0"); /// Native standard thread pool. /// /// This is the default thread pool used when "std" feature is enabled. +/// Note that the thread pool to be used for a parallel computation can be set by the +/// [`with_runner`] transformation separately for each parallel iterator. /// /// Uses `std::thread::scope` and `scope.spawn(..)` to distribute work to threads. /// -/// Its [`max_num_threads`] is determined as the minimum of: +/// Value of [`max_num_threads`] is determined as the minimum of: /// /// * the available parallelism of the host obtained via `std::thread::available_parallelism()`, and -/// * the upper bound set by the environment variable "ORX_PARALLEL_MAX_NUM_THREADS", only if set. +/// * the upper bound set by the environment variable "ORX_PARALLEL_MAX_NUM_THREADS", when set. 
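+///
+/// For instance, on a host whose available parallelism is 16, running with
+/// ORX_PARALLEL_MAX_NUM_THREADS=4 resolves this value to min(16, 4) = 4.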
/// /// [`max_num_threads`]: ParThreadPool::max_num_threads +/// [`with_runner`]: crate::ParIter::with_runner pub struct StdDefaultPool { max_num_threads: NonZeroUsize, } From b50cf0296af47f32747dbb3b7fe1ba254f4d7477 Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 20:08:01 +0200 Subject: [PATCH 236/264] RunnerWithPool tests --- src/lib.rs | 5 +- .../implementations/runner_with_pool.rs | 87 +++++++++++++++++++ src/runner/implementations/sequential.rs | 2 +- 3 files changed, 91 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index eb0c186..ce18d13 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -73,10 +73,11 @@ pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params}; pub use special_type_sets::Sum; pub use using::ParIterUsing; -pub use runner::{DefaultRunner, RunnerWithPool}; +pub use runner::{DefaultRunner, ParallelRunner, RunnerWithPool, SequentialPool}; #[cfg(feature = "pond")] pub use runner::PondPool; -pub use runner::SequentialPool; +#[cfg(feature = "std")] +pub use runner::StdDefaultPool; #[cfg(feature = "yastl")] pub use runner::YastlPool; diff --git a/src/runner/implementations/runner_with_pool.rs b/src/runner/implementations/runner_with_pool.rs index 7c00807..c55bb0c 100644 --- a/src/runner/implementations/runner_with_pool.rs +++ b/src/runner/implementations/runner_with_pool.rs @@ -1,6 +1,93 @@ use crate::{DefaultExecutor, ParThreadPool, ParallelExecutor, runner::ParallelRunner}; use core::marker::PhantomData; +/// Parallel runner with a given pool of type `P` and parallel executor of `R`. +/// +/// It can be constructed from any pool, or reference of a pool, implementing [`ParThreadPool`] +/// since `RunnerWithPool` implements `From
<P>
`. +/// +/// Note that default parallel runner; i.e., [`DefaultRunner`] is: +/// * `RunnerWithPool` when "std" feature is enabled, +/// * `RunnerWithPool` when "std" feature is disabled. +/// +/// # Examples +/// +/// ``` +/// use orx_parallel::*; +/// +/// // parallel computation generic over parallel runner; and hence, the thread pool +/// fn run_with_runner(runner: R, input: &[usize]) -> Vec { +/// input +/// .par() +/// .with_runner(runner) +/// .flat_map(|x| [*x, 2 * x, x / 7]) +/// .map(|x| x.to_string()) +/// .collect() +/// } +/// +/// let vec: Vec<_> = (0..42).collect(); +/// let input = vec.as_slice(); +/// +/// // runs on the main thread +/// let runner = RunnerWithPool::from(SequentialPool); +/// let expected = run_with_runner(runner, input); +/// +/// // uses native threads +/// let runner = RunnerWithPool::from(StdDefaultPool::default()); +/// let result = run_with_runner(runner, input); +/// assert_eq!(&expected, &result); +/// +/// // uses rayon-core ThreadPool with 8 threads +/// #[cfg(feature = "rayon-core")] +/// { +/// let pool = rayon_core::ThreadPoolBuilder::new() +/// .num_threads(8) +/// .build() +/// .unwrap(); +/// let result = run_with_runner(RunnerWithPool::from(&pool), input); +/// assert_eq!(&expected, &result); +/// } +/// +/// // uses scoped-pool Pool with 8 threads +/// #[cfg(feature = "scoped-pool")] +/// { +/// let pool = scoped_pool::Pool::new(8); +/// let result = run_with_runner(RunnerWithPool::from(&pool), input); +/// assert_eq!(&expected, &result); +/// } +/// +/// // uses scoped_threadpool Pool with 8 threads +/// #[cfg(feature = "scoped_threadpool")] +/// { +/// let mut pool = scoped_threadpool::Pool::new(8); +/// let result = run_with_runner(RunnerWithPool::from(&mut pool), input); // requires &mut pool +/// assert_eq!(&expected, &result); +/// } +/// +/// // uses yastl Pool wrapped as YastlPool with 8 threads +/// #[cfg(feature = "yastl")] +/// { +/// let pool = YastlPool::new(8); +/// let result = run_with_runner(RunnerWithPool::from(&pool), input); +/// assert_eq!(&expected, &result); +/// } +/// +/// // uses pond Pool wrapped as PondPool with 8 threads +/// #[cfg(feature = "pond")] +/// { +/// let mut pool = PondPool::new_threads_unbounded(8); +/// let result = run_with_runner(RunnerWithPool::from(&mut pool), input); // requires &mut pool +/// assert_eq!(&expected, &result); +/// } +/// +/// // uses poolite Pool with 8 threads +/// #[cfg(feature = "poolite")] +/// { +/// let pool = poolite::Pool::with_builder(poolite::Builder::new().min(8).max(8)).unwrap(); +/// let result = run_with_runner(RunnerWithPool::from(&pool), input); +/// assert_eq!(&expected, &result); +/// } +/// ``` pub struct RunnerWithPool where P: ParThreadPool, diff --git a/src/runner/implementations/sequential.rs b/src/runner/implementations/sequential.rs index 14b0d56..eb09a8e 100644 --- a/src/runner/implementations/sequential.rs +++ b/src/runner/implementations/sequential.rs @@ -1,7 +1,7 @@ use crate::ParThreadPool; use core::num::NonZeroUsize; -/// A fake thread pool with [`max_num_threads`] of 1. +/// A 'thread pool' with [`max_num_threads`] of 1. /// All computations using this thread pool are executed sequentially by the main thread. /// /// This is the default thread pool used when "std" feature is disabled. 
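Before moving on to the remaining documentation patches: after the refactoring in patch 232, supporting a new thread pool no longer requires a dedicated runner type; implementing ParThreadPool for the pool type is enough. As a rough sketch of what such an integration involves — mirroring the `impl ParThreadPool for &StdDefaultPool` block introduced in patch 230 — a custom pool could be wired in as below. `FakePool`, its fixed capacity of 4 threads, and the assumption that the trait requires exactly the three members shown in these patches are illustrative; this is not code from the crate itself.

    use core::num::NonZeroUsize;
    use orx_parallel::*;

    // Hypothetical pool: delegates to std scoped threads, so it only serves
    // to show which members ParThreadPool asks for.
    struct FakePool;

    impl ParThreadPool for FakePool {
        type ScopeRef<'s, 'env, 'scope>
            = &'s std::thread::Scope<'s, 'env>
        where
            'scope: 's,
            'env: 'scope + 's;

        fn max_num_threads(&self) -> NonZeroUsize {
            // illustrative fixed capacity
            NonZeroUsize::new(4).expect(">0")
        }

        fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
        where
            'env: 'scope,
            for<'s> F: FnOnce(&'s std::thread::Scope<'s, 'env>) + Send,
        {
            std::thread::scope(f)
        }

        fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
        where
            'scope: 's,
            'env: 'scope + 's,
            W: Fn() + Send + 'scope + 'env,
        {
            s.spawn(work);
        }
    }

    fn main() {
        // the generic runner turns any ParThreadPool into a parallel runner
        let runner = RunnerWithPool::from(FakePool);
        let sum = (0..1000).par().with_runner(runner).sum();
        assert_eq!(sum, 1000 * 999 / 2);
    }

With this single impl in place, `RunnerWithPool::from(FakePool)` plugs into `with_runner` exactly like the rayon-core, scoped-pool, or yastl integrations exercised in the tests above.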
From 32b0d30354f678de4c9b0c70230ae5cd9fe816f6 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:14:35 +0200
Subject: [PATCH 237/264] document into_inner_pool
---
 .../implementations/runner_with_pool.rs | 137 ++++++++++++++++++
 1 file changed, 137 insertions(+)

diff --git a/src/runner/implementations/runner_with_pool.rs b/src/runner/implementations/runner_with_pool.rs
index c55bb0c..d6a18a9 100644
--- a/src/runner/implementations/runner_with_pool.rs
+++ b/src/runner/implementations/runner_with_pool.rs
@@ -124,6 +124,46 @@ where
     P: ParThreadPool,
     R: ParallelExecutor,
 {
+    /// Converts the runner into the wrapped underlying pool.
+    ///
+    /// Note that a `RunnerWithPool` can always be created from an owned `pool`, but also from
+    /// * `&pool` in most cases,
+    /// * `&mut pool` in others.
+    ///
+    /// This function is only relevant when the runner is created from an owned pool, in which case
+    /// `into_inner_pool` can be used to get back ownership of the pool.
+    ///
+    /// # Example
+    ///
+    /// The following example demonstrates the use case for the rayon-core thread pool; however, it
+    /// holds for all thread pool implementations.
+    ///
+    /// ```
+    /// use orx_parallel::*;
+    ///
+    /// #[cfg(feature = "rayon-core")]
+    /// {
+    ///     let pool = rayon_core::ThreadPoolBuilder::new()
+    ///         .num_threads(8)
+    ///         .build()
+    ///         .unwrap();
+    ///
+    ///     // create runner owning the pool
+    ///     let mut runner = RunnerWithPool::from(pool);
+    ///
+    ///     // use runner, and hence the pool, in parallel computations
+    ///     let sum = input.par().with_runner(&mut runner).sum();
+    ///     let max = input.par().with_runner(&mut runner).max();
+    ///     let txt: Vec<_> = input
+    ///         .par()
+    ///         .with_runner(&mut runner)
+    ///         .map(|x| x.to_string())
+    ///         .collect();
+    ///
+    ///     // get back ownership of the pool
+    ///     let pool: rayon_core::ThreadPool = runner.into_inner_pool();
+    /// }
+    /// ```
     pub fn into_inner_pool(self) -> P {
         self.pool
     }
@@ -146,3 +186,100 @@ where
         &mut self.pool
     }
 }
+
+#[cfg(test)]
+mod tsts {
+    use crate::*;
+    use alloc::string::{String, ToString};
+    use alloc::vec;
+    use alloc::vec::Vec;
+
+    #[test]
+    fn abc() {
+        // parallel computation generic over parallel runner; and hence, the thread pool
+        fn run_with_runner<R: ParallelRunner>(runner: R, input: &[usize]) -> Vec<String> {
+            input
+                .par()
+                .with_runner(runner)
+                .flat_map(|x| [*x, 2 * x, x / 7])
+                .map(|x| x.to_string())
+                .collect()
+        }
+
+        let vec: Vec<_> = (0..42).collect();
+        let input = vec.as_slice();
+
+        // runs on the main thread
+        let runner = RunnerWithPool::from(SequentialPool);
+        let expected = run_with_runner(runner, input);
+
+        // uses native threads
+        let runner = RunnerWithPool::from(StdDefaultPool::default());
+        let result = run_with_runner(runner, input);
+        assert_eq!(&expected, &result);
+
+        // uses rayon-core ThreadPool with 8 threads
+        #[cfg(feature = "rayon-core")]
+        {
+            let pool = rayon_core::ThreadPoolBuilder::new()
+                .num_threads(8)
+                .build()
+                .unwrap();
+
+            // create runner owning the pool
+            let mut runner = RunnerWithPool::from(pool);
+
+            // use runner, and hence the pool, in parallel computations
+            let sum = input.par().with_runner(&mut runner).sum();
+            let max = input.par().with_runner(&mut runner).max();
+            let txt: Vec<_> = input
+                .par()
+                .with_runner(&mut runner)
+                .map(|x| x.to_string())
+                .collect();
+
+            // get back ownership of the pool
+            let pool: rayon_core::ThreadPool = runner.into_inner_pool();
+        }
+
+        // uses scoped-pool Pool with 8 threads
+        #[cfg(feature = "scoped-pool")]
+        {
+            let pool = scoped_pool::Pool::new(8);
+            let result = run_with_runner(RunnerWithPool::from(&pool), input);
+            assert_eq!(&expected, &result);
+        }
+
+        // uses scoped_threadpool Pool with 8 threads
+        #[cfg(feature = "scoped_threadpool")]
+        {
+            let mut pool = scoped_threadpool::Pool::new(8);
+            let result = run_with_runner(RunnerWithPool::from(&mut pool), input); // requires &mut pool
+            assert_eq!(&expected, &result);
+        }
+
+        // uses yastl Pool wrapped as YastlPool with 8 threads
+        #[cfg(feature = "yastl")]
+        {
+            let pool = YastlPool::new(8);
+            let result = run_with_runner(RunnerWithPool::from(&pool), input);
+            assert_eq!(&expected, &result);
+        }
+
+        // uses pond Pool wrapped as PondPool with 8 threads
+        #[cfg(feature = "pond")]
+        {
+            let mut pool = PondPool::new_threads_unbounded(8);
+            let result = run_with_runner(RunnerWithPool::from(&mut pool), input); // requires &mut pool
+            assert_eq!(&expected, &result);
+        }
+
+        // uses poolite Pool with 8 threads
+        #[cfg(feature = "poolite")]
+        {
+            let pool = poolite::Pool::with_builder(poolite::Builder::new().min(8).max(8)).unwrap();
+            let result = run_with_runner(RunnerWithPool::from(&pool), input);
+            assert_eq!(&expected, &result);
+        }
+    }
+}

From f5dc044920f147208e596c0bd3d5a974c63450a2 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:15:24 +0200
Subject: [PATCH 238/264] fix doc-test
---
 src/runner/implementations/runner_with_pool.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/runner/implementations/runner_with_pool.rs b/src/runner/implementations/runner_with_pool.rs
index d6a18a9..40325fa 100644
--- a/src/runner/implementations/runner_with_pool.rs
+++ b/src/runner/implementations/runner_with_pool.rs
@@ -141,6 +141,9 @@ where
     /// ```
     /// use orx_parallel::*;
     ///
+    /// let vec: Vec<_> = (0..42).collect();
+    /// let input = vec.as_slice();
+    ///
     /// #[cfg(feature = "rayon-core")]
     /// {
     ///     let pool = rayon_core::ThreadPoolBuilder::new()

From 15196050e264d3f963907a49bacc09719726644f Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:16:56 +0200
Subject: [PATCH 239/264] clean up
---
 .../implementations/runner_with_pool.rs | 104 +-----------------
 1 file changed, 4 insertions(+), 100 deletions(-)

diff --git a/src/runner/implementations/runner_with_pool.rs b/src/runner/implementations/runner_with_pool.rs
index 40325fa..8a5daf3 100644
--- a/src/runner/implementations/runner_with_pool.rs
+++ b/src/runner/implementations/runner_with_pool.rs
@@ -3,8 +3,9 @@ use core::marker::PhantomData;

 /// Parallel runner with a given pool of type `P` and parallel executor of `R`.
 ///
-/// It can be constructed from any pool, or reference of a pool, implementing [`ParThreadPool`]
-/// since `RunnerWithPool` implements `From<P>`.
+/// A `RunnerWithPool` can always be created from an owned `pool` implementing [`ParThreadPool`], but also from
+/// * `&pool` in most cases,
+/// * `&mut pool` in others.
 ///
 /// Note that the default parallel runner, i.e., [`DefaultRunner`], is:
@@ -28,7 +29,7 @@ use core::marker::PhantomData;
 /// let vec: Vec<_> = (0..42).collect();
 /// let input = vec.as_slice();
 ///
-/// // runs on the main thread
+/// // runs sequentially on the main thread
 /// let runner = RunnerWithPool::from(SequentialPool);
 /// let expected = run_with_runner(runner, input);
 ///
@@ -189,100 +190,3 @@ where
         &mut self.pool
     }
 }
-
-#[cfg(test)]
-mod tsts {
-    use crate::*;
-    use alloc::string::{String, ToString};
-    use alloc::vec;
-    use alloc::vec::Vec;
-
-    #[test]
-    fn abc() {
-        // parallel computation generic over parallel runner; and hence, the thread pool
-        fn run_with_runner<R: ParallelRunner>(runner: R, input: &[usize]) -> Vec<String> {
-            input
-                .par()
-                .with_runner(runner)
-                .flat_map(|x| [*x, 2 * x, x / 7])
-                .map(|x| x.to_string())
-                .collect()
-        }
-
-        let vec: Vec<_> = (0..42).collect();
-        let input = vec.as_slice();
-
-        // runs on the main thread
-        let runner = RunnerWithPool::from(SequentialPool);
-        let expected = run_with_runner(runner, input);
-
-        // uses native threads
-        let runner = RunnerWithPool::from(StdDefaultPool::default());
-        let result = run_with_runner(runner, input);
-        assert_eq!(&expected, &result);
-
-        // uses rayon-core ThreadPool with 8 threads
-        #[cfg(feature = "rayon-core")]
-        {
-            let pool = rayon_core::ThreadPoolBuilder::new()
-                .num_threads(8)
-                .build()
-                .unwrap();
-
-            // create runner owning the pool
-            let mut runner = RunnerWithPool::from(pool);
-
-            // use runner, and hence the pool, in parallel computations
-            let sum = input.par().with_runner(&mut runner).sum();
-            let max = input.par().with_runner(&mut runner).max();
-            let txt: Vec<_> = input
-                .par()
-                .with_runner(&mut runner)
-                .map(|x| x.to_string())
-                .collect();
-
-            // get back ownership of the pool
-            let pool: rayon_core::ThreadPool = runner.into_inner_pool();
-        }
-
-        // uses scoped-pool Pool with 8 threads
-        #[cfg(feature = "scoped-pool")]
-        {
-            let pool = scoped_pool::Pool::new(8);
-            let result = run_with_runner(RunnerWithPool::from(&pool), input);
-            assert_eq!(&expected, &result);
-        }
-
-        // uses scoped_threadpool Pool with 8 threads
-        #[cfg(feature = "scoped_threadpool")]
-        {
-            let mut pool = scoped_threadpool::Pool::new(8);
-            let result = run_with_runner(RunnerWithPool::from(&mut pool), input); // requires &mut pool
-            assert_eq!(&expected, &result);
-        }
-
-        // uses yastl Pool wrapped as YastlPool with 8 threads
-        #[cfg(feature = "yastl")]
-        {
-            let pool = YastlPool::new(8);
-            let result = run_with_runner(RunnerWithPool::from(&pool), input);
-            assert_eq!(&expected, &result);
-        }
-
-        // uses pond Pool wrapped as PondPool with 8 threads
-        #[cfg(feature = "pond")]
-        {
-            let mut pool = PondPool::new_threads_unbounded(8);
-            let result = run_with_runner(RunnerWithPool::from(&mut pool), input); // requires &mut pool
-            assert_eq!(&expected, &result);
-        }
-
-        // uses poolite Pool with 8 threads
-        #[cfg(feature = "poolite")]
-        {
-            let pool = poolite::Pool::with_builder(poolite::Builder::new().min(8).max(8)).unwrap();
-            let result = run_with_runner(RunnerWithPool::from(&pool), input);
-            assert_eq!(&expected, &result);
-        }
-    }
-}

From fc7bd74c9f63532b76de9b169a2feb7ad559e278 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:18:24 +0200
Subject: [PATCH 240/264] clippy fixes
---
 src/runner/implementations/pond.rs  | 1 +
 src/runner/implementations/yastl.rs | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/src/runner/implementations/pond.rs b/src/runner/implementations/pond.rs
index ed7f53c..be8bf8e 100644
--- a/src/runner/implementations/pond.rs
+++ b/src/runner/implementations/pond.rs
@@ -18,6 +18,7 @@ impl PondPool {
     pub fn new_threads_unbounded(num_threads: usize) -> Self {
         let num_threads = num_threads.min(1);
         let pool = Pool::new_threads_unbounded(num_threads);
+        #[allow(clippy::missing_panics_doc)]
         Self(pool, NonZeroUsize::new(num_threads).expect(">0"))
     }
diff --git a/src/runner/implementations/yastl.rs b/src/runner/implementations/yastl.rs
index 558c23c..77ce509 100644
--- a/src/runner/implementations/yastl.rs
+++ b/src/runner/implementations/yastl.rs
@@ -18,6 +18,7 @@ impl YastlPool {
     pub fn new(num_threads: usize) -> Self {
         let num_threads = num_threads.min(1);
         let pool = Pool::new(num_threads);
+        #[allow(clippy::missing_panics_doc)]
         Self(pool, NonZeroUsize::new(num_threads).expect(">0"))
     }
@@ -26,6 +27,7 @@ impl YastlPool {
     pub fn with_config(num_threads: usize, config: ThreadConfig) -> Self {
         let num_threads = num_threads.min(1);
         let pool = Pool::with_config(num_threads, config);
+        #[allow(clippy::missing_panics_doc)]
         Self(pool, NonZeroUsize::new(num_threads).expect(">0"))
     }

From e6681fd1306bbaede69859b57773a4266a4ac23a Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:19:43 +0200
Subject: [PATCH 241/264] fix doc
---
 src/runner/implementations/runner_with_pool.rs | 2 ++
 src/runner/mod.rs                              | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/runner/implementations/runner_with_pool.rs b/src/runner/implementations/runner_with_pool.rs
index 8a5daf3..7cb9067 100644
--- a/src/runner/implementations/runner_with_pool.rs
+++ b/src/runner/implementations/runner_with_pool.rs
@@ -11,6 +11,8 @@
 /// * `RunnerWithPool<StdDefaultPool>` when "std" feature is enabled,
 /// * `RunnerWithPool<SequentialPool>` when "std" feature is disabled.
 ///
+/// [`DefaultRunner`]: crate::DefaultRunner
+///
 /// # Examples
 ///
 /// ```
diff --git a/src/runner/mod.rs b/src/runner/mod.rs
index 29f0674..33d7e62 100644
--- a/src/runner/mod.rs
+++ b/src/runner/mod.rs
@@ -24,7 +24,7 @@ pub use implementations::YastlPool;
 /// Default runner used by orx-parallel computations:
 ///
 /// * [`RunnerWithPool`] with [`StdDefaultPool`] when "std" feature is enabled,
-/// * [`RunnerWithPool`] with `SequentialPool` otherwise.
+/// * [`RunnerWithPool`] with [`SequentialPool`] otherwise.
 #[cfg(feature = "std")]
 pub type DefaultRunner = RunnerWithPool<StdDefaultPool>;
 /// Default runner used by orx-parallel computations:

From 4a1a0ca71f17ccb03e1f6c91907ce9c7c2475070 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:31:19 +0200
Subject: [PATCH 242/264] with_runner doc test
---
 src/par_iter.rs | 49 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 44 insertions(+), 5 deletions(-)

diff --git a/src/par_iter.rs b/src/par_iter.rs
index 57cfe49..8ac6e27 100644
--- a/src/par_iter.rs
+++ b/src/par_iter.rs
@@ -249,18 +249,57 @@ where
     /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`].
     ///
+    /// Parallel runner of each computation can be independently specified using `with_runner`.
+    ///
+    /// When not specified, the default runner is used, which is:
+    /// * [`RunnerWithPool`] with [`StdDefaultPool`] when "std" feature is enabled,
+    /// * [`RunnerWithPool`] with [`SequentialPool`] when "std" feature is disabled.
+    ///
+    /// Note that [`StdDefaultPool`] uses standard native threads.
+    ///
+    /// When working in a no-std environment, the default runner falls back to sequential.
+    /// In this case, a `RunnerWithPool` wrapping a particular thread pool must be passed in via the `with_runner`
+    /// transformation to achieve parallel computation.
+    ///
+    /// [`RunnerWithPool`]: crate::[`RunnerWithPool`]
+    /// [`StdDefaultPool`]: crate::[`StdDefaultPool`]
+    /// [`SequentialPool`]: crate::[`SequentialPool`]
+    ///
     /// # Examples
     ///
-    /// ```ignore
+    /// ```
     /// use orx_parallel::*;
     ///
-    /// let inputs = vec![1, 2, 3, 4];
+    /// let inputs: Vec<_> = (0..42).collect();
     ///
-    /// // uses the default runner
+    /// // uses the DefaultRunner
+    /// // assuming "std" enabled, RunnerWithPool<StdDefaultPool> will be used; i.e., native threads
     /// let sum = inputs.par().sum();
     ///
-    /// // uses the custom parallel runner MyParallelRunner: ParallelRunner
-    /// let sum = inputs.par().with_runner::<MyParallelRunner>().sum();
+    /// // equivalent to:
+    /// let sum2 = inputs.par().with_runner(RunnerWithPool::from(StdDefaultPool::default())).sum();
+    /// assert_eq!(sum, sum2);
+    ///
+    /// #[cfg(feature = "scoped_threadpool")]
+    /// {
+    ///     let mut pool = scoped_threadpool::Pool::new(8);
+    ///
+    ///     // uses the scoped_threadpool::Pool created with 8 threads
+    ///     let sum2 = inputs.par().with_runner(RunnerWithPool::from(&mut pool)).sum();
+    ///     assert_eq!(sum, sum2);
+    /// }
+    ///
+    /// #[cfg(feature = "rayon-core")]
+    /// {
+    ///     let pool = rayon_core::ThreadPoolBuilder::new()
+    ///         .num_threads(8)
+    ///         .build()
+    ///         .unwrap();
+    ///
+    ///     // uses the rayon-core::ThreadPool created with 8 threads
+    ///     let sum2 = inputs.par().with_runner(RunnerWithPool::from(&pool)).sum();
+    ///     assert_eq!(sum, sum2);
+    /// }
     /// ```
     fn with_runner<Q: ParallelRunner>(self, orchestrator: Q) -> impl ParIter<Q, Item = Self::Item>;

From c04b8dd6c8dcbd34f753976392a0b46854be1618 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:36:36 +0200
Subject: [PATCH 243/264] with_executor is added to RunnerWithPool
---
 src/runner/implementations/runner_with_pool.rs | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/runner/implementations/runner_with_pool.rs b/src/runner/implementations/runner_with_pool.rs
index 7cb9067..a96585d 100644
--- a/src/runner/implementations/runner_with_pool.rs
+++ b/src/runner/implementations/runner_with_pool.rs
@@ -173,6 +173,14 @@ where
     pub fn into_inner_pool(self) -> P {
         self.pool
     }
+
+    /// Converts the runner into one using the [`ParallelExecutor`] `Q` rather than `R`.
+    pub fn with_executor<Q: ParallelExecutor>(self) -> RunnerWithPool<P, Q> {
+        RunnerWithPool {
+            pool: self.pool,
+            runner: PhantomData,
+        }
+    }
 }

 impl<P, R> ParallelRunner for RunnerWithPool<P, R>

From 39e4c6a501c0103ed8d25f6b715338231ed83918 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:45:25 +0200
Subject: [PATCH 244/264] with_pool is documented
---
 src/par_iter.rs | 79 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 74 insertions(+), 5 deletions(-)

diff --git a/src/par_iter.rs b/src/par_iter.rs
index 8ac6e27..c34be41 100644
--- a/src/par_iter.rs
+++ b/src/par_iter.rs
@@ -1,9 +1,9 @@
-use crate::ParIterResult;
 use crate::computational_variants::fallible_option::ParOption;
 use crate::par_iter_option::{IntoOption, ParIterOption};
 use crate::par_iter_result::IntoResult;
 use crate::runner::{DefaultRunner, ParallelRunner};
 use crate::using::{UsingClone, UsingFun};
+use crate::{ParIterResult, ParThreadPool, RunnerWithPool};
 use crate::{
     ParIterUsing, Params,
     collect_into::ParCollectInto,
@@ -249,6 +249,8 @@ where
     /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`].
     ///
+    /// See also [`with_pool`].
+    ///
     /// Parallel runner of each computation can be independently specified using `with_runner`.
     ///
     /// When not specified, the default runner is used, which is:
@@ -261,9 +263,10 @@
     /// In this case, a `RunnerWithPool` wrapping a particular thread pool must be passed in via the `with_runner`
     /// transformation to achieve parallel computation.
     ///
-    /// [`RunnerWithPool`]: crate::[`RunnerWithPool`]
-    /// [`StdDefaultPool`]: crate::[`StdDefaultPool`]
-    /// [`SequentialPool`]: crate::[`SequentialPool`]
+    /// [`RunnerWithPool`]: crate::RunnerWithPool
+    /// [`StdDefaultPool`]: crate::StdDefaultPool
+    /// [`SequentialPool`]: crate::SequentialPool
+    /// [`with_pool`]: crate::ParIter::with_pool
     ///
     /// # Examples
     ///
@@ -301,7 +304,73 @@
     /// assert_eq!(sum, sum2);
     /// }
     /// ```
-    fn with_runner<Q: ParallelRunner>(self, orchestrator: Q) -> impl ParIter<Q, Item = Self::Item>;
+    fn with_runner<Q: ParallelRunner>(self, runner: Q) -> impl ParIter<Q, Item = Self::Item>;
+
+    /// Rather than [`DefaultPool`], uses the parallel runner with the given `pool` implementing
+    /// [`ParThreadPool`].
+    ///
+    /// See also [`with_runner`].
+    ///
+    /// Thread pool of each computation can be independently specified using `with_pool`.
+    ///
+    /// When not specified, the default pool is used, which is:
+    /// * [`StdDefaultPool`] when "std" feature is enabled,
+    /// * [`SequentialPool`] when "std" feature is disabled.
+    ///
+    /// Note that [`StdDefaultPool`] uses standard native threads.
+    ///
+    /// When working in a no-std environment, the default pool falls back to sequential.
+    /// In this case, a thread pool must be passed in via the `with_pool` transformation to achieve parallel computation.
+    ///
+    /// [`DefaultPool`]: crate::DefaultPool
+    /// [`RunnerWithPool`]: crate::RunnerWithPool
+    /// [`StdDefaultPool`]: crate::StdDefaultPool
+    /// [`SequentialPool`]: crate::SequentialPool
+    /// [`with_runner`]: crate::ParIter::with_runner
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use orx_parallel::*;
+    ///
+    /// let inputs: Vec<_> = (0..42).collect();
+    ///
+    /// // uses the DefaultPool
+    /// // assuming "std" enabled, StdDefaultPool will be used; i.e., native threads
+    /// let sum = inputs.par().sum();
+    ///
+    /// // equivalent to:
+    /// let sum2 = inputs.par().with_pool(StdDefaultPool::default()).sum();
+    /// assert_eq!(sum, sum2);
+    ///
+    /// #[cfg(feature = "scoped_threadpool")]
+    /// {
+    ///     let mut pool = scoped_threadpool::Pool::new(8);
+    ///
+    ///     // uses the scoped_threadpool::Pool created with 8 threads
+    ///     let sum2 = inputs.par().with_pool(&mut pool).sum();
+    ///     assert_eq!(sum, sum2);
+    /// }
+    ///
+    /// #[cfg(feature = "rayon-core")]
+    /// {
+    ///     let pool = rayon_core::ThreadPoolBuilder::new()
+    ///         .num_threads(8)
+    ///         .build()
+    ///         .unwrap();
+    ///
+    ///     // uses the rayon-core::ThreadPool created with 8 threads
+    ///     let sum2 = inputs.par().with_pool(&pool).sum();
+    ///     assert_eq!(sum, sum2);
+    /// }
+    /// ```
+    fn with_pool<P: ParThreadPool>(
+        self,
+        pool: P,
+    ) -> impl ParIter<RunnerWithPool<P, R::Executor>, Item = Self::Item> {
+        let runner = RunnerWithPool::from(pool).with_executor::<R::Executor>();
+        self.with_runner(runner)
+    }

     // using transformations

From d8e06585cba24feda6442b004af4f1a0a2676135 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:45:33 +0200
Subject: [PATCH 245/264] doc
---
 src/lib.rs        |  2 +-
 src/runner/mod.rs | 22 ++++++++++++++--------
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index ce18d13..728e05b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -73,7 +73,7 @@ pub use parameters::{ChunkSize, IterationOrder, NumThreads, Params};
 pub use special_type_sets::Sum;
 pub use using::ParIterUsing;
-pub use runner::{DefaultRunner, ParallelRunner, RunnerWithPool, SequentialPool};
+pub use runner::{DefaultPool, DefaultRunner, ParallelRunner, RunnerWithPool, SequentialPool};
diff --git a/src/runner/mod.rs b/src/runner/mod.rs
index 33d7e62..2adeb4c 100644
--- a/src/runner/mod.rs
+++ b/src/runner/mod.rs
@@ -21,15 +21,21 @@ pub use implementations::YastlPool;
 // DEFAULT
-/// Default runner used by orx-parallel computations:
+/// Default pool used by orx-parallel computations:
 ///
-/// * [`RunnerWithPool`] with [`StdDefaultPool`] when "std" feature is enabled,
-/// * [`RunnerWithPool`] with [`SequentialPool`] otherwise.
+/// * [`StdDefaultPool`] when "std" feature is enabled,
+/// * [`SequentialPool`] otherwise.
 #[cfg(feature = "std")]
-pub type DefaultRunner = RunnerWithPool<StdDefaultPool>;
-/// Default runner used by orx-parallel computations:
+pub type DefaultPool = StdDefaultPool;
+/// Default pool used by orx-parallel computations:
 ///
-/// * [`RunnerWithPool`] with `StdDefaultPool` when "std" feature is enabled,
+/// * `StdDefaultPool` when "std" feature is enabled,
 /// * [`SequentialPool`] otherwise.
 #[cfg(not(feature = "std"))]
-pub type DefaultRunner = RunnerWithPool<SequentialPool>;
+pub type DefaultPool = SequentialPool;
+
+/// Default runner used by orx-parallel computations, using the [`DefaultPool`]:
+///
+/// * [`RunnerWithPool`] with [`StdDefaultPool`] when "std" feature is enabled,
+/// * [`RunnerWithPool`] with [`SequentialPool`] otherwise.
+pub type DefaultRunner = RunnerWithPool<DefaultPool>;

From d154682d2205f478fac3b9e4b3921a552e674178 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:50:51 +0200
Subject: [PATCH 246/264] with_pool documentation
---
 src/par_iter.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/par_iter.rs b/src/par_iter.rs
index c34be41..329fde7 100644
--- a/src/par_iter.rs
+++ b/src/par_iter.rs
@@ -322,6 +322,11 @@ where
     /// When working in a no-std environment, the default pool falls back to sequential.
     /// In this case, a thread pool must be passed in via the `with_pool` transformation to achieve parallel computation.
     ///
+    /// Note that if a thread pool, say `pool`, is of a type that implements [`ParThreadPool`], then:
+    /// * `with_pool` can be called with the owned value `with_pool(pool)` for all implementors; but also,
+    /// * with a shared reference `with_pool(&pool)` for most of the implementations (e.g., rayon-core, yastl), and
+    /// * with a mutable reference `with_pool(&mut pool)` for others (e.g., scoped_threadpool).
+    ///

From 288a2392566429526ac00fb584f9e3715de0e545 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:53:34 +0200
Subject: [PATCH 247/264] with_pool transformation for fallible iterators
---
 src/par_iter_result.rs | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs
index 35b2a63..25d8d69 100644
--- a/src/par_iter_result.rs
+++ b/src/par_iter_result.rs
@@ -1,6 +1,6 @@
 use crate::default_fns::{map_count, reduce_sum, reduce_unit};
 use crate::runner::{DefaultRunner, ParallelRunner};
-use crate::{ChunkSize, IterationOrder, NumThreads, Sum};
+use crate::{ChunkSize, IterationOrder, NumThreads, ParThreadPool, RunnerWithPool, Sum};
 use crate::{ParCollectInto, ParIter, generic_values::fallible_iterators::ResultOfIter};
 use core::cmp::Ordering;
@@ -205,6 +205,21 @@
         orchestrator: Q,
     ) -> impl ParIterResult<Q, Item = Self::Item, Err = Self::Err>;
+
+    /// Rather than [`DefaultPool`], uses the parallel runner with the given `pool` implementing
+    /// [`ParThreadPool`].
+    ///
+    /// See [`ParIter::with_pool`] for details.
+    fn with_pool<P: ParThreadPool>(
+        self,
+        pool: P,
+    ) -> impl ParIterResult<RunnerWithPool<P, R::Executor>, Item = Self::Item, Err = Self::Err>
+    where
+        Self: Sized,
+    {
+        let runner = RunnerWithPool::from(pool).with_executor::<R::Executor>();
+        self.with_runner(runner)
+    }
+
     // computation transformations

     /// Takes a closure `map` and creates a parallel iterator which calls that closure on each element.
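The shape of the `with_pool` addition above (and its siblings in the next two patches) follows one pattern: a trait keeps a single required, runner-generic `with_runner`, and `with_pool` is a provided method that adapts any pool into a runner and delegates. The sketch below is a toy model of that pattern; the types are simplified stand-ins, not the crate's real signatures.

```rust
// Toy stand-in for the runner wrapper: any value can play the pool here.
struct RunnerWithPool<P> {
    pool: P,
}

impl<P> From<P> for RunnerWithPool<P> {
    fn from(pool: P) -> Self {
        Self { pool }
    }
}

trait WithRunner: Sized {
    // required: re-bind the computation to an explicit runner
    fn with_runner<Q>(self, runner: Q) -> (Q, Self);

    // provided: wrap the pool and delegate, so each iterator trait
    // (ParIter, ParIterResult, ParIterOption, ParIterUsing) only has to
    // restate the signature with its own return type
    fn with_pool<P>(self, pool: P) -> (RunnerWithPool<P>, Self) {
        self.with_runner(RunnerWithPool::from(pool))
    }
}

struct Computation;

impl WithRunner for Computation {
    fn with_runner<Q>(self, runner: Q) -> (Q, Self) {
        (runner, self)
    }
}

fn main() {
    // any value stands in for a thread pool in this toy
    let (runner, _computation) = Computation.with_pool("pool");
    assert_eq!(runner.pool, "pool");
}
```

The benefit of this design is that each fallible or using iterator gains pool support for free once its `with_runner` exists; only the return type differs per trait.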
From 4083ea5b9a056baff323bbaa6ee7865715c1b313 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:55:16 +0200
Subject: [PATCH 248/264] with_pool for optional iterator
---
 src/par_iter_option.rs | 25 +++++++++++++++++++++++--
 src/par_iter_result.rs |  4 ++++
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/src/par_iter_option.rs b/src/par_iter_option.rs
index c948941..a5dd310 100644
--- a/src/par_iter_option.rs
+++ b/src/par_iter_option.rs
@@ -1,6 +1,8 @@
 use crate::default_fns::{map_count, reduce_sum, reduce_unit};
 use crate::runner::{DefaultRunner, ParallelRunner};
-use crate::{ChunkSize, IterationOrder, NumThreads, ParCollectInto, Sum};
+use crate::{
+    ChunkSize, IterationOrder, NumThreads, ParCollectInto, ParThreadPool, RunnerWithPool, Sum,
+};
 use core::cmp::Ordering;

 /// A parallel iterator for which the computation either completely succeeds,
@@ -158,12 +160,31 @@
     /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`].
     ///
-    /// See [`crate::ParIter::with_runner`] for details.
+    /// See [`ParIter::with_runner`] for details.
+    ///
+    /// [`DefaultRunner`]: crate::DefaultRunner
     fn with_runner<Q: ParallelRunner>(
         self,
         orchestrator: Q,
     ) -> impl ParIterOption<Q, Item = Self::Item>;

+    /// Rather than [`DefaultPool`], uses the parallel runner with the given `pool` implementing
+    /// [`ParThreadPool`].
+    ///
+    /// See [`ParIter::with_pool`] for details.
+    ///
+    /// [`DefaultPool`]: crate::DefaultPool
+    fn with_pool<P: ParThreadPool>(
+        self,
+        pool: P,
+    ) -> impl ParIterOption<RunnerWithPool<P, R::Executor>, Item = Self::Item>
+    where
+        Self: Sized,
+    {
+        let runner = RunnerWithPool::from(pool).with_executor::<R::Executor>();
+        self.with_runner(runner)
+    }
+
     // computation transformations

     /// Takes a closure `map` and creates a parallel iterator which calls that closure on each element.
diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs
index 25d8d69..35dbc85 100644
--- a/src/par_iter_result.rs
+++ b/src/par_iter_result.rs
@@ -200,6 +200,8 @@
     /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`].
     ///
     /// See [`ParIter::with_runner`] for details.
+    ///
+    /// [`DefaultRunner`]: crate::DefaultRunner
     fn with_runner<Q: ParallelRunner>(
         self,
         orchestrator: Q,
@@ -209,6 +211,8 @@
     /// [`ParThreadPool`].
     ///
     /// See [`ParIter::with_pool`] for details.
+    ///
+    /// [`DefaultPool`]: crate::DefaultPool
     fn with_pool<P: ParThreadPool>(
         self,
         pool: P,

From 2b81f83e45782c7c24a9e2129d022bc8c7c50f93 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:56:20 +0200
Subject: [PATCH 249/264] with-pool transformation for using iterators
---
 src/using/u_par_iter.rs | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs
index 6e67ebc..a4c255d 100644
--- a/src/using/u_par_iter.rs
+++ b/src/using/u_par_iter.rs
@@ -1,9 +1,9 @@
-use crate::default_fns::*;
 use crate::{
-    ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, Sum,
+    ChunkSize, IterationOrder, NumThreads, ParCollectInto, Params, RunnerWithPool, Sum,
     runner::{DefaultRunner, ParallelRunner},
     using::using_variants::Using,
 };
+use crate::{ParThreadPool, default_fns::*};
 use core::cmp::Ordering;
 use orx_concurrent_iter::ConcurrentIter;
@@ -58,12 +58,31 @@
     /// Rather than the [`DefaultRunner`], uses the parallel runner `Q` which implements [`ParallelRunner`].
     ///
-    /// See [crate::ParIter::with_runner] for details.
+    /// See [`ParIter::with_runner`] for details.
+    ///
+    /// [`DefaultRunner`]: crate::DefaultRunner
     fn with_runner<Q: ParallelRunner>(
         self,
         orchestrator: Q,
     ) -> impl ParIterUsing<U, Q, Item = Self::Item>;

+    /// Rather than [`DefaultPool`], uses the parallel runner with the given `pool` implementing
+    /// [`ParThreadPool`].
+    ///
+    /// See [`ParIter::with_pool`] for details.
+    ///
+    /// [`DefaultPool`]: crate::DefaultPool
+    fn with_pool<P: ParThreadPool>(
+        self,
+        pool: P,
+    ) -> impl ParIterUsing<U, RunnerWithPool<P, R::Executor>, Item = Self::Item>
+    where
+        Self: Sized,
+    {
+        let runner = RunnerWithPool::from(pool).with_executor::<R::Executor>();
+        self.with_runner(runner)
+    }
+
     // computation transformations

     /// Takes a closure `map` and creates a parallel iterator which calls that closure on each element.

From a672233eb5a1c221c7c52f8dfdfcc155dfb82558 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:58:33 +0200
Subject: [PATCH 250/264] fix docs
---
 src/par_iter_option.rs  | 2 ++
 src/par_iter_result.rs  | 2 ++
 src/using/u_par_iter.rs | 2 ++
 3 files changed, 6 insertions(+)

diff --git a/src/par_iter_option.rs b/src/par_iter_option.rs
index a5dd310..116f8ec 100644
--- a/src/par_iter_option.rs
+++ b/src/par_iter_option.rs
@@ -163,6 +163,7 @@
     /// See [`ParIter::with_runner`] for details.
     ///
     /// [`DefaultRunner`]: crate::DefaultRunner
+    /// [`ParIter::with_runner`]: crate::ParIter::with_runner
     fn with_runner<Q: ParallelRunner>(
         self,
         orchestrator: Q,
@@ -174,6 +175,7 @@
     /// See [`ParIter::with_pool`] for details.
     ///
     /// [`DefaultPool`]: crate::DefaultPool
+    /// [`ParIter::with_pool`]: crate::ParIter::with_pool
     fn with_pool<P: ParThreadPool>(
         self,
         pool: P,
diff --git a/src/par_iter_result.rs b/src/par_iter_result.rs
index 35dbc85..7e704fe 100644
--- a/src/par_iter_result.rs
+++ b/src/par_iter_result.rs
@@ -202,6 +202,7 @@
     /// See [`ParIter::with_runner`] for details.
     ///
     /// [`DefaultRunner`]: crate::DefaultRunner
+    /// [`ParIter::with_runner`]: crate::ParIter::with_runner
     fn with_runner<Q: ParallelRunner>(
         self,
         orchestrator: Q,
@@ -213,6 +214,7 @@
     /// See [`ParIter::with_pool`] for details.
     ///
     /// [`DefaultPool`]: crate::DefaultPool
+    /// [`ParIter::with_pool`]: crate::ParIter::with_pool
     fn with_pool<P: ParThreadPool>(
         self,
         pool: P,
diff --git a/src/using/u_par_iter.rs b/src/using/u_par_iter.rs
index a4c255d..7509463 100644
--- a/src/using/u_par_iter.rs
+++ b/src/using/u_par_iter.rs
@@ -61,6 +61,7 @@
     /// See [`ParIter::with_runner`] for details.
     ///
     /// [`DefaultRunner`]: crate::DefaultRunner
+    /// [`ParIter::with_runner`]: crate::ParIter::with_runner
     fn with_runner<Q: ParallelRunner>(
         self,
         orchestrator: Q,
@@ -72,6 +73,7 @@
     /// See [`ParIter::with_pool`] for details.
     ///
     /// [`DefaultPool`]: crate::DefaultPool
+    /// [`ParIter::with_pool`]: crate::ParIter::with_pool
     fn with_pool<P: ParThreadPool>(
         self,
         pool: P,

From 51319ea06eb445f595744d72fe1dede4ccce5633 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 20:59:07 +0200
Subject: [PATCH 251/264] fix default
---
 Cargo.toml | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index bd63a43..c65f5e2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -53,14 +53,15 @@ harness = false
 all-features = true

 [features]
-default = [
-    "std",
-    "pond",
-    "poolite",
-    "rayon-core",
-    "scoped-pool",
-    "scoped_threadpool",
-    "yastl",
-]
+default = ["std"]
+# default = [
+#     "std",
+#     "pond",
+#     "poolite",
+#     "rayon-core",
+#     "scoped-pool",
+#     "scoped_threadpool",
+#     "yastl",
+# ]
 std = []
 generic_iterator = ["rayon"]

From 43b735aa2dc30c4935eeff05ae9ecb23837a13ee Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 22:09:56 +0200
Subject: [PATCH 252/264] clippy fix
---
 src/executor/fixed_chunk_executor/parallel_executor.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/executor/fixed_chunk_executor/parallel_executor.rs b/src/executor/fixed_chunk_executor/parallel_executor.rs
index dc4e872..be328a4 100644
--- a/src/executor/fixed_chunk_executor/parallel_executor.rs
+++ b/src/executor/fixed_chunk_executor/parallel_executor.rs
@@ -101,7 +101,7 @@ impl ParallelExecutor for FixedChunkRunner {
         I: ConcurrentIter,
     {
         let num_spawned = num_spawned.into_inner();
-        if num_spawned % LAG_PERIODICITY == 0 {
+        if num_spawned.is_multiple_of(LAG_PERIODICITY) {
             match self.next_chunk(num_spawned, iter.try_get_len()) {
                 Some(c) => self.current_chunk_size.store(c, Ordering::Relaxed),
                 None => return false,

From 5feb4d1d6a6d7cde9a5a87c224223a3a3b96b08d Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 22:57:28 +0200
Subject: [PATCH 253/264] readme is updated for pools
---
 README.md | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 87 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index ef05108..b61f61c 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@
 * [Fallible Parallel Iterators](#fallible-parallel-iterators)
 * [Using Mutable Variables](#using-mutable-variables)
 * [Configurations](#configurations)
-* [Underlying Approach and Parallel Runners](#underlying-approach-and-parallel-runners)
+* [Runner: Pools and Executors](#runner-pools-and-executors)
 * [Contributing](#contributing)

 ## Parallel Computation by Iterators
@@ -420,16 +420,97 @@ This is guaranteed by the fact that both consuming computation calls and configu

 Additionally, maximum number of threads that can be used by parallel computations can be globally bounded by the environment variable `ORX_PARALLEL_MAX_NUM_THREADS`. Please see the corresponding [example](https://github.com/orxfun/orx-parallel/blob/main/examples/max_num_threads_config.rs) for details.

-## Underlying Approach and Parallel Runners
+## Runner: Pools and Executors

 This crate defines parallel computation by combining two basic components.

-* Pulling **inputs** in parallel is achieved through [`ConcurrentIter`](https://crates.io/crates/orx-concurrent-iter). Concurrent iterator implementations are lock-free, efficient and support pull-by-chunks optimization to reduce the parallelization overhead. A thread can pull any number of inputs from the concurrent iterator every time it becomes idle. This provides the means to dynamically decide on the chunk sizes.
-* Writing **outputs** in parallel is handled using thread-safe containers such as [`ConcurrentBag`](https://crates.io/crates/orx-concurrent-bag) and [`ConcurrentOrderedBag`](https://crates.io/crates/orx-concurrent-ordered-bag). Similarly, these are lock-free collections that aim for high performance collection of results.
+**Pulling inputs**
+* Pulling inputs in parallel is achieved through [`ConcurrentIter`](https://crates.io/crates/orx-concurrent-iter). Concurrent iterator implementations are lock-free, efficient and support pull-by-chunks optimization to reduce the parallelization overhead. A thread can pull any number of inputs from the concurrent iterator every time it becomes idle. This provides the means to dynamically decide on the chunk sizes.
+* Furthermore, this reduces the overhead of defining and spawning tasks. To illustrate, provided that the computation will be handled by `n` threads, a closure holding a reference to the input concurrent iterator is defined to represent the computation. This same closure is passed to `n` threads; i.e., `n` spawn calls are made. Each of these threads keeps pulling elements from the input until the computation is completed, without requiring any further task definitions.

-Finally, [`ParallelRunner`](https://docs.rs/orx-parallel/latest/orx_parallel/runner/trait.ParallelRunner.html) trait manages parallelization of the given computation with desired configuration. The objective of the parallel runner is to optimize the chunk sizes to solve the tradeoff between impact of heterogeneity of individual computations and overhead of parallelization.
+**Writing outputs**
+* When we collect results, writing outputs is handled using lock-free containers such as [`ConcurrentBag`](https://crates.io/crates/orx-concurrent-bag) and [`ConcurrentOrderedBag`](https://crates.io/crates/orx-concurrent-ordered-bag) which aim for high-performance collection of results.

-Since it is a trait, parallel runner is customizable. It is possible to implement and use your *own runner* by calling [`with_runner`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParIter.html#tymethod.with_runner) transformation method on the parallel iterator. Default parallel runner targets to be efficient in general. When we have a use case with special characteristics, we can implement a `ParallelRunner` optimized for this scenario and use with the parallel iterators.
+There are two main decisions to be taken while executing these components:
+* how many threads do we use?
+* what is the chunk size; i.e., how many input items does a thread pull each time?
+
+A [`ParallelRunner`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParallelRunner) is a combination of a `ParThreadPool` and a `ParallelExecutor`, which are responsible for these two decisions, respectively.
+
+### ParThreadPool: number of threads
+
+The [`ParThreadPool`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParThreadPool) trait generalizes thread pools that can be used for parallel computations. This allows the parallel computation to be generic over thread pools.
+
+When not explicitly set, [`DefaultPool`](https://docs.rs/orx-parallel/latest/orx_parallel/type.DefaultPool) is used:
+* When the **std** feature is enabled, the default pool is the [`StdDefaultPool`](https://docs.rs/orx-parallel/latest/orx_parallel/struct.StdDefaultPool). In other words, all available native threads can be used by the parallel computation. This number can be globally bounded by the "ORX_PARALLEL_MAX_NUM_THREADS" environment variable when set.
+* When working in a **no-std** environment, the default pool is the [`SequentialPool`](https://docs.rs/orx-parallel/latest/orx_parallel/struct.SequentialPool). As the name suggests, this pool executes the parallel computation sequentially on the main thread. It can be considered a placeholder to be overwritten by the `with_pool` or `with_runner` methods to achieve parallelism.
+
+*Note that the thread pool defines the available resources, i.e., the upper bound on the number of threads. This upper bound can be tightened further by the [`num_threads`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParIter.html#tymethod.num_threads) configuration. Finally, the parallel executor might choose not to use all available threads if it decides that the computation is small enough.*
+
+To overwrite the defaults and explicitly set the thread pool to be used for the computation, the [`with_pool`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParIter.html#tymethod.with_pool) or [`with_runner`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParIter.html#tymethod.with_runner) methods are used.
+
+```rust
+use orx_parallel::*;
+
+let inputs: Vec<_> = (0..42).collect();
+
+// uses the DefaultPool
+// assuming "std" enabled, StdDefaultPool will be used; i.e., native threads
+let sum = inputs.par().sum();
+
+// equivalent to:
+let sum2 = inputs.par().with_pool(StdDefaultPool::default()).sum();
+assert_eq!(sum, sum2);
+
+#[cfg(feature = "scoped_threadpool")]
+{
+    let mut pool = scoped_threadpool::Pool::new(8);
+    // uses the scoped_threadpool::Pool created with 8 threads
+    let sum2 = inputs.par().with_pool(&mut pool).sum();
+    assert_eq!(sum, sum2);
+}
+
+#[cfg(feature = "rayon-core")]
+{
+    let pool = rayon_core::ThreadPoolBuilder::new()
+        .num_threads(8)
+        .build()
+        .unwrap();
+    // uses the rayon-core::ThreadPool created with 8 threads
+    let sum2 = inputs.par().with_pool(&pool).sum();
+    assert_eq!(sum, sum2);
+}
+
+#[cfg(feature = "yastl")]
+{
+    let pool = YastlPool::new(8);
+    // uses the yastl::Pool created with 8 threads
+    let sum2 = inputs.par().with_pool(&pool).sum();
+    assert_eq!(sum, sum2);
+}
+```
+
+`ParThreadPool` implementations of several thread pools are provided in this crate as optional features (see [features](#features) section). Provided that the pool supports scoped computations, it is trivial to implement this trait in most cases (see [implementations](https://github.com/orxfun/orx-parallel/tree/main/src/runner/implementations) for examples).
+
+### ParallelExecutor: chunk size
+
+Once the thread pool provides the computation resources, it is [`ParallelExecutor`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParallelExecutor)'s task to distribute work to available threads. As mentioned above, all threads receive exactly the same closure. This closure continues to pull elements from the input concurrent iterator and operate on the inputs until all elements are processed.
+
+The critical decision that the parallel executor makes is the chunk size. Depending on the state of the computation, it can dynamically decide on the number of elements to pull from the input iterator. The tradeoff it tries to solve is as follows:
+
+* the larger the chunk size,
+  * the smaller the parallelization overhead; but also
+  * the larger the risk of imbalance in cases of heterogeneity.
+
+## Features
+
+* **std**: This is a **no-std** crate, while *std* is included as a default feature. Please use the `--no-default-features` flag for no-std use cases. The **std** feature enables `StdDefaultPool` as the default thread provider, which uses native threads.
+* **rayon-core**: This feature enables using `rayon_core::ThreadPool` for parallel computations. +* **scoped_threadpool**: This feature enables using `scoped_threadpool::Pool`. +* **scoped-pool**: This feature enables using `scoped-pool::Pool`. +* **yastl**: This feature enables using `yastl::Pool`. +* **pond**: This feature enables using `pond::Pool`. +* **poolite**: This feature enables using `poolite::Pool`. ## Contributing From a0c5bb9e92de0b71205b2730f3f02c5a0b78d5ae Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 22:58:30 +0200 Subject: [PATCH 254/264] update contributing --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b61f61c..e5525dc 100644 --- a/README.md +++ b/README.md @@ -520,7 +520,8 @@ Please open an [issue](https://github.com/orxfun/orx-parallel/issues/new) or cre * if you notice an error, * have a question or think something could be improved, -* have an input collection or generator that needs to be parallelized, or +* have an input collection or generator that needs to be parallelized, +* want to use a particular thread pool with parallel iterators, * having trouble representing a particular parallel computation with parallel iterators, * or anything else:) From f800454024b6db34aa8d9a6cc61382726959cf3c Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 23:05:02 +0200 Subject: [PATCH 255/264] performance note on pool performance --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index e5525dc..3334c8c 100644 --- a/README.md +++ b/README.md @@ -492,6 +492,8 @@ assert_eq!(sum, sum2); `ParThreadPool` implementations of several thread pools are provided in this crate as optional features (see [features](#features) section). Provided that the pool supports scoped computations, it is trivial to implement this trait in most cases (see [implementations](https://github.com/orxfun/orx-parallel/tree/main/src/runner/implementations) for examples). +In order to have quick tests on the performance of different thread pools, you may use the example [`benchmark_pools`](https://github.com/orxfun/orx-parallel/blob/main/examples/benchmark_pools.rs). In most of the cases, using native threads with `StdDefaultPool`, *rayon-core*, *scoped_threadpool* and *scoped_pool* consistently perform similar and better than others for the tested computations. + ### ParallelExecutor: chunk size Once thread pool provides the computation resources, it is [`ParallelExecutor`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParallelExecutor)'s task to distribute work to available threads. As mentioned above, all threads receive exactly the same closure. This closure continues to pull elements from the input concurrent iterator and operate on the inputs until all elements are processed. From 012ba2fb943c042ea2c486b4ae2f0469e67424ce Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 23:24:01 +0200 Subject: [PATCH 256/264] different thread pools are added to some of the benchmarks. 
---
 README.md                       |  2 +-
 benches/collect_filter.rs       | 55 ++++++++++++++++++++++++++
 benches/collect_map_filter.rs   | 68 +++++++++++++++++++++++++++++++++
 benches/reduce_iter_into_par.rs | 57 +++++++++++++++++++++++++++
 4 files changed, 181 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 3334c8c..0704d40 100644
--- a/README.md
+++ b/README.md
@@ -492,7 +492,7 @@ assert_eq!(sum, sum2);
 `ParThreadPool` implementations of several thread pools are provided in this crate as optional features (see [features](#features) section). Provided that the pool supports scoped computations, it is trivial to implement this trait in most cases (see [implementations](https://github.com/orxfun/orx-parallel/tree/main/src/runner/implementations) for examples).

-In order to have quick tests on the performance of different thread pools, you may use the example [`benchmark_pools`](https://github.com/orxfun/orx-parallel/blob/main/examples/benchmark_pools.rs). In most of the cases, using native threads with `StdDefaultPool`, *rayon-core*, *scoped_threadpool* and *scoped_pool* consistently perform similar and better than others for the tested computations.
+In order to have quick tests on the performance of different thread pools, you may use the example [`benchmark_pools`](https://github.com/orxfun/orx-parallel/blob/main/examples/benchmark_pools.rs). In most of the cases, *rayon-core*, *scoped_threadpool* and *scoped_pool* perform similar and better than others, and gets close to native threads performance with `StdDefaultPool`.
diff --git a/benches/collect_filter.rs b/benches/collect_filter.rs
index 6f76d03..07befac 100644
--- a/benches/collect_filter.rs
+++ b/benches/collect_filter.rs
@@ -84,6 +84,11 @@ fn orx_into_split_vec(inputs: &[Output]) -> SplitVec<&Output> {
     inputs.into_par().filter(filter).collect()
 }

+#[allow(dead_code)]
+fn orx_into_vec_with<P: ParThreadPool>(inputs: &[Output], pool: P) -> Vec<&Output> {
+    inputs.into_par().with_pool(pool).filter(filter).collect()
+}
+
 fn run(c: &mut Criterion) {
     let treatments = [65_536 * 2];

@@ -114,6 +119,56 @@ fn run(c: &mut Criterion) {
             assert_eq!(&expected, &orx_into_split_vec(&input));
             b.iter(|| orx_into_split_vec(black_box(&input)))
         });
+
+        #[cfg(feature = "rayon-core")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-vec (rayon-core::ThreadPool)", n),
+            n,
+            |b, _| {
+                let pool = rayon_core::ThreadPoolBuilder::new()
+                    .num_threads(32)
+                    .build()
+                    .unwrap();
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped-pool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-vec (scoped-pool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = scoped_pool::Pool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped_threadpool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-vec (scoped_threadpool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = || scoped_threadpool::Pool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, pool()));
+                b.iter(|| orx_into_vec_with(black_box(&input), pool()))
+            },
+        );
+
+        #[cfg(feature = "yastl")]
+        group.bench_with_input(BenchmarkId::new("orx-vec (yastl::Pool)", n), n, |b, _| {
+            let pool = YastlPool::new(32);
+            assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+            b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+        });
+
+        #[cfg(feature = "pond")]
+        group.bench_with_input(BenchmarkId::new("orx-vec (pond::Pool)", n), n, |b, _| {
+            let pool = || PondPool::new_threads_unbounded(32);
+            assert_eq!(&expected, &orx_into_vec_with(&input, pool()));
+            b.iter(|| orx_into_vec_with(black_box(&input), pool()))
+        });
     }

     group.finish();
diff --git a/benches/collect_map_filter.rs b/benches/collect_map_filter.rs
index 7f8d4c1..9f91cd8 100644
--- a/benches/collect_map_filter.rs
+++ b/benches/collect_map_filter.rs
@@ -83,6 +83,16 @@ fn orx_into_split_vec(inputs: &[usize]) -> SplitVec<Output> {
     inputs.into_par().map(map).filter(filter).collect()
 }

+#[allow(dead_code)]
+fn orx_into_vec_with<P: ParThreadPool>(inputs: &[usize], pool: P) -> Vec<Output> {
+    inputs
+        .into_par()
+        .with_pool(pool)
+        .map(map)
+        .filter(filter)
+        .collect()
+}
+
 fn run(c: &mut Criterion) {
     let treatments = [65_536 * 2];

@@ -113,6 +123,64 @@ fn run(c: &mut Criterion) {
             assert_eq!(&expected, &orx_into_split_vec(&input));
             b.iter(|| orx_into_split_vec(black_box(&input)))
         });
+
+        #[cfg(feature = "rayon-core")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (rayon-core::ThreadPool)", n),
+            n,
+            |b, _| {
+                let pool = rayon_core::ThreadPoolBuilder::new()
+                    .num_threads(32)
+                    .build()
+                    .unwrap();
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped-pool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (scoped-pool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = scoped_pool::Pool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped_threadpool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (scoped_threadpool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = || scoped_threadpool::Pool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, pool()));
+                b.iter(|| orx_into_vec_with(black_box(&input), pool()))
+            },
+        );
+
+        #[cfg(feature = "yastl")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (yastl::Pool)", n),
+            n,
+            |b, _| {
+                let pool = YastlPool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "pond")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (pond::Pool)", n),
+            n,
+            |b, _| {
+                let pool = || PondPool::new_threads_unbounded(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, pool()));
+                b.iter(|| orx_into_vec_with(black_box(&input), pool()))
+            },
+        );
     }

     group.finish();
diff --git a/benches/reduce_iter_into_par.rs b/benches/reduce_iter_into_par.rs
index 7d00d3a..817c80b 100644
--- a/benches/reduce_iter_into_par.rs
+++ b/benches/reduce_iter_into_par.rs
@@ -93,6 +93,17 @@ fn orx(inputs: &[usize]) -> Option<Output> {
         .reduce(reduce)
 }

+#[allow(dead_code)]
+fn orx_with<P: ParThreadPool>(inputs: &[usize], pool: P) -> Option<Output> {
+    inputs
+        .into_iter()
+        .iter_into_par()
+        .with_pool(pool)
+        .map(map)
+        .filter(filter)
+        .reduce(reduce)
+}
+
 fn run(c: &mut Criterion) {
     let treatments = [65_536 * 2];

@@ -116,6 +127,52 @@ fn run(c: &mut Criterion) {
             assert_eq!(&expected, &orx(&input));
             b.iter(|| orx(black_box(&input)))
         });
+
+        #[cfg(feature = "rayon-core")]
+        group.bench_with_input(
+            BenchmarkId::new("orx (rayon-core::ThreadPool)", n),
+            n,
+            |b, _| {
+                let pool = rayon_core::ThreadPoolBuilder::new()
+                    .num_threads(32)
+                    .build()
+                    .unwrap();
+                assert_eq!(&expected, &orx_with(&input, &pool));
+                b.iter(|| orx_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped-pool")]
+        group.bench_with_input(BenchmarkId::new("orx (scoped-pool::Pool)", n), n, |b, _| {
+            let pool = scoped_pool::Pool::new(32);
+            assert_eq!(&expected, &orx_with(&input, &pool));
+            b.iter(|| orx_with(black_box(&input), &pool))
+        });
+
+        #[cfg(feature = "scoped_threadpool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx (scoped_threadpool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = || scoped_threadpool::Pool::new(32);
+                assert_eq!(&expected, &orx_with(&input, pool()));
+                b.iter(|| orx_with(black_box(&input), pool()))
+            },
+        );
+
+        #[cfg(feature = "yastl")]
+        group.bench_with_input(BenchmarkId::new("orx (yastl::Pool)", n), n, |b, _| {
+            let pool = YastlPool::new(32);
+            assert_eq!(&expected, &orx_with(&input, &pool));
+            b.iter(|| orx_with(black_box(&input), &pool))
+        });
+
+        #[cfg(feature = "pond")]
+        group.bench_with_input(BenchmarkId::new("orx (pond::Pool)", n), n, |b, _| {
+            let pool = || PondPool::new_threads_unbounded(32);
+            assert_eq!(&expected, &orx_with(&input, pool()));
+            b.iter(|| orx_with(black_box(&input), pool()))
+        });
     }

     group.finish();

From 746658710657f770fac47d1b40aa9e34e0a8fbd6 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Thu, 18 Sep 2025 23:28:59 +0200
Subject: [PATCH 257/264] update readme
---
 README.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 0704d40..1158daf 100644
--- a/README.md
+++ b/README.md
@@ -492,7 +492,9 @@
 `ParThreadPool` implementations of several thread pools are provided in this crate as optional features (see [features](#features) section). Provided that the pool supports scoped computations, it is trivial to implement this trait in most cases (see [implementations](https://github.com/orxfun/orx-parallel/tree/main/src/runner/implementations) for examples).

-In order to have quick tests on the performance of different thread pools, you may use the example [`benchmark_pools`](https://github.com/orxfun/orx-parallel/blob/main/examples/benchmark_pools.rs). In most of the cases, *rayon-core*, *scoped_threadpool* and *scoped_pool* perform similar and better than others, and gets close to native threads performance with `StdDefaultPool`.
+In most cases, *rayon-core*, *scoped_threadpool* and *scoped-pool* perform better than the others, and get close to the native-threads performance of `StdDefaultPool`.
+
+Since parallel computations are generic over thread pools, the performance of different pools can be conveniently compared for specific use cases. Such an example benchmark can be found in the [collect_map_filter](https://github.com/orxfun/orx-parallel/blob/main/benches/collect_map_filter.rs) file. To have quick tests, you may also use the example [benchmark_pools](https://github.com/orxfun/orx-parallel/blob/main/examples/benchmark_pools.rs).
### ParallelExecutor: chunk size From fc8f0aee757c9dd3f7bc18f2792da8006ec18a2a Mon Sep 17 00:00:00 2001 From: orxfun Date: Thu, 18 Sep 2025 23:44:23 +0200 Subject: [PATCH 258/264] --all-features is added to benchmark script --- .scripts/run_benchmark.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.scripts/run_benchmark.sh b/.scripts/run_benchmark.sh index eb5b54f..1a3ce1d 100755 --- a/.scripts/run_benchmark.sh +++ b/.scripts/run_benchmark.sh @@ -5,6 +5,6 @@ sed -i "s/$original_bench/$bench/g" Cargo.toml rm -f benches/results/$bench.txt -cargo bench >> benches/results/$bench.txt +cargo bench --all-features >> benches/results/$bench.txt sed -i "s/$bench/$original_bench/g" Cargo.toml From 2d6403ea378c6236b61b5c507ae5856800831a6b Mon Sep 17 00:00:00 2001 From: orxfun Date: Fri, 19 Sep 2025 14:00:46 +0200 Subject: [PATCH 259/264] increment version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index c65f5e2..dd414a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "orx-parallel" -version = "3.2.0" +version = "4.0.0" edition = "2024" authors = ["orxfun "] readme = "README.md" From 9a2ef21c05c9ea801d5ce1b514ef442ae1e89e60 Mon Sep 17 00:00:00 2001 From: orxfun Date: Fri, 19 Sep 2025 14:11:52 +0200 Subject: [PATCH 260/264] simplify yastl thread pool impl --- Cargo.toml | 36 +++++++++++------------ src/runner/implementations/tests/yastl.rs | 2 +- src/runner/implementations/yastl.rs | 36 +++-------------------- 3 files changed, 23 insertions(+), 51 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dd414a3..6ca947d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,18 +21,18 @@ orx-iterable = { version = "1.3.0", default-features = false } orx-pinned-concurrent-col = { version = "2.15.0", default-features = false } orx-priority-queue = { version = "1.7.0", default-features = false } orx-pseudo-default = { version = "2.1.0", default-features = false } -orx-self-or = { version = "1.2.0" } +orx-self-or = { version = "1.2.0", default-features = false } # optional: generic iterator -rayon = { version = "1.11.0", optional = true } +rayon = { version = "1.11.0", optional = true, default-features = false } # optional: thread pool -pond = { version = "0.3.1", optional = true } -poolite = { version = "0.7.1", optional = true } -rayon-core = { version = "1.13.0", optional = true } -scoped-pool = { version = "1.0.0", optional = true } -scoped_threadpool = { version = "0.1.9", optional = true } -yastl = { version = "0.1.2", optional = true } +pond = { version = "0.3.1", optional = true, default-features = false } +poolite = { version = "0.7.1", optional = true, default-features = false } +rayon-core = { version = "1.13.0", optional = true, default-features = false } +scoped-pool = { version = "1.0.0", optional = true, default-features = false } +scoped_threadpool = { version = "0.1.9", optional = true, default-features = false } +yastl = { version = "0.1.2", optional = true, default-features = false } [dev-dependencies] chrono = "0.4.42" @@ -53,15 +53,15 @@ harness = false all-features = true [features] -default = ["std"] -# default = [ -# "std", -# "pond", -# "poolite", -# "rayon-core", -# "scoped-pool", -# "scoped_threadpool", -# "yastl", -# ] +# default = ["std"] +default = [ + "std", + "pond", + "poolite", + "rayon-core", + "scoped-pool", + "scoped_threadpool", + "yastl", +] std = [] generic_iterator = ["rayon"] diff --git a/src/runner/implementations/tests/yastl.rs 
index 2edc813..f20e852 100644
--- a/src/runner/implementations/tests/yastl.rs
+++ b/src/runner/implementations/tests/yastl.rs
@@ -23,6 +23,6 @@ fn pool_yastl_map(n: usize, nt: usize, chunk: usize, ordering: IterationOrder)
     run_map(n, chunk, ordering, orch);
 
     let pool = YastlPool::with_config(nt, ThreadConfig::new());
-    let orch: RunnerWithPool<_> = (&pool).into();
+    let orch: RunnerWithPool<_> = (pool).into();
     run_map(n, chunk, ordering, orch);
 }

diff --git a/src/runner/implementations/yastl.rs b/src/runner/implementations/yastl.rs
index 77ce509..accaf2e 100644
--- a/src/runner/implementations/yastl.rs
+++ b/src/runner/implementations/yastl.rs
@@ -1,5 +1,6 @@
 use crate::ParThreadPool;
 use core::num::NonZeroUsize;
+use orx_self_or::SoR;
 use yastl::{Pool, Scope, ThreadConfig};
 
 /// A wrapper for `yastl::Pool` and number of threads it was built with.
@@ -47,7 +48,7 @@ impl YastlPool {
     }
 }
 
-impl ParThreadPool for YastlPool {
+impl<P: SoR<YastlPool>> ParThreadPool for P {
     type ScopeRef<'s, 'env, 'scope>
         = &'s Scope<'scope>
     where
@@ -68,39 +69,10 @@ impl<P: SoR<YastlPool>> ParThreadPool for P {
         'env: 'scope,
         for<'s> F: FnOnce(&'s Scope<'scope>) + Send,
     {
-        self.0.scoped(f)
+        P::get_ref(self).0.scoped(f)
     }
 
     fn max_num_threads(&self) -> NonZeroUsize {
-        self.1
-    }
-}
-
-impl ParThreadPool for &YastlPool {
-    type ScopeRef<'s, 'env, 'scope>
-        = &'s Scope<'scope>
-    where
-        'scope: 's,
-        'env: 'scope + 's;
-
-    fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
-    where
-        'scope: 's,
-        'env: 'scope + 's,
-        W: Fn() + Send + 'scope + 'env,
-    {
-        s.execute(work);
-    }
-
-    fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
-    where
-        'env: 'scope,
-        for<'s> F: FnOnce(&'s Scope<'scope>) + Send,
-    {
-        self.0.scoped(f)
-    }
-
-    fn max_num_threads(&self) -> NonZeroUsize {
-        self.1
+        self.get_ref().1
     }
 }

From 0c3848994dfafc87efef5b34814ef476d77df4ee Mon Sep 17 00:00:00 2001
From: orxfun
Date: Fri, 19 Sep 2025 14:15:48 +0200
Subject: [PATCH 261/264] remove self-or dependency

---
 Cargo.toml                          | 12 +---------
 src/runner/implementations/yastl.rs | 36 +++++++++++++++++++++++++----
 2 files changed, 33 insertions(+), 15 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 6ca947d..efc9ee1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,7 +21,6 @@ orx-iterable = { version = "1.3.0", default-features = false }
 orx-pinned-concurrent-col = { version = "2.15.0", default-features = false }
 orx-priority-queue = { version = "1.7.0", default-features = false }
 orx-pseudo-default = { version = "2.1.0", default-features = false }
-orx-self-or = { version = "1.2.0", default-features = false }
 
 # optional: generic iterator
 rayon = { version = "1.11.0", optional = true, default-features = false }
@@ -53,15 +52,6 @@ harness = false
 all-features = true
 
 [features]
-# default = ["std"]
-default = [
-    "std",
-    "pond",
-    "poolite",
-    "rayon-core",
-    "scoped-pool",
-    "scoped_threadpool",
-    "yastl",
-]
+default = ["std"]
 std = []
 generic_iterator = ["rayon"]

diff --git a/src/runner/implementations/yastl.rs b/src/runner/implementations/yastl.rs
index accaf2e..77ce509 100644
--- a/src/runner/implementations/yastl.rs
+++ b/src/runner/implementations/yastl.rs
@@ -1,6 +1,5 @@
 use crate::ParThreadPool;
 use core::num::NonZeroUsize;
-use orx_self_or::SoR;
 use yastl::{Pool, Scope, ThreadConfig};
 
 /// A wrapper for `yastl::Pool` and number of threads it was built with.
@@ -48,7 +47,7 @@ impl YastlPool {
     }
 }
 
-impl<P: SoR<YastlPool>> ParThreadPool for P {
+impl ParThreadPool for YastlPool {
     type ScopeRef<'s, 'env, 'scope>
         = &'s Scope<'scope>
     where
@@ -69,10 +68,39 @@ impl<P: SoR<YastlPool>> ParThreadPool for P {
         'env: 'scope,
         for<'s> F: FnOnce(&'s Scope<'scope>) + Send,
     {
-        P::get_ref(self).0.scoped(f)
+        self.0.scoped(f)
     }
 
     fn max_num_threads(&self) -> NonZeroUsize {
-        self.get_ref().1
+        self.1
+    }
+}
+
+impl ParThreadPool for &YastlPool {
+    type ScopeRef<'s, 'env, 'scope>
+        = &'s Scope<'scope>
+    where
+        'scope: 's,
+        'env: 'scope + 's;
+
+    fn run_in_scope<'s, 'env, 'scope, W>(s: &Self::ScopeRef<'s, 'env, 'scope>, work: W)
+    where
+        'scope: 's,
+        'env: 'scope + 's,
+        W: Fn() + Send + 'scope + 'env,
+    {
+        s.execute(work);
+    }
+
+    fn scoped_computation<'env, 'scope, F>(&'env mut self, f: F)
+    where
+        'env: 'scope,
+        for<'s> F: FnOnce(&'s Scope<'scope>) + Send,
+    {
+        self.0.scoped(f)
+    }
+
+    fn max_num_threads(&self) -> NonZeroUsize {
+        self.1
     }
 }

From 517efa4090f910aff4a822dc82875bc442d80857 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Fri, 19 Sep 2025 14:23:55 +0200
Subject: [PATCH 262/264] revise example parameters

---
 examples/benchmark_pools.rs        | 2 +-
 examples/max_num_threads_config.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/benchmark_pools.rs b/examples/benchmark_pools.rs
index cd8a356..15e5349 100644
--- a/examples/benchmark_pools.rs
+++ b/examples/benchmark_pools.rs
@@ -59,7 +59,7 @@ fn main() {
     #[arg(long, default_value_t = 100000)]
     len: usize,
 
     /// Number of repetitions to measure time; total time will be reported.
-    #[arg(long, default_value_t = 1000)]
+    #[arg(long, default_value_t = 100)]
     num_repetitions: usize,
 }

diff --git a/examples/max_num_threads_config.rs b/examples/max_num_threads_config.rs
index 0225636..d812611 100644
--- a/examples/max_num_threads_config.rs
+++ b/examples/max_num_threads_config.rs
@@ -50,7 +50,7 @@ fn main() {
         }
     }
 
-    let n = 1 << 30;
+    let n = 1 << 32;
     let input = 0..n;
 
     // default -> might use all threads

From bc409be8e8c35f91a62d23ffed7fe3bde4f756e1 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Fri, 19 Sep 2025 14:28:22 +0200
Subject: [PATCH 263/264] fix 32-bit error in test

---
 examples/max_num_threads_config.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/max_num_threads_config.rs b/examples/max_num_threads_config.rs
index d812611..9f618b8 100644
--- a/examples/max_num_threads_config.rs
+++ b/examples/max_num_threads_config.rs
@@ -50,7 +50,7 @@ fn main() {
         }
     }
 
-    let n = 1 << 32;
+    let n = 1 << 31;
     let input = 0..n;
 
     // default -> might use all threads

From 46829013e617ab9b7694c8d8e7ce3f903b269723 Mon Sep 17 00:00:00 2001
From: orxfun
Date: Sun, 21 Sep 2025 18:14:08 +0200
Subject: [PATCH 264/264] version number is set

---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index efc9ee1..79eb1b6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "orx-parallel"
-version = "4.0.0"
+version = "3.3.0"
 edition = "2024"
 authors = ["orxfun "]
 readme = "README.md"
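
A note on the round trip in patches 260 and 261: patch 260 collapsed the duplicate `ParThreadPool` impls for `YastlPool` and `&YastlPool` into one blanket impl over `orx_self_or::SoR`, and patch 261 reverted it to drop the `orx-self-or` dependency, accepting the duplication. The sketch below illustrates the underlying self-or-ref pattern in isolation; `SoR`, `Pool`, and `MaxThreads` here are local stand-ins invented for illustration, not the actual items of `orx-self-or` or this crate.

```rust
/// Local stand-in for the self-or-ref abstraction: a `SoR<T>` is
/// either a `T` itself or a shared reference to one.
trait SoR<T> {
    fn get_ref(&self) -> &T;
}

// Every T is trivially "self".
impl<T> SoR<T> for T {
    fn get_ref(&self) -> &T {
        self
    }
}

// Every &T is a "ref". This does not overlap with the impl above:
// instantiating `SoR<T> for T` at `&U` yields `SoR<&U>`, not `SoR<U>`.
impl<T> SoR<T> for &T {
    fn get_ref(&self) -> &T {
        self
    }
}

/// Stand-in for a pool wrapper that records its thread count.
struct Pool(usize);

trait MaxThreads {
    fn max_num_threads(&self) -> usize;
}

// One blanket impl serves both `Pool` and `&Pool` at once, replacing the
// two near-identical impls that patch 261 restores for the yastl wrapper.
impl<P: SoR<Pool>> MaxThreads for P {
    fn max_num_threads(&self) -> usize {
        self.get_ref().0
    }
}

fn main() {
    let pool = Pool(8);
    assert_eq!(pool.max_num_threads(), 8); // owned pool
    assert_eq!((&pool).max_num_threads(), 8); // borrowed pool
}
```

The trade-off is visible in the diffs above: the blanket impl removes roughly thirty duplicated lines but pulls in a dependency and makes the trait bounds less direct, which is why patch 261 settles for the two concrete impls.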