diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 2629065f14d..adee919656e 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -1231,12 +1231,6 @@ cfg_rt_multi_thread! { use crate::runtime::scheduler::{self, MultiThread}; let core_threads = self.worker_threads.unwrap_or_else(num_cpus); - // Shrink the size of spawn_concurrency_level when using loom. This shouldn't impact - // logic, but allows loom to test more edge cases in a reasoable a mount of time. - #[cfg(loom)] - let spawn_concurrency_level = 4; - #[cfg(not(loom))] - let spawn_concurrency_level = Self::get_spawn_concurrency_level(core_threads); let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; @@ -1255,7 +1249,6 @@ cfg_rt_multi_thread! { driver_handle, blocking_spawner, seed_generator_2, - spawn_concurrency_level, Config { before_park: self.before_park.clone(), after_unpark: self.after_unpark.clone(), @@ -1286,14 +1279,6 @@ cfg_rt_multi_thread! { use crate::runtime::scheduler::MultiThreadAlt; let core_threads = self.worker_threads.unwrap_or_else(num_cpus); - - // Shrink the size of spawn_concurrency_level when using loom. This shouldn't impact - // logic, but allows loom to test more edge cases in a reasoable a mount of time. - #[cfg(loom)] - let spawn_concurrency_level = 4; - #[cfg(not(loom))] - let spawn_concurrency_level = Self::get_spawn_concurrency_level(core_threads); - let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; // Create the blocking pool @@ -1311,7 +1296,6 @@ cfg_rt_multi_thread! { driver_handle, blocking_spawner, seed_generator_2, - spawn_concurrency_level, Config { before_park: self.before_park.clone(), after_unpark: self.after_unpark.clone(), @@ -1329,15 +1313,6 @@ cfg_rt_multi_thread! 
{ Ok(Runtime::from_parts(Scheduler::MultiThreadAlt(scheduler), handle, blocking_pool)) } } - - fn get_spawn_concurrency_level(core_threads : usize) -> usize { - const MAX_SPAWN_CONCURRENCY_LEVEL: usize = 1 << 16; - let mut size = 1; - while size / 4 < core_threads && size < MAX_SPAWN_CONCURRENCY_LEVEL { - size <<= 1; - } - size.min(MAX_SPAWN_CONCURRENCY_LEVEL) - } } } diff --git a/tokio/src/runtime/scheduler/multi_thread/mod.rs b/tokio/src/runtime/scheduler/multi_thread/mod.rs index 24d353cdfac..d85a0ae0a2a 100644 --- a/tokio/src/runtime/scheduler/multi_thread/mod.rs +++ b/tokio/src/runtime/scheduler/multi_thread/mod.rs @@ -60,7 +60,6 @@ impl MultiThread { driver_handle: driver::Handle, blocking_spawner: blocking::Spawner, seed_generator: RngSeedGenerator, - spawn_concurrency_level: usize, config: Config, ) -> (MultiThread, Arc, Launch) { let parker = Parker::new(driver); @@ -70,7 +69,6 @@ impl MultiThread { driver_handle, blocking_spawner, seed_generator, - spawn_concurrency_level, config, ); diff --git a/tokio/src/runtime/scheduler/multi_thread/worker.rs b/tokio/src/runtime/scheduler/multi_thread/worker.rs index d4aed2ec231..c1dd6df9865 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker.rs @@ -245,7 +245,6 @@ pub(super) fn create( driver_handle: driver::Handle, blocking_spawner: blocking::Spawner, seed_generator: RngSeedGenerator, - spawn_concurrency_level: usize, config: Config, ) -> (Arc, Launch) { let mut cores = Vec::with_capacity(size); @@ -288,7 +287,7 @@ pub(super) fn create( remotes: remotes.into_boxed_slice(), inject, idle, - owned: OwnedTasks::new(spawn_concurrency_level as u32), + owned: OwnedTasks::new(size), synced: Mutex::new(Synced { idle: idle_synced, inject: inject_synced, diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/mod.rs b/tokio/src/runtime/scheduler/multi_thread_alt/mod.rs index 86c6f96087d..e30c9b4783b 100644 --- 
a/tokio/src/runtime/scheduler/multi_thread_alt/mod.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/mod.rs @@ -49,7 +49,6 @@ impl MultiThread { driver_handle: driver::Handle, blocking_spawner: blocking::Spawner, seed_generator: RngSeedGenerator, - spawn_concurrency_level: usize, config: Config, ) -> (MultiThread, runtime::Handle) { let handle = worker::create( @@ -58,7 +57,6 @@ impl MultiThread { driver_handle, blocking_spawner, seed_generator, - spawn_concurrency_level, config, ); diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs index 6db95a935d0..8d16418a80b 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs @@ -259,7 +259,6 @@ pub(super) fn create( driver_handle: driver::Handle, blocking_spawner: blocking::Spawner, seed_generator: RngSeedGenerator, - spawn_concurrency_level: usize, config: Config, ) -> runtime::Handle { let mut num_workers = num_cores; @@ -308,7 +307,7 @@ pub(super) fn create( remotes: remotes.into_boxed_slice(), inject, idle, - owned: OwnedTasks::new(spawn_concurrency_level as u32), + owned: OwnedTasks::new(num_cores), synced: Mutex::new(Synced { assigned_cores: (0..num_workers).map(|_| None).collect(), shutdown_cores: Vec::with_capacity(num_cores), diff --git a/tokio/src/runtime/task/list.rs b/tokio/src/runtime/task/list.rs index 44873540986..b46deb9e909 100644 --- a/tokio/src/runtime/task/list.rs +++ b/tokio/src/runtime/task/list.rs @@ -68,15 +68,17 @@ pub(crate) struct LocalOwnedTasks { pub(crate) id: NonZeroU64, _not_send_or_sync: PhantomData<*const ()>, } + struct OwnedTasksInner { list: LinkedList, as Link>::Target>, closed: bool, } impl OwnedTasks { - pub(crate) fn new(concurrency_level: u32) -> Self { + pub(crate) fn new(num_cores: usize) -> Self { + let shard_size = Self::gen_shared_list_size(num_cores); Self { - list: List::new(concurrency_level as usize), + list: 
List::new(shard_size), closed: AtomicBool::new(false), id: get_next_id(), } @@ -183,6 +185,22 @@ impl OwnedTasks { pub(crate) fn is_empty(&self) -> bool { self.list.is_empty() } + + fn gen_shared_list_size(num_cores: usize) -> usize { + // Shrink the size of the shared task list when using loom. This shouldn't impact + // logic, but allows loom to test more edge cases in a reasonable amount of time. + #[cfg(loom)] + return 4; + #[cfg(not(loom))] + { + const MAX_SHARED_LIST_SIZE: usize = 1 << 16; + let mut size = 1; + while size / 4 < num_cores && size < MAX_SHARED_LIST_SIZE { + size <<= 1; + } + size.min(MAX_SHARED_LIST_SIZE) + } + } } cfg_taskdump! {