Commit

style: Shutdown Servo's thread-pool in leak-checking builds, leak the atom table elsewhere.

Differential Revision: https://phabricator.services.mozilla.com/D44217
emilio committed Sep 12, 2019
1 parent d54d1bc · commit 9eaadc6
Showing 4 changed files with 70 additions and 18 deletions.
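The diff pairs two strategies: a new StyleThreadPool::shutdown() drops the rayon pool and waits for every worker thread to exit, while the thread-local caches (BLOOM_KEY, SHARING_CACHE_KEY) are intentionally leaked, since rayon gives no guarantee that TLS destructors run on its worker threads. A minimal sketch of how a leak-checking shutdown path might call the new API — the function below is hypothetical (assumed to live inside the style crate) and is not part of this commit:

    // Hypothetical caller, for illustration only; the real hook that calls
    // this on shutdown lives outside the files shown in this diff.
    fn shutdown_style_system_for_leak_checking() {
        // Drop the rayon pool and spin until every worker has run its exit
        // handler, so no style worker is still alive (holding allocations)
        // when the leak checker inspects the heap.
        crate::global_style_data::StyleThreadPool::shutdown();
    }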
6 changes: 2 additions & 4 deletions components/selectors/bloom.rs
@@ -64,7 +64,7 @@ pub type BloomFilter = CountingBloomFilter<BloomStorageU8>;
 /// Similarly, using a KeySize of 10 would lead to a 4% false
 /// positive rate for N == 100 and to quite bad false positive
 /// rates for larger N.
-#[derive(Clone)]
+#[derive(Clone, Default)]
 pub struct CountingBloomFilter<S>
 where
     S: BloomStorage,
@@ -79,9 +79,7 @@ where
     /// Creates a new bloom filter.
     #[inline]
     pub fn new() -> Self {
-        CountingBloomFilter {
-            storage: Default::default(),
-        }
+        Default::default()
     }
 
     #[inline]
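The Default derive is what lets new() collapse to Default::default(): the filter's only field is its storage, which is itself Default. A standalone sketch under assumed names (Counting and storage are stand-ins, not the real selectors types):

    // Illustrative only: deriving Default works because the single field's
    // type is itself Default, so the hand-written constructor can delegate.
    #[derive(Clone, Default)]
    struct Counting<S: Default> {
        storage: S,
    }

    impl<S: Default> Counting<S> {
        fn new() -> Self {
            // Same as `Counting { storage: Default::default() }`.
            Default::default()
        }
    }

    fn main() {
        let c: Counting<[u8; 8]> = Counting::new();
        assert_eq!(c.storage, [0u8; 8]);
    }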
15 changes: 11 additions & 4 deletions components/style/bloom.rs
@@ -13,13 +13,20 @@ use owning_ref::OwningHandle;
 use selectors::bloom::BloomFilter;
 use servo_arc::Arc;
 use smallvec::SmallVec;
+use std::mem::ManuallyDrop;
 
 thread_local! {
     /// Bloom filters are large allocations, so we store them in thread-local storage
     /// such that they can be reused across style traversals. StyleBloom is responsible
     /// for ensuring that the bloom filter is zeroed when it is dropped.
-    static BLOOM_KEY: Arc<AtomicRefCell<BloomFilter>> =
-        Arc::new_leaked(AtomicRefCell::new(BloomFilter::new()));
+    ///
+    /// We intentionally leak this from TLS because we don't have the guarantee
+    /// of TLS destructors to run in worker threads.
+    ///
+    /// We could change this once https://github.com/rayon-rs/rayon/issues/688
+    /// is fixed, hopefully.
+    static BLOOM_KEY: ManuallyDrop<Arc<AtomicRefCell<BloomFilter>>> =
+        ManuallyDrop::new(Arc::new_leaked(Default::default()));
 }
 
 /// A struct that allows us to fast-reject deep descendant selectors avoiding
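The same leak-on-purpose pattern in standalone form, as a minimal sketch with plain std types (RefCell and std::sync::Arc stand in for AtomicRefCell and servo_arc, which provides the Arc::new_leaked constructor used above). Because the slot is wrapped in ManuallyDrop, no TLS destructor is registered for it, so it makes no difference whether a rayon worker ever runs its TLS destructors:

    use std::cell::RefCell;
    use std::mem::ManuallyDrop;
    use std::sync::Arc;

    thread_local! {
        // Intentionally leaked, mirroring BLOOM_KEY above: the Arc refcount
        // is never decremented on thread exit.
        static SCRATCH: ManuallyDrop<Arc<RefCell<Vec<u8>>>> =
            ManuallyDrop::new(Arc::new(RefCell::new(Vec::new())));
    }

    fn with_scratch<R>(f: impl FnOnce(&mut Vec<u8>) -> R) -> R {
        // Auto-deref walks through ManuallyDrop and Arc to reach the RefCell.
        SCRATCH.with(|s| f(&mut *s.borrow_mut()))
    }

    fn main() {
        let len = with_scratch(|buf| {
            buf.extend_from_slice(b"reused across calls on this thread");
            buf.len()
        });
        assert_eq!(len, 34);
    }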
@@ -128,15 +135,15 @@ impl<E: TElement> StyleBloom<E> {
     // See https://github.com/servo/servo/pull/18420#issuecomment-328769322
     #[inline(never)]
     pub fn new() -> Self {
-        let bloom_arc = BLOOM_KEY.with(|b| b.clone());
+        let bloom_arc = BLOOM_KEY.with(|b| Arc::clone(&*b));
         let filter =
             OwningHandle::new_with_fn(bloom_arc, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
         debug_assert!(
             filter.is_zeroed(),
             "Forgot to zero the bloom filter last time"
         );
         StyleBloom {
-            filter: filter,
+            filter,
             elements: Default::default(),
             pushed_hashes: Default::default(),
         }
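A note on the new Arc::clone(&*b) spelling, sketched with std types (assumed, not the real servo_arc/AtomicRefCell ones): now that the slot is a ManuallyDrop<Arc<_>>, a bare b.clone() would clone the ManuallyDrop wrapper rather than hand back the Arc that OwningHandle needs, so the code derefs through the wrapper and bumps the refcount explicitly:

    use std::mem::ManuallyDrop;
    use std::sync::Arc;

    fn grab(slot: &ManuallyDrop<Arc<Vec<u8>>>) -> Arc<Vec<u8>> {
        // `slot.clone()` would yield another ManuallyDrop<Arc<Vec<u8>>>;
        // deref coercion through ManuallyDrop lets Arc::clone bump the
        // refcount and return the inner Arc instead.
        Arc::clone(&*slot)
    }

    fn main() {
        let slot = ManuallyDrop::new(Arc::new(vec![1u8, 2, 3]));
        let arc = grab(&slot);
        assert_eq!(Arc::strong_count(&arc), 2);
    }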
56 changes: 52 additions & 4 deletions components/style/global_style_data.rs
@@ -12,6 +12,8 @@ use crate::shared_lock::SharedRwLock;
 use crate::thread_state;
 use rayon;
 use std::env;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use parking_lot::{RwLock, RwLockReadGuard};
 
 /// Global style data
 pub struct GlobalStyleData {
@@ -22,20 +24,28 @@ pub struct GlobalStyleData {
     pub options: StyleSystemOptions,
 }
 
-/// Global thread pool
+/// Global thread pool.
 pub struct StyleThreadPool {
     /// How many threads parallel styling can use.
     pub num_threads: usize,
 
     /// The parallel styling thread pool.
-    pub style_thread_pool: Option<rayon::ThreadPool>,
+    ///
+    /// For leak-checking purposes, we want to terminate the thread-pool, which
+    /// waits for all the async jobs to complete. Thus the RwLock.
+    style_thread_pool: RwLock<Option<rayon::ThreadPool>>,
 }
 
 fn thread_name(index: usize) -> String {
     format!("StyleThread#{}", index)
 }
 
+// A counter so that we can wait for shutdown of all threads. See
+// StyleThreadPool::shutdown.
+static ALIVE_WORKER_THREADS: AtomicUsize = AtomicUsize::new(0);
+
 fn thread_startup(_index: usize) {
+    ALIVE_WORKER_THREADS.fetch_add(1, Ordering::Relaxed);
     thread_state::initialize_layout_worker_thread();
     #[cfg(feature = "gecko")]
     unsafe {
@@ -55,6 +65,43 @@ fn thread_shutdown(_: usize) {
         bindings::Gecko_UnregisterProfilerThread();
         bindings::Gecko_SetJemallocThreadLocalArena(false);
     }
+    ALIVE_WORKER_THREADS.fetch_sub(1, Ordering::Relaxed);
 }
 
+impl StyleThreadPool {
+    /// Shuts down the thread pool, waiting for all work to complete.
+    pub fn shutdown() {
+        if ALIVE_WORKER_THREADS.load(Ordering::Relaxed) == 0 {
+            return;
+        }
+        {
+            // Drop the pool.
+            let _ = STYLE_THREAD_POOL.style_thread_pool.write().take();
+        }
+        // Spin until all our threads are done. This will usually be pretty
+        // fast, as on shutdown there should be basically no threads left
+        // running.
+        //
+        // This still _technically_ doesn't give us the guarantee of TLS
+        // destructors running on the worker threads. For that we'd need help
+        // from rayon to properly join the threads.
+        //
+        // See https://github.com/rayon-rs/rayon/issues/688
+        //
+        // So we instead intentionally leak TLS stuff (see BLOOM_KEY and co) for
+        // now until that's fixed.
+        while ALIVE_WORKER_THREADS.load(Ordering::Relaxed) != 0 {
+            std::thread::yield_now();
+        }
+    }
+
+    /// Returns a reference to the thread pool.
+    ///
+    /// We only really want to give read-only access to the pool, except
+    /// for shutdown().
+    pub fn pool(&self) -> RwLockReadGuard<Option<rayon::ThreadPool>> {
+        self.style_thread_pool.read()
+    }
+}
+
 lazy_static! {
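How the counter and the rayon handlers fit together, as a standalone sketch (the pool size and names are assumed; the real builder configuration sits in the collapsed part of this file, which presumably passes thread_startup and thread_shutdown as rayon's start and exit handlers): each worker increments the counter when it starts and decrements it as it exits, so shutdown() can drop the pool and then spin until every exit handler has run:

    use std::sync::atomic::{AtomicUsize, Ordering};

    static ALIVE: AtomicUsize = AtomicUsize::new(0);

    fn build_pool() -> rayon::ThreadPool {
        rayon::ThreadPoolBuilder::new()
            .num_threads(4) // assumed; the real count comes from STYLO_THREADS
            .start_handler(|_index| {
                ALIVE.fetch_add(1, Ordering::Relaxed);
            })
            .exit_handler(|_index| {
                ALIVE.fetch_sub(1, Ordering::Relaxed);
            })
            .build()
            .expect("failed to build the thread pool")
    }

    fn shutdown(pool: rayon::ThreadPool) {
        drop(pool); // asks the workers to wind down
        // Wait until every exit handler has run before letting shutdown
        // proceed; usually this spins only briefly.
        while ALIVE.load(Ordering::Relaxed) != 0 {
            std::thread::yield_now();
        }
    }

    fn main() {
        let pool = build_pool();
        let sum: u64 = pool.install(|| (1..=100u64).sum());
        assert_eq!(sum, 5050);
        shutdown(pool);
        assert_eq!(ALIVE.load(Ordering::Relaxed), 0);
    }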
@@ -113,10 +160,11 @@ lazy_static! {
         };
 
         StyleThreadPool {
-            num_threads: num_threads,
-            style_thread_pool: pool,
+            num_threads,
+            style_thread_pool: RwLock::new(pool),
         }
     };
+
     /// Global style data
     pub static ref GLOBAL_STYLE_DATA: GlobalStyleData = GlobalStyleData {
         shared_lock: SharedRwLock::new_leaked(),
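Callers now reach the pool through the read guard rather than a public field; a hypothetical call-site sketch assuming the crate's STYLE_THREAD_POOL static from the diff above (the helper name and the sequential fallback are illustrative, not from this commit):

    fn style_maybe_in_parallel<R: Send>(work: impl FnOnce() -> R + Send) -> R {
        let guard = STYLE_THREAD_POOL.pool();
        match *guard {
            // Run inside the rayon pool; holding the read guard keeps
            // shutdown() (which needs the write lock) from taking the pool
            // away underneath us.
            Some(ref pool) => pool.install(work),
            // Parallel styling disabled: fall back to running sequentially.
            None => work(),
        }
    }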
11 changes: 5 additions & 6 deletions components/style/sharing/mod.rs
@@ -82,7 +82,7 @@ use servo_arc::Arc;
 use smallbitvec::SmallBitVec;
 use smallvec::SmallVec;
 use std::marker::PhantomData;
-use std::mem;
+use std::mem::{self, ManuallyDrop};
 use std::ops::Deref;
 use std::ptr::NonNull;
 use uluru::{Entry, LRUCache};
@@ -477,10 +477,9 @@ type TypelessSharingCache = SharingCacheBase<FakeCandidate>;
 type StoredSharingCache = Arc<AtomicRefCell<TypelessSharingCache>>;
 
 thread_local! {
-    // TODO(emilio): Looks like a few of these should just be Rc<RefCell<>> or
-    // something. No need for atomics in the thread-local code.
-    static SHARING_CACHE_KEY: StoredSharingCache =
-        Arc::new_leaked(AtomicRefCell::new(TypelessSharingCache::default()));
+    // See the comment on bloom.rs about why do we leak this.
+    static SHARING_CACHE_KEY: ManuallyDrop<StoredSharingCache> =
+        ManuallyDrop::new(Arc::new_leaked(Default::default()));
 }
 
 /// An LRU cache of the last few nodes seen, so that we can aggressively try to
@@ -533,7 +532,7 @@ impl<E: TElement> StyleSharingCache<E> {
             mem::align_of::<SharingCache<E>>(),
             mem::align_of::<TypelessSharingCache>()
         );
-        let cache_arc = SHARING_CACHE_KEY.with(|c| c.clone());
+        let cache_arc = SHARING_CACHE_KEY.with(|c| Arc::clone(&*c));
         let cache =
             OwningHandle::new_with_fn(cache_arc, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
         debug_assert!(cache.is_empty());
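Both caches hand their thread-local Arc to OwningHandle so that the borrow and the storage it borrows from travel together as one value; a standalone sketch of that idiom with plain std types (RefCell and std::sync::Arc stand in for AtomicRefCell and servo_arc; names are assumed):

    use owning_ref::OwningHandle;
    use std::cell::{RefCell, RefMut};
    use std::sync::Arc;

    type Cache = Vec<u8>;
    type Handle = OwningHandle<Arc<RefCell<Cache>>, RefMut<'static, Cache>>;

    fn borrow_cache(cache: Arc<RefCell<Cache>>) -> Handle {
        // The handle owns the Arc (a stable address) together with the
        // mutable borrow of its interior, so it can be stored in a struct
        // and used like a `&mut Cache` for as long as it lives.
        OwningHandle::new_with_fn(cache, |x| unsafe { x.as_ref() }.unwrap().borrow_mut())
    }

    fn main() {
        let shared = Arc::new(RefCell::new(Cache::new()));
        let mut handle = borrow_cache(Arc::clone(&shared));
        handle.push(42);
        drop(handle); // release the borrow before touching `shared` again
        assert_eq!(shared.borrow().len(), 1);
    }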
