Protect the hashmaps outside of rebuilds.
MozReview-Commit-ID: KACmfw4pZY2
bholley committed Sep 28, 2017
1 parent ef04289 commit 039fe17
Showing 3 changed files with 81 additions and 2 deletions.
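For context before the diffs, here is a minimal sketch of the mutation protocol this commit hooks into, inferred from the hunks below. `ProtectedHashMap`, `begin_mutation`, and `end_mutation` come from components/hashglobe/src/protected.rs; the module path, the `insert`/`get` calls, and the surrounding setup are assumptions for illustration, not part of the commit.

```rust
// Sketch only: assumes the hashglobe crate is on the path and that
// ProtectedHashMap exposes std-HashMap-like insert/get (an assumption).
use hashglobe::protected::ProtectedHashMap;

fn example() {
    let mut map: ProtectedHashMap<String, u32> = ProtectedHashMap::new();
    map.begin_mutation();        // unprotects the backing pages (this commit)
    map.insert("foo".into(), 1); // writes are legal only between the calls
    map.end_mutation();          // re-protects; stray writes now fault
    let _ = map.get("foo");      // reads remain fine while protected
}
```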
components/hashglobe/src/hash_map.rs (6 additions, 0 deletions)

@@ -1027,6 +1027,12 @@ impl<K, V, S> HashMap<K, V, S>
         self.table.size()
     }
 
+    /// Access to the raw buffer backing this hashmap.
+    pub fn raw_buffer(&self) -> (*const (), usize) {
+        assert!(self.raw_capacity() != 0);
+        self.table.raw_buffer()
+    }
+
     /// Returns true if the map contains no elements.
     ///
     /// # Examples
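A note on the contract of the new `raw_buffer` accessor: the pointer is the start of the table's single backing allocation, and the size is rounded up to whole pages (see the table.rs hunk below), so the pair can be handed straight to a page-protection call. A sketch of that contract, with the crate path, the `capacity()` method, and the 4096-byte page size all assumed for illustration:

```rust
// Sketch only: `HashMap` here is hashglobe's map (path assumed), and 4096
// is a stand-in page size; the real code reads ::SYSTEM_PAGE_SIZE instead.
use hashglobe::hash_map::HashMap;

fn check_buffer(map: &HashMap<u32, u32>) {
    assert!(map.capacity() != 0); // raw_buffer() asserts a non-zero raw capacity
    let (ptr, size) = map.raw_buffer();
    assert!(!ptr.is_null());      // start of the backing allocation
    assert_eq!(size % 4096, 0);   // size is rounded up to whole pages
}
```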
components/hashglobe/src/protected.rs (57 additions, 2 deletions)

@@ -25,12 +25,14 @@ impl<K: Hash + Eq, V, S: BuildHasher> ProtectedHashMap<K, V, S>
     #[inline(always)]
     pub fn begin_mutation(&mut self) {
         assert!(self.readonly);
+        self.unprotect();
         self.readonly = false;
     }
 
     #[inline(always)]
     pub fn end_mutation(&mut self) {
         assert!(!self.readonly);
+        self.protect();
         self.readonly = true;
     }

@@ -130,6 +132,36 @@ impl<K: Hash + Eq, V, S: BuildHasher> ProtectedHashMap<K, V, S>
         self.map.clear();
         self.end_mutation();
     }
+
+    fn protect(&mut self) {
+        if self.map.capacity() == 0 {
+            return;
+        }
+        let buff = self.map.raw_buffer();
+        if buff.0 as usize % ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed) != 0 {
+            // Safely handle weird allocators like ASAN that return
+            // non-page-aligned buffers to page-sized allocations.
+            return;
+        }
+        unsafe {
+            Gecko_ProtectBuffer(buff.0 as *mut _, buff.1);
+        }
+    }
+
+    fn unprotect(&mut self) {
+        if self.map.capacity() == 0 {
+            return;
+        }
+        let buff = self.map.raw_buffer();
+        if buff.0 as usize % ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed) != 0 {
+            // Safely handle weird allocators like ASAN that return
+            // non-page-aligned buffers to page-sized allocations.
+            return;
+        }
+        unsafe {
+            Gecko_UnprotectBuffer(buff.0 as *mut _, buff.1);
+        }
+    }
 }
 
 impl<K, V> ProtectedHashMap<K, V, RandomState>
@@ -143,10 +175,12 @@ impl<K, V> ProtectedHashMap<K, V, RandomState>
 
     pub fn with_capacity(capacity: usize) -> Self {
-        Self {
+        let mut result = Self {
             map: HashMap::with_capacity(capacity),
             readonly: true,
-        }
+        };
+        result.protect();
+        result
     }
 }

@@ -178,3 +212,24 @@ impl<K, V, S> Default for ProtectedHashMap<K, V, S>
         }
     }
 }
+
+impl<K: Hash + Eq, V, S: BuildHasher> Drop for ProtectedHashMap<K, V, S>
+    where K: Eq + Hash,
+          S: BuildHasher
+{
+    fn drop(&mut self) {
+        debug_assert!(self.readonly, "Dropped while mutating");
+        self.unprotect();
+    }
+}
+
+// Manually declare the FFI functions since we don't depend on the crate with
+// the bindings.
+extern "C" {
+    pub fn Gecko_ProtectBuffer(buffer: *mut ::std::os::raw::c_void,
+                               size: usize);
+}
+extern "C" {
+    pub fn Gecko_UnprotectBuffer(buffer: *mut ::std::os::raw::c_void,
+                                 size: usize);
+}
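The commit only declares `Gecko_ProtectBuffer` and `Gecko_UnprotectBuffer`; their definitions live on the Gecko side and are not part of this diff. As a hedged illustration of what such functions plausibly do on POSIX systems, one could toggle page permissions with `mprotect`. This sketch uses the `libc` crate and is an assumption, not the actual Gecko implementation.

```rust
// Illustrative stand-in, NOT the Gecko implementation: flips a
// page-aligned buffer between read-only and read-write with mprotect.
extern crate libc;
use libc::{c_void, mprotect, PROT_READ, PROT_WRITE};

unsafe fn set_buffer_protection(buffer: *mut c_void, size: usize, read_only: bool) {
    let prot = if read_only { PROT_READ } else { PROT_READ | PROT_WRITE };
    // mprotect requires a page-aligned pointer and a page-multiple size,
    // which is why raw_buffer() page-rounds the size and why
    // protect()/unprotect() bail out on non-page-aligned allocations.
    let rv = mprotect(buffer, size, prot);
    assert_eq!(rv, 0, "mprotect failed");
}

fn main() {
    unsafe {
        // Map one anonymous page so we have a page-aligned buffer to flip.
        let page = 4096;
        let buf = libc::mmap(std::ptr::null_mut(), page, PROT_READ | PROT_WRITE,
                             libc::MAP_PRIVATE | libc::MAP_ANON, -1, 0);
        assert_ne!(buf, libc::MAP_FAILED);
        set_buffer_protection(buf, page, true);  // read-only
        set_buffer_protection(buf, page, false); // read-write again
        libc::munmap(buf, page);
    }
}
```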
components/hashglobe/src/table.rs (18 additions, 0 deletions)

@@ -813,6 +813,24 @@ impl<K, V> RawTable<K, V> {
         }
     }
 
+    /// Access to the raw buffer backing this table.
+    pub fn raw_buffer(&self) -> (*const (), usize) {
+        debug_assert!(self.capacity() != 0);
+
+        let buffer = self.hashes.ptr() as *const ();
+        let size = {
+            let hashes_size = self.capacity() * size_of::<HashUint>();
+            let pairs_size = self.capacity() * size_of::<(K, V)>();
+            let (_, _, size, _) = calculate_allocation(hashes_size,
+                                                       align_of::<HashUint>(),
+                                                       pairs_size,
+                                                       align_of::<(K, V)>());
+            round_up_to_page_size(size)
+        };
+        (buffer, size)
+    }
+
+
     /// Creates a new raw table from a given capacity. All buckets are
     /// initially empty.
     pub fn new(capacity: usize) -> Result<RawTable<K, V>, FailedAllocationError> {
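The size computed by `raw_buffer` is page-rounded via `round_up_to_page_size`, a hashglobe-internal helper whose definition is not shown in this diff. A self-contained sketch of that arithmetic, assuming a 4096-byte power-of-two page size:

```rust
// Stand-in for hashglobe's round_up_to_page_size (assumed behavior):
// round `size` up to the next multiple of the page size.
fn round_up_to_page_size(size: usize) -> usize {
    const PAGE: usize = 4096; // assumed; the real code uses the OS page size
    (size + PAGE - 1) & !(PAGE - 1)
}

fn main() {
    assert_eq!(round_up_to_page_size(0), 0);
    assert_eq!(round_up_to_page_size(1), 4096);
    assert_eq!(round_up_to_page_size(4096), 4096);
    assert_eq!(round_up_to_page_size(4097), 8192);
}
```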
