Skip to content

Commit

Permalink
Implement a proper bump allocator
Browse files Browse the repository at this point in the history
This commit introduces a proper thread-safe bump allocator to
bh_alloc. The previous allocator is now moved to the `fuzz`
sub-module. This work was inspired by a Reddit
[conversation](https://www.reddit.com/r/rust/comments/9twam5/jemalloc_was_just_removed_from_the_standard/e902qnh/). I
felt pretty bad that my poor fuzzy allocator got held up to
demonstrate a general purpose bump allocator. :)

Anyway, the only dicey bit here is the AtomicUsize. I haven't done any
stress testing on an ARM machine, which should be done. x86 has
stricter memory ordering, hiding bugs that only turn up under ARM.

Signed-off-by: Brian L. Troutwine <brian@troutwine.us>
  • Loading branch information
blt committed Nov 5, 2018
1 parent 7767f60 commit 8810756
Show file tree
Hide file tree
Showing 5 changed files with 139 additions and 22 deletions.
5 changes: 4 additions & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "bh_alloc"
version = "0.2.0"
authors = ["Brian L. Troutwine <brian@troutwine.us>"]
keywords = ["fuzzing", "allocator"]
categories = ["memory-management"]
Expand All @@ -12,6 +12,9 @@ repository = "https://github.com/blt/bh_alloc"
[dependencies]
libc = "0.2"

[dev-dependencies]
quickcheck = "0.7"

[[bin]]
path = "src/bin/hello_world.rs"
name = "hello_world"
Expand Down
2 changes: 1 addition & 1 deletion src/bin/alloc_and_destroy.rs
@@ -1,7 +1,7 @@
extern crate bh_alloc;

#[global_allocator]
static ALLOC: bh_alloc::BumpAlloc = bh_alloc::BumpAlloc::INIT;
static ALLOC: bh_alloc::fuzz::BumpAlloc = bh_alloc::fuzz::BumpAlloc::INIT;

fn main() {
for i in 0..=bh_alloc::TOTAL_BYTES {
Expand Down
3 changes: 3 additions & 0 deletions src/bin/hello_world.rs
Expand Up @@ -5,4 +5,7 @@ static ALLOC: bh_alloc::BumpAlloc = bh_alloc::BumpAlloc::INIT;

fn main() {
    println!("Hello, world!");
    // Print a million integers: each formatted line forces allocator
    // traffic, exercising the global bump allocator.
    (0..1_000_000).for_each(|n| println!("{}", n));
}
72 changes: 72 additions & 0 deletions src/fuzz/mod.rs
@@ -0,0 +1,72 @@
//! Allocators suitable for fuzzing environments
//!
//! The allocators available in this sub-crate are intended to be used in
//! fuzzing targets. The number of branches are kept intentionally low and
//! suitability for threaded environments are a non-priority.

extern crate libc;

use self::libc::{_exit, EXIT_SUCCESS};

use std::alloc::{GlobalAlloc, Layout};
use std::cell::UnsafeCell;

/// Total number of bytes that [`BumpAlloc`] will have available to it.
pub const TOTAL_BYTES: usize = 500_000_000; // 500 MB
// Backing storage for the allocator. Unsynchronized `static mut` access is
// acceptable only for the single-threaded fuzzing use this module targets
// (see the module docs: thread suitability is a non-priority here).
static mut HEAP: [u8; TOTAL_BYTES] = [0; TOTAL_BYTES];

/// Bump allocator for *single* core systems
///
/// A bump allocator keeps a single pointer to the start of the uninitialized
/// heap. When an allocation happens this pointer is 'bumped' sufficiently to
/// fit the allocation. Deallocations have no effect on the pointer, meaning
/// that memory is allocated at program start and never freed. This is very
/// fast.
///
/// BumpAlloc has an additional feature. When all its heap memory is exhausted
/// `libc::_exit(EXIT_SUCCESS)` is called. This behaviour aids in the production
/// of fuzzers.
pub struct BumpAlloc {
    // Index of the next free byte in `HEAP`; bumped on every allocation.
    offset: UnsafeCell<usize>,
}

// NOTE(review): `UnsafeCell` provides no synchronization, so this impl is
// only sound under the single-threaded use this module documents.
unsafe impl Sync for BumpAlloc {}

// thanks, wee_alloc
trait ConstInit {
const INIT: Self;
}

impl ConstInit for BumpAlloc {
const INIT: BumpAlloc = BumpAlloc {
offset: UnsafeCell::new(0),
};
}

impl BumpAlloc {
    /// Initialization for [`BumpAlloc`]
    ///
    /// See the binaries in this repository for full examples.
    ///
    /// Used as: `static ALLOC: bh_alloc::fuzz::BumpAlloc = bh_alloc::fuzz::BumpAlloc::INIT;`
    pub const INIT: Self = <Self as ConstInit>::INIT;
}

unsafe impl GlobalAlloc for BumpAlloc {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let offset = self.offset.get();
let byte_size: usize = layout.size() as usize;

let end = *offset + byte_size;

if end >= TOTAL_BYTES {
_exit(EXIT_SUCCESS);
} else {
let p = HEAP[*offset..end].as_mut_ptr() as *mut u8;
*offset = end;
p
}
}

unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
// never deallocate
}
}
79 changes: 59 additions & 20 deletions src/lib.rs
Expand Up @@ -6,6 +6,9 @@
#![cfg_attr(feature = "cargo-clippy", allow(clippy::style))]
#![cfg_attr(feature = "cargo-clippy", feature(tool_lints))]

#[cfg(test)]
extern crate quickcheck;

#[deny(bad_style)]
#[deny(future_incompatible)]
#[deny(missing_docs)]
Expand All @@ -14,30 +17,25 @@
#[deny(rust_2018_idioms)]
#[deny(unused)]
#[deny(warnings)]
extern crate libc;

use libc::{_exit, EXIT_SUCCESS};
pub mod fuzz;

use std::alloc::{GlobalAlloc, Layout};
use std::cell::UnsafeCell;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering};

/// Total number of bytes that [`BumpAlloc`] will have available to it.
pub const TOTAL_BYTES: usize = 500_000_000; // 500 MB
// Backing storage handed out by the allocator; `alloc` reserves
// non-overlapping `offset` ranges before touching it.
static mut HEAP: [u8; TOTAL_BYTES] = [0; TOTAL_BYTES];

/// Bump allocator for multi-core systems
///
/// A bump allocator keeps a single pointer to the start of the uninitialized
/// heap. When an allocation happens this pointer is 'bumped' sufficiently to
/// fit the allocation. Deallocations have no effect on the pointer, meaning
/// that memory is allocated at program start and never freed. This is very
/// fast.
///
/// When the heap is exhausted, `alloc` returns a null pointer, signalling
/// allocation failure to the caller.
pub struct BumpAlloc {
    // Next free byte in `HEAP`, advanced with compare-and-swap so several
    // threads can allocate concurrently. (The scrape had retained both the
    // old `UnsafeCell` field and this one, a duplicate-field error.)
    offset: AtomicUsize,
}

unsafe impl Sync for BumpAlloc {}
Expand All @@ -49,31 +47,72 @@ trait ConstInit {

impl ConstInit for BumpAlloc {
    // Allocation begins at the very start of `HEAP`. (The scrape had
    // retained both the old `UnsafeCell::new(0)` initializer and this one,
    // which would not compile.)
    const INIT: BumpAlloc = BumpAlloc {
        offset: AtomicUsize::new(0),
    };
}

impl BumpAlloc {
    /// Initialization for [`BumpAlloc`]
    ///
    /// See the binaries in this repository for full examples; used as
    /// `static ALLOC: bh_alloc::BumpAlloc = bh_alloc::BumpAlloc::INIT;`
    pub const INIT: Self = <Self as ConstInit>::INIT;
}

/// Round `val` up to the nearest multiple of `align`.
///
/// An `align` of zero is treated as "no alignment": `val` is returned
/// unchanged.
fn round_to_multiple_of(val: usize, align: usize) -> usize {
    if align == 0 {
        return val;
    }

    let rem = val % align;
    if rem == 0 {
        val
    } else {
        // FIX: compute `val + (align - rem)` rather than the original
        // `(val + align) - rem` — the intermediate `val + align` could
        // overflow for large `val` even when the rounded result fits in
        // usize. `align - rem` is strictly less than `align`, so this form
        // only overflows when the answer genuinely exceeds usize::MAX.
        val + (align - rem)
    }
}

unsafe impl GlobalAlloc for BumpAlloc {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let offset = self.offset.get();
let byte_size: usize = layout.size() as usize;
let alignment = layout.align();
let byte_size: usize = round_to_multiple_of(layout.size() as usize, alignment);

let end = *offset + byte_size;
let mut offset = self.offset.load(Ordering::Relaxed);
loop {
let end = offset + byte_size;

if end >= TOTAL_BYTES {
_exit(EXIT_SUCCESS);
} else {
let p = HEAP[*offset..end].as_mut_ptr() as *mut u8;
*offset = end;
p
if end >= TOTAL_BYTES {
return ptr::null_mut();
} else {
match self.offset.compare_exchange_weak(
offset,
end,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => {
return HEAP[offset..end].as_mut_ptr() as *mut u8;
}
Err(cur) => {
offset = cur;
}
}
}
}
}

unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
// never deallocate
}
}

#[cfg(test)]
mod test {
    use super::*;
    use quickcheck::{QuickCheck, TestResult};

    /// Property: rounding up must never produce a value below the input.
    #[test]
    fn round_up_test_always_greater_equal() {
        fn prop(val: usize, align: usize) -> TestResult {
            TestResult::from_bool(round_to_multiple_of(val, align) >= val)
        }
        QuickCheck::new().quickcheck(prop as fn(usize, usize) -> TestResult);
    }
}

0 comments on commit 8810756

Please sign in to comment.