Porting malloc() and __malloc0().

anp committed May 12, 2016
1 parent 6939fb3 commit 7161d26b7414e123e226d63672705daeac7476c1
Showing with 102 additions and 66 deletions.
  1. +4 −62 musl/src/malloc/malloc.c
  2. +98 −4 src/malloc/malloc.rs
@@ -84,7 +84,7 @@ void unlock_bin(int i)
    unlock(mal.bins[i].lock);
}
static int first_set(uint64_t x)
int first_set(uint64_t x)
{
    return a_ctz_64(x);
}
@@ -97,7 +97,7 @@ int bin_index(size_t x)
    return ((union { float v; uint32_t r; }){(int)x}.r>>21) - 496;
}
static int bin_index_up(size_t x)
int bin_index_up(size_t x)
{
    x = x / SIZE_ALIGN - 1;
    if (x <= 32) return x;
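The return statement above relies on musl's float-punning trick: converting the scaled size to a float and reading back its raw bits yields an approximate base-2 logarithm, so sizes land in exponentially spaced bins. A minimal Rust sketch of the same idea follows; it is illustrative only, not part of this commit, and assumes x has already been divided by SIZE_ALIGN and decremented as bin_index() does.

// Illustrative sketch, not committed code. Assumes x was already scaled
// as in musl's bin_index(): x = size / SIZE_ALIGN - 1.
fn bin_index_sketch(x: usize) -> i32 {
    // (int)x converted to float, then the raw bits are read back: the top
    // bits (sign + exponent + 2 mantissa bits) grow with log2(x), so
    // subtracting a constant yields a small bin number.
    ((x as i32 as f32).to_bits() >> 21) as i32 - 496
}

fn main() {
    // Sizes in the same narrow power-of-two range share a bin.
    assert_eq!(bin_index_sketch(64), bin_index_sketch(65));
    assert!(bin_index_sketch(128) > bin_index_sketch(64));
}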
@@ -112,66 +112,8 @@ int alloc_fwd(struct chunk *c);
int alloc_rev(struct chunk *c);
int pretrim(struct chunk *self, size_t n, int i, int j);
void trim(struct chunk *self, size_t n);
void *malloc(size_t n)
{
    struct chunk *c;
    int i, j;
    if (adjust_size(&n) < 0) return 0;
    if (n > MMAP_THRESHOLD) {
        size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
        char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
            MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        if (base == (void *)-1) return 0;
        c = (void *)(base + SIZE_ALIGN - OVERHEAD);
        c->csize = len - (SIZE_ALIGN - OVERHEAD);
        c->psize = SIZE_ALIGN - OVERHEAD;
        return CHUNK_TO_MEM(c);
    }
    i = bin_index_up(n);
    for (;;) {
        uint64_t mask = mal.binmap & -(1ULL<<i);
        if (!mask) {
            c = expand_heap(n);
            if (!c) return 0;
            if (alloc_rev(c)) {
                struct chunk *x = c;
                c = PREV_CHUNK(c);
                NEXT_CHUNK(x)->psize = c->csize =
                    x->csize + CHUNK_SIZE(c);
            }
            break;
        }
        j = first_set(mask);
        lock_bin(j);
        c = mal.bins[j].head;
        if (c != BIN_TO_CHUNK(j)) {
            if (!pretrim(c, n, i, j)) unbin(c, j);
            unlock_bin(j);
            break;
        }
        unlock_bin(j);
    }
    /* Now patch up in case we over-allocated */
    trim(c, n);
    return CHUNK_TO_MEM(c);
}
void *__malloc0(size_t n)
{
    void *p = malloc(n);
    if (p && !IS_MMAPPED(MEM_TO_CHUNK(p))) {
        size_t *z;
        n = (n + sizeof *z - 1)/sizeof *z;
        for (z=p; n; n--, z++) if (*z) *z=0;
    }
    return p;
}
void *malloc(size_t n);
void *__malloc0(size_t n);
void *realloc(void *p, size_t n)
{
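The net effect of this hunk is that the C translation unit keeps only prototypes for malloc and __malloc0, while the definitions move into Rust (shown in the next file) and are exported under the same unmangled symbols. Below is a minimal, self-contained sketch of that linkage pattern; the symbol name example_exported is hypothetical and does not appear anywhere in this commit.

// Sketch of the C <-> Rust linkage pattern (hypothetical symbol, not from
// this commit). The C side would declare:   int example_exported(int);
// and the Rust side provides the definition under that exact symbol name.
#[no_mangle]
pub extern "C" fn example_exported(x: i32) -> i32 {
    // i32 matches C's int on the targets musl supports.
    x + 1
}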
@@ -1,10 +1,12 @@
use core::isize;
use core::mem::size_of;
use spin::Mutex;
use c_types::*;
use errno::{set_errno, ENOMEM};
use malloc::expand_heap::__expand_heap;
use mmap::__mmap;
use platform::atomic::a_and_64;
use platform::malloc::*;
use platform::mman::*;
@@ -13,19 +15,22 @@ pub const MMAP_THRESHOLD: usize = 0x1c00 * SIZE_ALIGN;
pub const DONTCARE: usize = 16;
pub const RECLAIM: usize = 163_840;
#[repr(C)]
pub struct chunk {
    psize: usize,
    csize: usize,
    next: *mut chunk,
    prev: *mut chunk,
}
#[repr(C)]
pub struct bin {
    lock: [c_int; 2],
    head: *mut chunk,
    tail: *mut chunk,
}
#[repr(C)]
pub struct mal {
    binmap: u64,
    bins: [bin; 64],
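These definitions mirror musl's struct chunk, struct bin, and the mal bookkeeping struct field for field; #[repr(C)] is what keeps their layout identical to the C structs that the not-yet-ported C code still manipulates. The following layout check is illustrative only (not part of the commit) and assumes a 64-bit target.

// Illustrative layout check, not committed code.
use core::mem::{align_of, size_of};

#[repr(C)]
struct ChunkLike {
    psize: usize,
    csize: usize,
    next: *mut ChunkLike,
    prev: *mut ChunkLike,
}

fn main() {
    // With #[repr(C)] the fields keep C's order and padding, so on a
    // 64-bit target this is four pointer-sized words, pointer-aligned,
    // just like musl's struct chunk.
    assert_eq!(size_of::<ChunkLike>(), 4 * size_of::<usize>());
    assert_eq!(align_of::<ChunkLike>(), align_of::<usize>());
}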
@@ -42,10 +47,87 @@ extern "C" {
    fn bin_index(s: usize) -> c_int;
    fn bin_index_up(x: usize) -> c_int;
    fn malloc(n: usize) -> *mut c_void;
    fn __malloc0(n: usize) -> *mut c_void;
    fn realloc(p: *mut c_void, n: usize) -> *mut c_void;
    fn free(p: *mut c_void);
    fn memset(d: *mut c_void, c: c_int, n: usize) -> *mut c_void;
}
#[no_mangle]
pub unsafe extern "C" fn malloc(mut n: usize) -> *mut c_void {
    let mut c: *mut chunk;
    if adjust_size(&mut n as *mut usize) < 0 {
        return 0 as *mut c_void;
    }
    if n > MMAP_THRESHOLD {
        let len = n + OVERHEAD + PAGE_SIZE as usize - 1 & (-PAGE_SIZE) as usize;
        let base = __mmap(0 as *mut c_void,
                          len,
                          PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS,
                          -1,
                          0) as *mut u8;
        if base == ((-1isize) as usize) as *mut u8 {
            return 0 as *mut c_void;
        }
        c = base.offset((SIZE_ALIGN - OVERHEAD) as isize) as *mut chunk;
        (*c).csize = len - (SIZE_ALIGN - OVERHEAD);
        (*c).psize = SIZE_ALIGN - OVERHEAD;
        return chunk_to_mem(c);
    }
    let i = bin_index_up(n);
    loop {
        let mask = mal.binmap & (-((1usize << i) as isize)) as u64;
        if mask == 0 {
            c = expand_heap(n);
            if c == 0 as *mut chunk { return 0 as *mut c_void; }
            if alloc_rev(c) != 0 {
                let x = c;
                c = previous_chunk(c);
                let new = (*x).csize + chunk_size(c);
                (*c).csize = new;
                (*next_chunk(x)).psize = new;
            }
            break;
        }
        let j = first_set(mask);
        lock_bin(j);
        c = mal.bins[j as usize].head;
        if c != bin_to_chunk(j as usize) {
            if pretrim(c, n, i, j) == 0 {
                unbin(c, j);
            }
            unlock_bin(j);
            break;
        }
        unlock_bin(j);
    }
    // Now patch up in case we over-allocated
    trim(c, n);
    chunk_to_mem(c)
}
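The loop above is driven by mal.binmap, a 64-bit occupancy bitmap with one bit per bin. Masking it with -(1 << i) clears every bit below i, so first_set (a count-trailing-zeros, see a_ctz_64 above) returns the smallest non-empty bin at least as large as the request, and an empty mask means the heap has to be grown. A standalone sketch of that scan, illustrative only and assuming i < 64 as the real code does:

// Illustrative sketch of the binmap scan, not committed code.
fn smallest_eligible_bin(binmap: u64, i: u32) -> Option<u32> {
    // !0u64 << i has bits i..63 set, i.e. the same mask as -(1 << i).
    // Requires i < 64, as in the real code.
    let mask = binmap & (!0u64 << i);
    if mask == 0 {
        None // no bin is large enough: malloc falls back to expand_heap
    } else {
        Some(mask.trailing_zeros()) // what first_set()/a_ctz_64 computes
    }
}

fn main() {
    let binmap = (1u64 << 3) | (1u64 << 40); // bins 3 and 40 are non-empty
    assert_eq!(smallest_eligible_bin(binmap, 5), Some(40));
    assert_eq!(smallest_eligible_bin(binmap, 2), Some(3));
    assert_eq!(smallest_eligible_bin(binmap, 41), None);
}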
#[no_mangle]
pub unsafe extern "C" fn __malloc0(n: usize) -> *mut c_void {
    let p = malloc(n);
    if p as usize != 0 && !is_mmapped(mem_to_chunk(p)) {
        for i in 0..n {
            *(p as *mut u8).offset(i as isize) = 0;
        }
    }
    p
}
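One behavioral note: musl's C __malloc0 (removed above) zeroes the non-mmapped case one word at a time and skips words that are already zero, whereas this port zeroes the same region byte by byte. A closer word-wise transliteration might look like the sketch below; it is illustrative only, not the committed code, and like the C it rounds n up to whole words, relying on chunk granularity to leave that slack inside the chunk.

// Illustrative word-wise zeroing in the spirit of the C __malloc0;
// not part of this commit.
use core::mem::size_of;

unsafe fn zero_words(p: *mut u8, n: usize) {
    let words = (n + size_of::<usize>() - 1) / size_of::<usize>();
    let mut z = p as *mut usize;
    for _ in 0..words {
        // Only dirty words are written back, matching the C loop
        // `for (z=p; n; n--, z++) if (*z) *z=0;`.
        if *z != 0 {
            *z = 0;
        }
        z = z.offset(1);
    }
}

fn main() {
    let mut buf = [0xAAusize; 4];
    unsafe { zero_words(buf.as_mut_ptr() as *mut u8, 4 * size_of::<usize>()) };
    assert!(buf.iter().all(|&w| w == 0));
}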
#[no_mangle]
@@ -136,7 +218,9 @@ pub unsafe extern "C" fn pretrim(s: *mut chunk, n: usize, i: c_int, j: c_int) ->
        return 0;
    }
    if bin_index(n1-n) != j { return 0; }
    if bin_index(n1 - n) != j {
        return 0;
    }
    let next = next_chunk(s);
    let split = (s as *mut u8).offset(n as isize) as *mut chunk;
@@ -159,7 +243,9 @@ pub unsafe extern "C" fn pretrim(s: *mut chunk, n: usize, i: c_int, j: c_int) ->
pub unsafe extern "C" fn trim(s: *mut chunk, n: usize) {
    let n1 = chunk_size(s);
    if n >= n1 - DONTCARE { return; }
    if n >= n1 - DONTCARE {
        return;
    }
    let next = next_chunk(s);
    let split = (s as *mut u8).offset(n as isize) as *mut chunk;
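Both pretrim and trim split an over-sized chunk at the requested size and hand the tail back to a bin, but only when the surplus is worth it: trim bails out when fewer than DONTCARE (16) bytes would be recovered. A small sketch of that threshold decision, illustrative only:

// Illustrative sketch of trim()'s split threshold, not committed code.
const DONTCARE: usize = 16;

fn worth_splitting(chunk_size: usize, requested: usize) -> bool {
    // Mirrors `if n >= n1 - DONTCARE { return; }`: split only when the
    // request is more than DONTCARE bytes smaller than the chunk.
    requested < chunk_size - DONTCARE
}

fn main() {
    assert!(!worth_splitting(64, 56));   // only 8 bytes of slack: keep it
    assert!(worth_splitting(4096, 64));  // big surplus: split off the tail
}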
@@ -226,6 +312,10 @@ unsafe fn chunk_to_mem(c: *mut chunk) -> *mut c_void {
    (c as *mut u8).offset(OVERHEAD as isize) as *mut c_void
}
unsafe fn bin_to_chunk(i: usize) -> *mut chunk {
    mem_to_chunk(&mut mal.bins[i].head as *mut *mut chunk as *mut c_void)
}
unsafe fn chunk_size(c: *mut chunk) -> usize { (*c).csize & ((-2i64) as usize) }
unsafe fn chunk_psize(c: *mut chunk) -> usize { (*c).psize & ((-2i64) as usize) }
@@ -237,3 +327,7 @@ unsafe fn previous_chunk(c: *mut chunk) -> *mut chunk {
unsafe fn next_chunk(c: *mut chunk) -> *mut chunk {
    (c as *mut u8).offset(chunk_size(c) as isize) as *mut chunk
}
unsafe fn is_mmapped(c: *mut chunk) -> bool {
    ((*c).csize & 1) == 0
}
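The low bit of csize doubles as musl's C_INUSE flag: chunks handed out from the heap carry it, chunks served directly by mmap never do, which is why chunk_size masks it off and is_mmapped simply tests for its absence. A standalone sketch of that encoding, illustrative only and not the committed code:

// Illustrative sketch of the csize flag encoding, not committed code.
fn chunk_size_of(csize: usize) -> usize {
    csize & !1 // same as `csize & ((-2i64) as usize)` above
}

fn is_mmapped_csize(csize: usize) -> bool {
    csize & 1 == 0 // the in-use bit is never set on mmapped chunks
}

fn main() {
    let heap_chunk = 0x100 | 1; // 256-byte heap chunk, C_INUSE set
    let mmap_chunk = 0x2000;    // chunk carved straight out of an mmap
    assert_eq!(chunk_size_of(heap_chunk), 0x100);
    assert!(!is_mmapped_csize(heap_chunk));
    assert!(is_mmapped_csize(mmap_chunk));
}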
