
Commit

Fix mimalloc formatting
Summary:

Test Plan:

Reviewers:

Subscribers:

Tasks:

Tags:
DinoV committed Sep 27, 2023
1 parent 18c7961 commit 41b62cb
Showing 20 changed files with 239 additions and 242 deletions.
14 changes: 7 additions & 7 deletions Include/mimalloc/mimalloc.h
@@ -340,18 +340,18 @@ typedef enum mi_option_e {
mi_option_deprecated_segment_cache,
mi_option_deprecated_page_reset,
mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay,
mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay,
mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all.
mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
mi_option_os_tag, // tag used for OS logging (macOS only for now)
mi_option_max_errors, // issue at most N error messages
mi_option_max_warnings, // issue at most N warning messages
mi_option_max_segment_reclaim,
mi_option_max_segment_reclaim,
mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
mi_option_arena_reserve, // initial memory size in KiB for arena reservation (1GiB on 64-bit)
mi_option_arena_purge_mult,
mi_option_arena_purge_mult,
mi_option_purge_extend_delay,
_mi_option_last,
// legacy option names
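For context, a usage sketch of tuning the options above through mimalloc's public option API (illustration only, not part of this change; `mi_option_set`/`mi_option_get` are declared elsewhere in mimalloc.h):

#include <mimalloc.h>

static void configure_options_sketch(void) {
  mi_option_set(mi_option_purge_delay, 0);      // purge freed OS memory immediately
  mi_option_set(mi_option_max_errors, 4);       // cap the number of error messages
  long delay = mi_option_get(mi_option_purge_delay);
  (void)delay;
}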
@@ -521,7 +521,7 @@ template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : publi
protected:
std::shared_ptr<mi_heap_t> heap;
template<class U, bool D> friend struct _mi_heap_stl_allocator_common;

_mi_heap_stl_allocator_common() {
mi_heap_t* hp = mi_heap_new();
this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
@@ -538,7 +538,7 @@ template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : publi
template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
using typename _mi_heap_stl_allocator_common<T, false>::size_type;
mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { } // creates fresh heap that is deleted when the destructor is called
mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { } // no delete nor destroy on the passed in heap
mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { } // no delete nor destroy on the passed in heap
template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }

mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
@@ -555,7 +555,7 @@ template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x,
template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
using typename _mi_heap_stl_allocator_common<T, true>::size_type;
mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { } // creates fresh heap that is destroyed when the destructor is called
mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap
mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap
template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }

mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
2 changes: 1 addition & 1 deletion Include/mimalloc/mimalloc/atomic.h
@@ -300,7 +300,7 @@ typedef _Atomic(uintptr_t) mi_atomic_once_t;

// Returns true only on the first invocation
static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
uintptr_t expected = 0;
return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
}
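For reference, a minimal usage sketch of this one-time-initialization primitive (illustration only, not part of this change):

#include <stdio.h>

static mi_atomic_once_t init_once;              // zero-initialized: not yet run

static void ensure_initialized(void) {
  if (mi_atomic_once(&init_once)) {
    // exactly one caller, the first across all threads, reaches this branch
    printf("one-time initialization\n");
  }
}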
2 changes: 1 addition & 1 deletion Include/mimalloc/mimalloc/internal.h
@@ -89,7 +89,7 @@ void _mi_thread_abandon(mi_tld_t *tld);

// os.c
void _mi_os_init(void); // called from process init
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);

14 changes: 7 additions & 7 deletions Include/mimalloc/mimalloc/prim.h
@@ -35,10 +35,10 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config );

// Free OS memory
int _mi_prim_free(void* addr, size_t size );

// Allocate OS memory. Return NULL on error.
// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
// which will later be committed explicitly using `_mi_prim_commit`.
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: !commit => !allow_large
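To make the contract concrete, a sketch of what a POSIX-style backend could do (an assumption for illustration; the real implementations live in the platform prim sources, and error handling is elided):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

static void* prim_alloc_sketch(size_t size, bool commit, bool* is_zero) {
  int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;  // reserved => no access
  void* p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return NULL;                          // NULL on error
  *is_zero = true;    // anonymous mappings are zero-initialized by the OS
  return p;
}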
@@ -82,11 +82,11 @@ mi_msecs_t _mi_prim_clock_now(void);
typedef struct mi_process_info_s {
mi_msecs_t elapsed;
mi_msecs_t utime;
mi_msecs_t stime;
size_t current_rss;
size_t peak_rss;
mi_msecs_t stime;
size_t current_rss;
size_t peak_rss;
size_t current_commit;
size_t peak_commit;
size_t peak_commit;
size_t page_faults;
} mi_process_info_t;
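A small reading sketch for this struct (illustration only; it assumes an accompanying `_mi_prim_process_info(mi_process_info_t*)` primitive outside the shown hunk, and zeroes the struct first since a backend may not fill every field):

#include <stdio.h>
#include <string.h>

static void print_process_info_sketch(void) {
  mi_process_info_t pinfo;
  memset(&pinfo, 0, sizeof(pinfo));            // unsupported fields stay zero
  _mi_prim_process_info(&pinfo);               // assumed primitive, see above
  printf("peak rss: %zu, page faults: %zu\n", pinfo.peak_rss, pinfo.page_faults);
}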

@@ -117,7 +117,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);

//-------------------------------------------------------------------
// Thread id: `_mi_prim_thread_id()`
//
//
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms as
// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
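As an illustration of the portable fallback such a specialization can reduce to (an assumption, not the code from this header), the address of a thread-local variable is cheap to read and unique per live thread:

#include <stdint.h>

static inline uintptr_t thread_id_sketch(void) {
  static _Thread_local int tls_marker;
  return (uintptr_t)&tls_marker;    // distinct per thread, stable for its lifetime
}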
4 changes: 2 additions & 2 deletions Include/mimalloc/mimalloc/track.h
@@ -34,7 +34,7 @@ The corresponding `mi_track_free` still uses the block start pointer and origina
The `mi_track_resize` is currently unused but could be called on reallocations within a block.
`mi_track_init` is called at program start.
The following macros are for tools like asan and valgrind to track whether memory is
The following macros are for tools like asan and valgrind to track whether memory is
defined, undefined, or not accessible at all:
#define mi_track_mem_defined(p,size)
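For a sense of what these look like when a tool is enabled, a sketch that maps them onto Valgrind's memcheck client requests (an assumption for illustration; the tool-specific definitions in this header may differ):

#include <valgrind/memcheck.h>

// Sketch only: how a memcheck-backed build could define the three hooks.
#define sketch_track_mem_defined(p,size)     VALGRIND_MAKE_MEM_DEFINED(p,size)
#define sketch_track_mem_undefined(p,size)   VALGRIND_MAKE_MEM_UNDEFINED(p,size)
#define sketch_track_mem_noaccess(p,size)    VALGRIND_MAKE_MEM_NOACCESS(p,size)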
@@ -94,7 +94,7 @@ defined, undefined, or not accessible at all:
// no tracking

#define MI_TRACK_ENABLED 0
#define MI_TRACK_HEAP_DESTROY 0
#define MI_TRACK_HEAP_DESTROY 0
#define MI_TRACK_TOOL "none"

#define mi_track_malloc_size(p,reqsize,size,zero)
22 changes: 11 additions & 11 deletions Include/mimalloc/mimalloc/types.h
@@ -183,7 +183,7 @@ typedef int32_t mi_ssize_t;

#define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/4) // 8KiB on 64-bit
#define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128KiB on 64-bit
#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_LARGE_OBJ_SIZE_MAX (MI_SEGMENT_SIZE/2) // 32MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)

@@ -201,10 +201,10 @@ typedef int32_t mi_ssize_t;
#define MI_HUGE_BLOCK_SIZE ((uint32_t)(2*MI_GiB))

// blocks up to this size are always allocated aligned
#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)
#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)

// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
#define MI_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
#define MI_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
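For illustration (assuming the public `mi_malloc_aligned`/`mi_free` API and the 64-bit defaults quoted in this file), a 4 MiB alignment stays below MI_ALIGNMENT_MAX and is served from a regular segment, while a larger alignment would force a dedicated huge segment:

#include <mimalloc.h>

static void alignment_sketch(void) {
  void* p = mi_malloc_aligned(1024, 4 * 1024 * 1024);   // below MI_ALIGNMENT_MAX
  mi_free(p);
}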


// ------------------------------------------------------
@@ -293,7 +293,7 @@ typedef uintptr_t mi_thread_free_t;
typedef struct mi_page_s {
// "owned" by the segment
uint32_t slice_count; // slices in this page (0 if not a page)
uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
uint8_t is_committed : 1; // `true` if the page virtual memory is committed
uint8_t is_zero_init : 1; // `true` if the page was initially zero initialized
uint8_t tag : 4; // heap tag (mi_heap_tag_t)
@@ -349,17 +349,17 @@ typedef enum mi_segment_kind_e {
// A segment holds a commit mask where a bit is set if
// the corresponding MI_COMMIT_SIZE area is committed.
// The MI_COMMIT_SIZE must be a multiple of the slice
// size. If it is equal we have the most fine grained
// size. If it is equal we have the most fine grained
// decommit (but setting it higher can be more efficient).
// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
// be committed in one go which can be set higher than
// MI_COMMIT_SIZE for efficiency (while the decommit mask
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
// ------------------------------------------------------

#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE)
#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE)
#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
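Working the arithmetic through with the 64-bit sizes quoted earlier in this header (a 32 MiB large-object maximum, i.e. a 64 MiB segment, and 64 KiB slices):

//   MI_COMMIT_MASK_BITS        = MI_SEGMENT_SIZE / MI_COMMIT_SIZE = 64 MiB / 64 KiB = 1024
//   MI_COMMIT_MASK_FIELD_BITS  = MI_SIZE_BITS                     = 64
//   MI_COMMIT_MASK_FIELD_COUNT = 1024 / 64                        = 16 fields of 64 bits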

@@ -432,11 +432,11 @@ typedef struct mi_segment_s {

// from here is zero initialized
struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)

size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
size_t used; // count of pages in use
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`

size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
size_t segment_info_slices; // initial slices we are using segment info and possible guard pages.
@@ -507,7 +507,7 @@ struct mi_heap_s {
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
_Atomic(mi_block_t*) thread_delayed_free;
mi_threadid_t thread_id; // thread this heap belongs to
mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
mi_random_ctx_t random; // random number context used for secure allocation
6 changes: 3 additions & 3 deletions Objects/mimalloc/alloc-aligned.c
@@ -47,7 +47,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block
// zero afterwards as only the area from the aligned_p may be committed!
if (p == NULL) return NULL;
if (p == NULL) return NULL;
}
else {
// otherwise over-allocate
@@ -73,7 +73,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
mi_assert_internal(mi_usable_size(aligned_p)>=size);
mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);

// now zero the block if needed
if (alignment > MI_ALIGNMENT_MAX) {
// for the tracker, on huge aligned allocations only from the start of the large block is defined
@@ -85,7 +85,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*

if (p != aligned_p) {
mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
}
}
return aligned_p;
}

14 changes: 7 additions & 7 deletions Objects/mimalloc/alloc.c
@@ -70,7 +70,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
}
else {
_mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
}
}
}

#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
@@ -126,7 +126,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
if (size == 0) { size = sizeof(void*); }
#endif
mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
@@ -359,15 +359,15 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#if (MI_STAT < 2)
#if (MI_STAT < 2)
MI_UNUSED(block);
#endif
mi_heap_t* const heap = mi_heap_get_default();
const size_t bsize = mi_page_usable_block_size(page);
#if (MI_STAT>1)
const size_t usize = mi_page_usable_size_of(page, block);
mi_heap_stat_decrease(heap, malloc, usize);
#endif
#endif
if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, normal, bsize);
#if (MI_STAT > 1)
@@ -379,7 +379,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
}
else {
mi_heap_stat_decrease(heap, huge, bsize);
}
}
}
#else
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
@@ -418,7 +418,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// that is safe as these are constant and the page won't be freed (as the block is not freed yet).
mi_check_padding(page, block);
_mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection

// huge page segments are always abandoned and can be freed immediately
mi_segment_t* segment = _mi_page_segment(page);
if (segment->kind == MI_SEGMENT_HUGE) {
Expand All @@ -434,7 +434,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
_mi_segment_huge_page_reset(segment, page, block);
#endif
}

#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content
_mi_debug_fill(page, block, MI_DEBUG_FREED, mi_usable_size(block));
