Commit

Replace tabs following #define with spaces.
This resolves jemalloc#564.
jasone committed Jan 20, 2017
1 parent dcf70d3 commit 190a5af
Showing 110 changed files with 712 additions and 713 deletions.
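The change is purely mechanical: on every touched line, the whitespace that sat directly after #define becomes a space. As a rough sketch of how such a sweep could be automated (a hypothetical illustration, not the tooling actually used for this commit; the file glob and the single-space replacement are assumptions), a few lines of Python suffice:

import pathlib
import re

# Collapse whatever whitespace immediately follows "#define" at the start of
# a line into a single space; nothing else on the line is touched.
DEFINE_WS = re.compile(r"^(#define)[ \t]+", flags=re.MULTILINE)

for path in pathlib.Path(".").rglob("*.[ch]*"):   # .h, .h.in, .c, ...
    text = path.read_text()
    fixed = DEFINE_WS.sub(r"\1 ", text)
    if fixed != text:
        path.write_text(fixed)

Because only invisible whitespace moves, each changed line in the hunks below appears twice with identical visible text: the removed tab version first, marked with -, followed by the added space version, marked with +.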
10 changes: 5 additions & 5 deletions include/jemalloc/internal/arena_types.h
@@ -1,16 +1,16 @@
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H

-#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one slab. */
-#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
-#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
+#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
+#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)

/* Default decay time in seconds. */
-#define DECAY_TIME_DEFAULT 10
+#define DECAY_TIME_DEFAULT 10
/* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE 1000
+#define DECAY_NTICKS_PER_UPDATE 1000

typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_bin_info_s arena_bin_info_t;
8 changes: 4 additions & 4 deletions include/jemalloc/internal/assert.h
@@ -3,7 +3,7 @@
* assertion failure.
*/
#ifndef assert
-#define assert(e) do { \
+#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
@@ -14,7 +14,7 @@
#endif

#ifndef not_reached
-#define not_reached() do { \
+#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
@@ -26,7 +26,7 @@
#endif

#ifndef not_implemented
-#define not_implemented() do { \
+#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
@@ -36,7 +36,7 @@
#endif

#ifndef assert_not_implemented
-#define assert_not_implemented(e) do { \
+#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \
10 changes: 5 additions & 5 deletions include/jemalloc/internal/atomic_externs.h
@@ -2,11 +2,11 @@
#define JEMALLOC_INTERNAL_ATOMIC_EXTERNS_H

#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-#define atomic_read_u64(p) atomic_add_u64(p, 0)
+#define atomic_read_u64(p) atomic_add_u64(p, 0)
#endif
-#define atomic_read_u32(p) atomic_add_u32(p, 0)
-#define atomic_read_p(p) atomic_add_p(p, NULL)
-#define atomic_read_zu(p) atomic_add_zu(p, 0)
-#define atomic_read_u(p) atomic_add_u(p, 0)
+#define atomic_read_u32(p) atomic_add_u32(p, 0)
+#define atomic_read_p(p) atomic_add_p(p, NULL)
+#define atomic_read_zu(p) atomic_add_zu(p, 0)
+#define atomic_read_u(p) atomic_add_u(p, 0)

#endif /* JEMALLOC_INTERNAL_ATOMIC_EXTERNS_H */
42 changes: 21 additions & 21 deletions include/jemalloc/internal/bitmap_types.h
@@ -2,18 +2,18 @@
#define JEMALLOC_INTERNAL_BITMAP_TYPES_H

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
-#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
+#define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
+#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)

typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
-#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
+#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG

/* Number of bits per group. */
-#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
-#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
-#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
+#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
+#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
+#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)

/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
@@ -25,38 +25,38 @@ typedef unsigned long bitmap_t;
#endif

/* Number of groups required to store a given number of bits. */
-#define BITMAP_BITS2GROUPS(nbits) \
+#define BITMAP_BITS2GROUPS(nbits) \
(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)

/*
* Number of groups required at a particular level for a given number of bits.
*/
-#define BITMAP_GROUPS_L0(nbits) \
+#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
-#define BITMAP_GROUPS_L1(nbits) \
+#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
-#define BITMAP_GROUPS_L2(nbits) \
+#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
-#define BITMAP_GROUPS_L3(nbits) \
+#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
-#define BITMAP_GROUPS_L4(nbits) \
+#define BITMAP_GROUPS_L4(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))

/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
-#define BITMAP_GROUPS_1_LEVEL(nbits) \
+#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
-#define BITMAP_GROUPS_2_LEVEL(nbits) \
+#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
-#define BITMAP_GROUPS_3_LEVEL(nbits) \
+#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
-#define BITMAP_GROUPS_4_LEVEL(nbits) \
+#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
-#define BITMAP_GROUPS_5_LEVEL(nbits) \
+#define BITMAP_GROUPS_5_LEVEL(nbits) \
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))

/*
@@ -92,9 +92,9 @@ typedef unsigned long bitmap_t;
* unused trailing entries in bitmap_info_t structures; the bitmaps themselves
* are not impacted.
*/
-#define BITMAP_MAX_LEVELS 5
+#define BITMAP_MAX_LEVELS 5

-#define BITMAP_INFO_INITIALIZER(nbits) { \
+#define BITMAP_INFO_INITIALIZER(nbits) { \
/* nbits. */ \
nbits, \
/* nlevels. */ \
@@ -119,9 +119,9 @@

#else /* BITMAP_USE_TREE */

-#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
+#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)

-#define BITMAP_INFO_INITIALIZER(nbits) { \
+#define BITMAP_INFO_INITIALIZER(nbits) { \
/* nbits. */ \
nbits, \
/* ngroups. */ \
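An aside to make the group-count arithmetic in bitmap_types.h concrete (assuming a 64-bit platform, so LG_SIZEOF_BITMAP = LG_SIZEOF_LONG = 3, and an illustrative nbits = 512): each bitmap_t group then holds BITMAP_GROUP_NBITS = 1U << (3 + 3) = 64 bits, so BITMAP_BITS2GROUPS(512) = (512 + 63) >> 6 = 8 level-0 groups, BITMAP_GROUPS_L1(512) = BITMAP_BITS2GROUPS(8) = 1, and a two-level bitmap therefore needs BITMAP_GROUPS_2_LEVEL(512) = 8 + 1 = 9 groups in total.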
6 changes: 3 additions & 3 deletions include/jemalloc/internal/ckh_types.h
@@ -9,14 +9,14 @@ typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);

/* Maintain counters used to get an idea of performance. */
-/* #define CKH_COUNT */
+/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
-/* #define CKH_VERBOSE */
+/* #define CKH_VERBOSE */

/*
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)

#endif /* JEMALLOC_INTERNAL_CKH_TYPES_H */
6 changes: 3 additions & 3 deletions include/jemalloc/internal/ctl_externs.h
@@ -13,7 +13,7 @@ void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);

-#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
+#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
@@ -23,15 +23,15 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)

-#define xmallctlnametomib(name, mibp, miblenp) do { \
+#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)

-#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
+#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
4 changes: 2 additions & 2 deletions include/jemalloc/internal/extent_dss_types.h
@@ -8,7 +8,7 @@ typedef enum {

dss_prec_limit = 3
} dss_prec_t;
-#define DSS_PREC_DEFAULT dss_prec_secondary
-#define DSS_DEFAULT "secondary"
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"

#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_TYPES_H */
2 changes: 1 addition & 1 deletion include/jemalloc/internal/extent_types.h
@@ -3,6 +3,6 @@

typedef struct extent_s extent_t;

-#define EXTENT_HOOKS_INITIALIZER NULL
+#define EXTENT_HOOKS_INITIALIZER NULL

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
70 changes: 35 additions & 35 deletions include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_H
-#define JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H

#ifdef __cplusplus
extern "C" {
@@ -12,7 +12,7 @@ extern "C" {
#include <sys/ktrace.h>
#endif

-#define JEMALLOC_NO_DEMANGLE
+#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
@@ -166,7 +166,7 @@ static const bool have_thp =

#include "jemalloc/internal/ph.h"
#ifndef __PGI
-#define RB_COMPACT
+#define RB_COMPACT
#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
@@ -224,34 +224,34 @@ typedef unsigned szind_t;
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
-#define MALLOCX_ARENA_BITS 12
-#define MALLOCX_TCACHE_BITS 12
-#define MALLOCX_LG_ALIGN_BITS 6
-#define MALLOCX_ARENA_SHIFT 20
-#define MALLOCX_TCACHE_SHIFT 8
-#define MALLOCX_ARENA_MASK \
+#define MALLOCX_ARENA_BITS 12
+#define MALLOCX_TCACHE_BITS 12
+#define MALLOCX_LG_ALIGN_BITS 6
+#define MALLOCX_ARENA_SHIFT 20
+#define MALLOCX_TCACHE_SHIFT 8
+#define MALLOCX_ARENA_MASK \
(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
-#define MALLOCX_ARENA_MAX ((1 << MALLOCX_ARENA_BITS) - 2)
-#define MALLOCX_TCACHE_MASK \
+#define MALLOCX_ARENA_MAX ((1 << MALLOCX_ARENA_BITS) - 2)
+#define MALLOCX_TCACHE_MASK \
(((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
-#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
-#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
+#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
+#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
-#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
-#define MALLOCX_ALIGN_GET(flags) \
+#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
-#define MALLOCX_ZERO_GET(flags) \
+#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))

-#define MALLOCX_TCACHE_GET(flags) \
+#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
-#define MALLOCX_ARENA_GET(flags) \
+#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)

/* Smallest size class to support. */
-#define TINY_MIN (1U << LG_TINY_MIN)
+#define TINY_MIN (1U << LG_TINY_MIN)

/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
@@ -312,25 +312,25 @@ typedef unsigned szind_t;
# endif
#endif

-#define QUANTUM ((size_t)(1U << LG_QUANTUM))
-#define QUANTUM_MASK (QUANTUM - 1)
+#define QUANTUM ((size_t)(1U << LG_QUANTUM))
+#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
-#define QUANTUM_CEILING(a) \
+#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

-#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
-#define LONG_MASK (LONG - 1)
+#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
+#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
-#define LONG_CEILING(a) \
+#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)

-#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
-#define PTR_MASK (SIZEOF_PTR - 1)
+#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
+#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
-#define PTR_CEILING(a) \
+#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)

/*
@@ -340,24 +340,24 @@ typedef unsigned szind_t;
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
-#define LG_CACHELINE 6
-#define CACHELINE 64
-#define CACHELINE_MASK (CACHELINE - 1)
+#define LG_CACHELINE 6
+#define CACHELINE 64
+#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
-#define CACHELINE_CEILING(s) \
+#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Return the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2BASE(a, alignment) \
+#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))

/* Return the offset between a and the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
-#define ALIGNMENT_CEILING(s, alignment) \
+#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & ((~(alignment)) + 1))

/* Declare a variable-length array. */
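To make the MALLOCX_*_GET() decoding macros from the @@ -224,34 +224,34 @@ hunk above concrete, take the purely illustrative value flags = (3 << MALLOCX_ARENA_SHIFT) | (9 << MALLOCX_TCACHE_SHIFT) | 4 = (3 << 20) | (9 << 8) | 4. Then MALLOCX_ARENA_GET(flags) = 3 - 1 = 2 and MALLOCX_TCACHE_GET(flags) = 9 - 2 = 7 (the subtractions undo the encoding bias reflected in MALLOCX_ARENA_MAX and MALLOCX_TCACHE_MAX), MALLOCX_ALIGN_GET_SPECIFIED(flags) = ZU(1) << 4 = 16-byte alignment, and MALLOCX_ZERO_GET(flags) is false because the z bit in the low byte (see the aaaaaaaa aaaatttt tttttttt 0znnnnnn layout comment) is clear.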
2 changes: 1 addition & 1 deletion include/jemalloc/internal/jemalloc_internal_decls.h
@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
-#define JEMALLOC_INTERNAL_DECLS_H
+#define JEMALLOC_INTERNAL_DECLS_H

#include <math.h>
#ifdef _WIN32
2 changes: 1 addition & 1 deletion include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_DEFS_H_
-#define JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
