Skip to content

Commit

Permalink
Defrag: activate it only if running modified version of Jemalloc.
Browse files Browse the repository at this point in the history
This commit also includes minor aesthetic changes like removal of
trailing spaces.
  • Loading branch information
antirez committed Jan 10, 2017
1 parent 5ab6a54 commit 173d692
Show file tree
Hide file tree
Showing 4 changed files with 40 additions and 26 deletions.
4 changes: 4 additions & 0 deletions deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
Expand Up @@ -100,3 +100,7 @@
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif

/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
* function. */
#define JEMALLOC_FRAG_HINT
44 changes: 22 additions & 22 deletions src/defrag.c
Expand Up @@ -39,14 +39,14 @@
#include <assert.h>
#include <stddef.h>

#if defined(USE_JEMALLOC) && defined(MALLOCX_TCACHE_NONE)
#ifdef HAVE_DEFRAG

/* this method was added to jemalloc in order to help us understand which
 * pointers are worth moving and which aren't */
int je_get_defrag_hint(void* ptr, int *bin_util, int *run_util);

/* Defrag helper for generic allocations.
*
*
* returns NULL in case the allocation wasn't moved.
* when it returns a non-null value, the old pointer was already released
* and should NOT be accessed. */
Expand All @@ -58,13 +58,13 @@ void* activeDefragAlloc(void *ptr) {
server.stat_active_defrag_misses++;
return NULL;
}
/* if this run is more utilized than the average utilization in this bin (or it is full), skip it.
/* if this run is more utilized than the average utilization in this bin (or it is full), skip it.
* this will eventually move all the allocations from relatively empty runs into relatively full runs. */
if (run_util > bin_util || run_util == 1<<16) {
server.stat_active_defrag_misses++;
return NULL;
}
/* move this allocation to a new allocation.
/* move this allocation to a new allocation.
* make sure not to use the thread cache. so that we don't get back the same pointers we try to free */
size = zmalloc_size(ptr);
newptr = zmalloc_no_tcache(size);
Expand All @@ -74,7 +74,7 @@ void* activeDefragAlloc(void *ptr) {
}

/*Defrag helper for sds strings
*
*
* returns NULL in case the allocation wasn't moved.
* when it returns a non-null value, the old pointer was already released
* and should NOT be accessed. */
Expand All @@ -90,7 +90,7 @@ sds activeDefragSds(sds sdsptr) {
}

/* Defrag helper for robj and/or string objects
*
*
* returns NULL in case the allocation wasn't moved.
* when it returns a non-null value, the old pointer was already released
* and should NOT be accessed. */
Expand Down Expand Up @@ -221,7 +221,7 @@ double *zslDefrag(zskiplist *zsl, double score, sds oldele, sds newele) {
x = x->level[i].forward;
update[i] = x;
}

/* update the robj pointer inside the skip list record. */
x = x->level[0].forward;
serverAssert(x && score == x->score && x->ele==oldele);
Expand All @@ -243,7 +243,7 @@ double *zslDefrag(zskiplist *zsl, double score, sds oldele, sds newele) {
* newkey may be null if the key pointer wasn't moved.
* return value is the dictEntry if found, or NULL if not found.
* NOTE: this is very ugly code, but it lets us avoid the complication of doing a scan on another dict. */
dictEntry* replaceSateliteDictKeyPtrAndOrDifragDictEntry(dict *d, sds oldkey, sds newkey, unsigned int hash, int *defragged) {
dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, unsigned int hash, int *defragged) {
dictEntry **deref = dictFindEntryRefByPtrAndHash(d, oldkey, hash);
if (deref) {
dictEntry *de = *deref;
Expand All @@ -269,7 +269,7 @@ int defargKey(redisDb *db, dictEntry *de) {
dictIterator *di;
int defragged = 0;
sds newsds;

/* try to defrag the key name */
newsds = activeDefragSds(keysds);
if (newsds)
Expand All @@ -279,7 +279,7 @@ int defargKey(redisDb *db, dictEntry *de) {
* I can't search in db->expires for that key after I already released the pointer it holds
* it won't be able to do the string compare */
unsigned int hash = dictGetHash(db->dict, de->key);
replaceSateliteDictKeyPtrAndOrDifragDictEntry(db->expires, keysds, newsds, hash, &defragged);
replaceSateliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged);
}

/* try to defrag robj and / or string value */
Expand Down Expand Up @@ -334,7 +334,7 @@ int defargKey(redisDb *db, dictEntry *de) {
} else if (ob->encoding == OBJ_ENCODING_INTSET) {
intset *is = ob->ptr;
intset *newis = activeDefragAlloc(is);
if (newis)
if (newis)
defragged++, ob->ptr = newis;
} else {
serverPanic("Unknown set encoding");
Expand Down Expand Up @@ -407,7 +407,7 @@ void defragScanCallback(void *privdata, const dictEntry *de) {
if(defragged)
server.stat_active_defrag_key_hits++;
else
server.stat_active_defrag_key_misses++;
server.stat_active_defrag_key_misses++;
}

/* defrag scan callback for each hash table bucket,
Expand Down Expand Up @@ -439,8 +439,8 @@ float getAllocatorFragmentation(size_t *out_frag_bytes) {
size_t rss_bytes = resident - allocated;
if(out_frag_bytes)
*out_frag_bytes = frag_bytes;
serverLog(LL_DEBUG,
"allocated=%zu, active=%zu, resident=%zu, frag=%.0f%% (%.0f%% rss), frag_bytes=%zu (%zu%% rss)",
serverLog(LL_DEBUG,
"allocated=%zu, active=%zu, resident=%zu, frag=%.0f%% (%.0f%% rss), frag_bytes=%zu (%zu%% rss)",
allocated, active, resident, frag_pct, rss_pct, frag_bytes, rss_bytes);
return frag_pct;
}
Expand All @@ -459,10 +459,10 @@ void activeDefragCycle(void) {
unsigned int iterations = 0;
unsigned long long defragged = server.stat_active_defrag_hits;
long long start, timelimit;

if (server.aof_child_pid!=-1 || server.rdb_child_pid!=-1)
return; /* defragging memory while there's a fork will just do damage. */

/* once a second, check if the fragmentation justifies starting a scan or making it more aggressive */
run_with_period(1000) {
size_t frag_bytes;
Expand All @@ -472,16 +472,16 @@ void activeDefragCycle(void) {
if(frag_pct < server.active_defrag_threshold_lower || frag_bytes < server.active_defrag_ignore_bytes)
return;
}

/* calculate the adaptive aggressiveness of the defrag */
int cpu_pct = INTERPOLATE(frag_pct, server.active_defrag_threshold_lower, server.active_defrag_threshold_upper,
int cpu_pct = INTERPOLATE(frag_pct, server.active_defrag_threshold_lower, server.active_defrag_threshold_upper,
server.active_defrag_cycle_min, server.active_defrag_cycle_max);
cpu_pct = LIMIT(cpu_pct, server.active_defrag_cycle_min, server.active_defrag_cycle_max);
/* we allow increasing the aggressiveness during a scan, but don't reduce it */
if (!server.active_defrag_running || cpu_pct > server.active_defrag_running) {
server.active_defrag_running = cpu_pct;
serverLog(LL_VERBOSE,
"Starting active defrag, frag=%.0f%%, frag_bytes=%zu, cpu=%d%%",
serverLog(LL_VERBOSE,
"Starting active defrag, frag=%.0f%%, frag_bytes=%zu, cpu=%d%%",
frag_pct, frag_bytes, cpu_pct);
}
}
Expand All @@ -500,7 +500,7 @@ void activeDefragCycle(void) {
long long now = ustime();
size_t frag_bytes;
float frag_pct = getAllocatorFragmentation(&frag_bytes);
serverLog(LL_VERBOSE,
serverLog(LL_VERBOSE,
"Active defrag done in %dms, reallocated=%d, frag=%.0f%%, frag_bytes=%zu",
(int)((now - start_scan)/1000), (int)(server.stat_active_defrag_hits - start_stat), frag_pct, frag_bytes);

Expand Down Expand Up @@ -536,7 +536,7 @@ void activeDefragCycle(void) {
} while(1);
}

#else /* USE_JEMALLOC */
#else /* HAVE_DEFRAG */

void activeDefragCycle(void) {
/* not implemented yet*/
Expand Down
4 changes: 2 additions & 2 deletions src/zmalloc.c
Expand Up @@ -119,8 +119,8 @@ void *zmalloc(size_t size) {

/* Allocation and free functions that bypass the thread cache
* and go straight to the allocator arena bins.
* Currently implemented only for jemalloc */
#if defined(USE_JEMALLOC) && defined(MALLOCX_TCACHE_NONE)
* Currently implemented only for jemalloc. Used for online defragmentation. */
#ifdef HAVE_DEFRAG
void *zmalloc_no_tcache(size_t size) {
void *ptr = mallocx(size+PREFIX_SIZE, MALLOCX_TCACHE_NONE);
if (!ptr) zmalloc_oom_handler(size);
Expand Down
14 changes: 12 additions & 2 deletions src/zmalloc.h
Expand Up @@ -65,12 +65,17 @@
#define ZMALLOC_LIB "libc"
#endif

/* We can enable the Redis defrag capabilities only if we are using Jemalloc
* and the version used is our special version modified for Redis having
* the ability to return per-allocation fragmentation hints. */
#if defined(USE_JEMALLOC) && defined(JEMALLOC_FRAG_HINT)
#define HAVE_DEFRAG
#endif

void *zmalloc(size_t size);
void *zcalloc(size_t size);
void *zrealloc(void *ptr, size_t size);
void zfree(void *ptr);
void zfree_no_tcache(void *ptr);
void *zmalloc_no_tcache(size_t size);
char *zstrdup(const char *s);
size_t zmalloc_used_memory(void);
void zmalloc_enable_thread_safeness(void);
Expand All @@ -82,6 +87,11 @@ size_t zmalloc_get_smap_bytes_by_field(char *field, long pid);
size_t zmalloc_get_memory_size(void);
void zlibc_free(void *ptr);

#ifdef HAVE_DEFRAG
void zfree_no_tcache(void *ptr);
void *zmalloc_no_tcache(size_t size);
#endif

#ifndef HAVE_MALLOC_SIZE
size_t zmalloc_size(void *ptr);
#endif
Expand Down

0 comments on commit 173d692

Please sign in to comment.