Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

merge LRU queues, other fixes.

Summary: What's life without a memcached diff in diffcamp? :)
         
         There are two key changes:
         
         1) there is now only one LRU queue.  There is no benefit to maintaining two queues, and a single queue makes for simpler code.
         2) do_add_delta takes a key/nkey pair instead of an item pointer.  This has two key advantages:
         a) it avoids having to look up the item again when we don't do an update-in-place, and it eliminates the race condition between the item_get and the add_delta locks.
         b) it is enormously helpful when we allow the splitting of keys across data chunks, as now we have a contiguous copy of the key.
         
         Other changes include:
         - warnings are now errors, except "deprecated" warnings (generated by OSX, ugh) which are ignored.
         - assoc_delete no longer requires that the item we delete be the one expected, since this assumption can be wrong when there are race conditions.
         - prefix stats now take a key/nkey pair instead of just key, since keys are no longer null-terminated.
         - append_to_buffer didn't properly reserve space for the terminator.

Reviewed By: marc

Test Plan: stress test passes.
           libmcc tests pass.
           flat allocator unit tests pass.
           ran in production.

Revert: OK


git-svn-id: http://svn.facebook.com/svnroot/projects/memcached/trunk@108366 2c7ba8d8-a2f7-0310-a573-de162e16dcc7
  • Loading branch information...
commit 90e3f1814c21db1ea383a0e5ad7354c946171c8f 1 parent 5d88e64
ttung authored
View
2  Makefile.am
@@ -7,7 +7,7 @@ memcached_SOURCES = memcached.c slabs.c slabs.h \
sigseg.c sigseg.h conn_buffer.c conn_buffer.h \
memory_pool.h memory_pool_classes.h
memcached_debug_SOURCES = $(memcached_SOURCES)
-memcached_CFLAGS = -Wall
+memcached_CFLAGS = -Wall -Werror -Wno-deprecated-declarations
memcached_CPPFLAGS = -DNDEBUG -DNO_CPP_DEMANGLE
memcached_LDADD = @LIBOBJS@
memcached_LDFLAGS = -rdynamic
View
3  assoc.c
@@ -626,9 +626,8 @@ item* assoc_update(item *it) {
}
-void assoc_delete(const char *key, const size_t nkey, item_ptr_t to_be_deleted) {
+void assoc_delete(const char *key, const size_t nkey) {
item_ptr_t* before = _hashitem_before(key, nkey);
- assert(*before == to_be_deleted);
if (*before) {
item_ptr_t next = ITEM_PTR_h_next(*before);
View
2  assoc.h
@@ -9,7 +9,7 @@ void assoc_init(void);
item *assoc_find(const char *key, const size_t nkey);
int assoc_insert(item *item);
item* assoc_update(item *it);
-void assoc_delete(const char *key, const size_t nkey, item_ptr_t iptr);
+void assoc_delete(const char *key, const size_t nkey);
void do_assoc_move_next_bucket(void);
uint32_t hash( const void *key, size_t length, const uint32_t initval);
int do_assoc_expire_regex(char *pattern);
View
27 binary_sm.c
@@ -527,11 +527,12 @@ static inline bp_handler_res_t handle_direct_receive(conn* c)
value_len -= c->u.key_value_req.keylen;
if (settings.detail_enabled) {
- stats_prefix_record_set(c->bp_key);
+ stats_prefix_record_set(c->bp_key, c->u.key_value_req.keylen);
}
if (settings.verbose > 1) {
- fprintf(stderr, ">%d receiving key %s\n", c->sfd, c->bp_key);
+ fprintf(stderr, ">%d receiving key %*s\n", c->sfd,
+ c->u.key_value_req.keylen, c->bp_key);
}
it = item_alloc(c->bp_key, c->u.key_value_req.keylen,
@@ -805,10 +806,11 @@ static void handle_get_cmd(conn* c)
{
value_rep_t* rep;
item* it;
+ size_t nkey = ntohl(c->u.key_req.body_length) -
+ (sizeof(key_req_t) - BINARY_PROTOCOL_REQUEST_HEADER_SZ);
// find the desired item.
- it = item_get(c->bp_key, ntohl(c->u.key_req.body_length) -
- (sizeof(key_req_t) - BINARY_PROTOCOL_REQUEST_HEADER_SZ));
+ it = item_get(c->bp_key, nkey);
// handle the counters. do this all together because lock/unlock is costly.
STATS_LOCK();
@@ -816,13 +818,14 @@ static void handle_get_cmd(conn* c)
if (it) {
stats.get_hits ++;
stats_get(ITEM_nkey(it) + ITEM_nbytes(it));
+ stats.get_bytes += ITEM_nbytes(it);
} else {
stats.get_misses ++;
}
- STATS_UNLOCK();
if (settings.detail_enabled) {
- stats_prefix_record_get(c->bp_key, NULL != it);
+ stats_prefix_record_get(c->bp_key, nkey, (NULL != it) ? ITEM_nbytes(it) : 0, NULL != it);
}
+ STATS_UNLOCK();
// we only need to reply if we have a hit or if it is a non-silent get.
if (it ||
@@ -971,14 +974,14 @@ static void handle_delete_cmd(conn* c)
{
empty_rep_t* rep;
item* it;
- size_t key_length = c->u.key_number_req.keylen;
+ size_t nkey = c->u.key_number_req.keylen;
time_t exptime = ntohl(c->u.key_number_req.number);
if (settings.detail_enabled) {
- stats_prefix_record_delete(c->bp_key);
+ stats_prefix_record_delete(c->bp_key, nkey);
}
- it = item_get(c->bp_key, key_length);
+ it = item_get(c->bp_key, nkey);
if (it ||
c->u.key_number_req.cmd == BP_DELETE_CMD) {
@@ -1043,11 +1046,11 @@ static void handle_arith_cmd(conn* c)
{
number_rep_t* rep;
item* it;
- size_t key_length = c->u.key_number_req.keylen;
+ size_t nkey = c->u.key_number_req.keylen;
uint32_t delta;
static char temp[32];
- it = item_get(c->bp_key, key_length);
+ it = item_get(c->bp_key, nkey);
if ((rep = ALLOCATE_REPLY_HEADER(c, number_rep_t, &c->u.key_number_req)) == NULL) {
bp_write_err_msg(c, "out of memory");
@@ -1061,7 +1064,7 @@ static void handle_arith_cmd(conn* c)
delta = ntohl(c->u.key_number_req.number);
- out = add_delta(it, (c->u.key_number_req.cmd == BP_INCR_CMD),
+ out = add_delta(c->bp_key, nkey, (c->u.key_number_req.cmd == BP_INCR_CMD),
delta, temp, &val, get_request_addr(c));
if (out != temp) {
View
2  configure.ac
@@ -1,5 +1,5 @@
AC_PREREQ(2.52)
-AC_INIT(memcached, 1.2.3d-cr0, brad@danga.com)
+AC_INIT(memcached, 1.2.3d-cr4, brad@danga.com)
AC_CANONICAL_SYSTEM
AC_CONFIG_SRCDIR(memcached.c)
AM_INIT_AUTOMAKE(AC_PACKAGE_NAME, AC_PACKAGE_VERSION)
View
727 flat_storage.c
@@ -74,10 +74,8 @@ void flat_storage_init(size_t maxbytes) {
fsi.large_free_list_sz = 0;
fsi.small_free_list = NULL_CHUNKPTR;
fsi.small_free_list_sz = 0;
- fsi.large_lru_head = NULL_CHUNKPTR;
- fsi.large_lru_tail = NULL_CHUNKPTR;
- fsi.small_lru_head = NULL_CHUNKPTR;
- fsi.small_lru_tail = NULL_CHUNKPTR;
+ fsi.lru_head = NULL_CHUNKPTR;
+ fsi.lru_tail = NULL_CHUNKPTR;
/* shouldn't fail here.... right? */
flat_storage_alloc();
@@ -139,23 +137,6 @@ void item_init(void) {
}
-static inline size_t __fss_MIN(size_t a, size_t b) {
- if (a < b) {
- return a;
- } else {
- return b;
- }
-}
-
-static inline size_t __fss_MAX(size_t a, size_t b) {
- if (a > b) {
- return a;
- } else {
- return b;
- }
-}
-
-
/* initialize at least nbytes more memory and add them as large chunks to the
* free list. */
STATIC bool flat_storage_alloc(void) {
@@ -409,87 +390,22 @@ static void unbreak_large_chunk(large_chunk_t* lc, bool mandatory) {
/*
- * gets the oldest item on the LRU with refcount == 0. if chunk_type is
- * SMALL_CHUNK, the search will start at start instead of the LRU tail.
+ * gets the oldest item on the LRU with refcount == 0.
*/
-STATIC item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* start) {
- switch (chunk_type) {
- case SMALL_CHUNK:
- {
- small_chunk_t* small_chunk_iter;
- int i;
-
- if (start == NULL) {
- small_chunk_iter = (small_chunk_t*) fsi.small_lru_tail;
- } else {
- small_chunk_iter = (small_chunk_t*) start;
- }
-
- for (i = 0;
- i < SMALL_LRU_SEARCH_DEPTH && small_chunk_iter != NULL;
- i ++) {
- chunkptr_t prev_chunkptr;
- chunk_t* prev;
- small_title_chunk_t* small_title;
-
- assert(small_chunk_iter->flags == (SMALL_CHUNK_INITIALIZED |
- SMALL_CHUNK_USED |
- SMALL_CHUNK_TITLE));
-
- small_title = &small_chunk_iter->sc_title;
- if (small_title->refcount == 0) {
- /* it is silly that we have to make this typecast, but
- * there's no other way to make this assignment without
- * a cast, even though it ought to be possible. */
- item* small_item = get_item_from_small_title(small_title);
-
- return small_item;
- }
-
- prev_chunkptr = small_title->prev;
- if (prev_chunkptr == NULL_CHUNKPTR) {
- return NULL;
- }
- prev = get_chunk_address(prev_chunkptr);
- small_chunk_iter = &(prev->sc);
- }
+STATIC item* get_lru_item(void) {
+ int i;
+ item* iter, * prev;
+
+ for (i = 0,
+ iter = fsi.lru_tail;
+ i < LRU_SEARCH_DEPTH && iter != NULL_CHUNKPTR;
+ i ++, iter = prev) {
+ /* large chunk */
+ if (iter->empty_header.refcount == 0) {
+ return iter;
}
- break;
- case LARGE_CHUNK:
- {
- large_chunk_t* large_chunk_iter;
- int i;
-
- for (i = 0, large_chunk_iter = (large_chunk_t*) fsi.large_lru_tail;
- i < LARGE_LRU_SEARCH_DEPTH && large_chunk_iter != NULL;
- i ++) {
- chunkptr_t prev_chunkptr;
- chunk_t* prev;
- large_title_chunk_t* large_title;
-
- assert(large_chunk_iter->flags == (LARGE_CHUNK_INITIALIZED |
- LARGE_CHUNK_USED |
- LARGE_CHUNK_TITLE));
-
- large_title = &large_chunk_iter->lc_title;
- if (large_title->refcount == 0) {
- /* it is silly that we have to make this typecast, but
- * there's no other way to make this assignment without
- * a cast, even though it ought to be possible. */
- item* large_item = get_item_from_large_title(large_title);
- return large_item;
- }
-
- prev_chunkptr = large_title->prev;
- if (prev_chunkptr == NULL_CHUNKPTR) {
- return NULL;
- }
- prev = get_chunk_address(prev_chunkptr);
- large_chunk_iter = &(prev->lc);
- }
- }
- break;
+ prev = get_item_from_chunk(get_chunk_address(iter->empty_header.prev));
}
return NULL;
@@ -559,333 +475,205 @@ static large_chunk_t* find_unreferenced_broken_chunk(size_t search_depth) {
/*
- * coalesce small free chunks to form a single large free chunk. two things are
- * needed to perform this operation:
+ * coalesce as many small free chunks as we can find to form large free chunks.
+ * two things are needed to perform this operation:
* 1) at least SMALL_CHUNKS_PER_LARGE_CHUNK free small chunks.
* 2) a large broken chunk that has refcount == 0 so we can move items off of it.
*
- * if there aren't at least SMALL_CHUNKS_PER_LARGE_CHUNK, free enough small
- * chunks to satisfy that requirement. if we have accidentally freed up a large
- * free chunk, then return immediately.
- *
- * once the first criteria is satisfied, search through the freelist to find a
- * parent chunk that has refcount == 0. if that fails, search through the LRU
- * (search limited by SMALL_LRU_SEARCH_DEPTH) for such a chunk.
- *
- * if during the freeing process, the timestamp on the LRU small item is newer
- * than large_lru_item_timestamp, then stop.
- *
* returns COALESCE_NO_PROGRESS if no forward progress was made.
- * COALESCE_LARGE_CHUNK_FORMED if a large chunk was formed.
- * COALESCE_FORWARD_PROGRESS if a large chunk was not formed, but
- * forward progress was made.
+ * COALESCE_LARGE_CHUNK_FORMED if large chunks was formed.
*/
-static coalesce_progress_t coalesce_free_small_chunks(rel_time_t large_lru_item_timestamp) {
- size_t large_free_list_sz_pre = fsi.large_free_list_sz,
- small_free_list_sz_pre = fsi.small_free_list_sz;
- large_chunk_t* lc;
- int i;
- item* small_lru_item;
-
- while (fsi.small_free_list_sz < SMALL_CHUNKS_PER_LARGE_CHUNK) {
- small_lru_item = get_lru_item(SMALL_CHUNK, NULL);
- if (small_lru_item == NULL ||
- small_lru_item->small_title.time > large_lru_item_timestamp) {
- /* nothing more to free or large LRU is newer, so go free that */
- /* instead. */
- return ( small_free_list_sz_pre == fsi.small_free_list_sz ) ?
- COALESCE_NO_PROGRESS : COALESCE_FORWARD_PROGRESS;
- }
+static coalesce_progress_t coalesce_free_small_chunks(void) {
+ coalesce_progress_t retval = COALESCE_NO_PROGRESS;
- assert(small_lru_item->empty_header.refcount == 0);
- do_item_unlink(small_lru_item, UNLINK_MAYBE_EVICT);
+ while (fsi.small_free_list_sz >= SMALL_CHUNKS_PER_LARGE_CHUNK) {
+ large_chunk_t* lc;
+ unsigned i;
- if (fsi.large_free_list_sz > large_free_list_sz_pre) {
- /* the prior free released an entire large chunk. */
- return COALESCE_LARGE_CHUNK_FORMED;
+ lc = find_unreferenced_broken_chunk(0);
+ if (lc == NULL) {
+ /* we don't want to be stuck in an infinite loop if we can't find a
+ * large unreferenced chunk, so just report no progress. */
+ return retval;
}
- }
-
- lc = find_unreferenced_broken_chunk(0);
- if (lc == NULL) {
- /* we don't want to be stuck in an infinite loop if we can't find a
- * large unreferenced chunk, so just report no progress. */
- return COALESCE_NO_PROGRESS;
- }
-
- /* STATS: update */
- fsi.stats.broken_chunk_histogram[lc->lc_broken.small_chunks_allocated] --;
- fsi.stats.migrates += lc->lc_broken.small_chunks_allocated;
- if (lc->lc_broken.small_chunks_allocated != 0) {
- /* any free small chunks that belong to the same parent chunk should be
- * removed from the free list. this is to ensure that we don't pick them
- * up as replacement blocks. */
- for (i = 0; i < SMALL_CHUNKS_PER_LARGE_CHUNK; i ++) {
- small_chunk_t* iter = &(lc->lc_broken.lbc[i]);
+ /* STATS: update */
+ fsi.stats.broken_chunk_histogram[lc->lc_broken.small_chunks_allocated] --;
+ fsi.stats.migrates += lc->lc_broken.small_chunks_allocated;
- if (iter->flags & SMALL_CHUNK_FREE) {
- /* need to remove this from the free list */
- small_chunk_t** prev_next;
+ if (lc->lc_broken.small_chunks_allocated != 0) {
+ /* any free small chunks that belong to the same parent chunk should be
+ * removed from the free list. this is to ensure that we don't pick them
+ * up as replacement blocks. */
+ for (i = 0; i < SMALL_CHUNKS_PER_LARGE_CHUNK; i ++) {
+ small_chunk_t* iter = &(lc->lc_broken.lbc[i]);
+
+ if (iter->flags & SMALL_CHUNK_FREE) {
+ /* need to remove this from the free list */
+ small_chunk_t** prev_next;
+
+ prev_next = iter->sc_free.prev_next;
+ assert(*prev_next == iter);
+ *(prev_next) = iter->sc_free.next;
+
+ if (iter->sc_free.next != NULL_CHUNKPTR) {
+ small_chunk_t* next = iter->sc_free.next;
+ next->sc_free.prev_next = prev_next;
+ }
- prev_next = iter->sc_free.prev_next;
- assert(*prev_next == iter);
- *(prev_next) = iter->sc_free.next;
+ iter->flags &= ~(SMALL_CHUNK_FREE);
+ iter->flags |= SMALL_CHUNK_COALESCE_PENDING;
- if (iter->sc_free.next != NULL_CHUNKPTR) {
- small_chunk_t* next = iter->sc_free.next;
- next->sc_free.prev_next = prev_next;
+ fsi.small_free_list_sz --;
}
-
- iter->flags &= ~(SMALL_CHUNK_FREE);
- iter->flags |= SMALL_CHUNK_COALESCE_PENDING;
-
- fsi.small_free_list_sz --;
}
- }
- for (i = 0; i < SMALL_CHUNKS_PER_LARGE_CHUNK; i ++) {
- small_chunk_t* iter = &(lc->lc_broken.lbc[i]);
- chunk_t* old_chunk = (chunk_t*) iter;
- (void) old_chunk; /* when optimizing, old_chunk is not
- * used. this is to quiesce the
- * compiler warning. */
-
- assert( (iter->flags & SMALL_CHUNK_INITIALIZED) ==
- SMALL_CHUNK_INITIALIZED);
-
- if (iter->flags & SMALL_CHUNK_USED) {
- /* title block */
- chunk_t* _replacement = free_list_pop(SMALL_CHUNK);
- small_chunk_t* replacement;
- chunkptr_t replacement_chunkptr;
- assert(_replacement != NULL);
-
- replacement = &(_replacement->sc);
- assert(replacement->flags == (SMALL_CHUNK_INITIALIZED));
- memcpy(replacement, iter, sizeof(small_chunk_t));
- replacement_chunkptr = get_chunkptr(_replacement);
-
- if (iter->flags & SMALL_CHUNK_TITLE) {
- item* new_it;
- chunk_t* next, * prev;
- small_chunk_t* next_chunk;
-
- new_it = get_item_from_small_title(&(replacement->sc_title));
-
- /* edit the forward and backward links. */
- if (replacement->sc_title.next != NULL_CHUNKPTR) {
- next = get_chunk_address(replacement->sc_title.next);
- assert(next->sc.sc_title.prev == get_chunkptr(old_chunk));
- next->sc.sc_title.prev = replacement_chunkptr;
+ for (i = 0; i < SMALL_CHUNKS_PER_LARGE_CHUNK; i ++) {
+ small_chunk_t* iter = &(lc->lc_broken.lbc[i]);
+ chunk_t* old_chunk = (chunk_t*) iter;
+ (void) old_chunk; /* when optimizing, old_chunk is not
+ * used. this is to quiesce the
+ * compiler warning. */
+
+ assert( (iter->flags & SMALL_CHUNK_INITIALIZED) ==
+ SMALL_CHUNK_INITIALIZED);
+
+ if (iter->flags & SMALL_CHUNK_USED) {
+ /* title block */
+ chunk_t* _replacement = free_list_pop(SMALL_CHUNK);
+ small_chunk_t* replacement;
+ chunkptr_t replacement_chunkptr;
+ assert(_replacement != NULL);
+
+ replacement = &(_replacement->sc);
+ assert(replacement->flags == (SMALL_CHUNK_INITIALIZED));
+ memcpy(replacement, iter, sizeof(small_chunk_t));
+ replacement_chunkptr = get_chunkptr(_replacement);
+
+ if (iter->flags & SMALL_CHUNK_TITLE) {
+ item* new_it;
+ chunk_t* next, * prev;
+ small_chunk_t* next_chunk;
+
+ new_it = get_item_from_small_title(&(replacement->sc_title));
+
+ /* edit the forward and backward links. */
+ if (replacement->sc_title.next != NULL_CHUNKPTR) {
+ next = get_chunk_address(replacement->sc_title.next);
+ assert(next->sc.sc_title.prev == get_chunkptr(old_chunk));
+ next->sc.sc_title.prev = replacement_chunkptr;
+ } else {
+ assert(fsi.lru_tail == get_item_from_small_title(&old_chunk->sc.sc_title));
+ fsi.lru_tail = get_item_from_small_title(&replacement->sc_title);
+ }
+
+ if (replacement->sc_title.prev != NULL_CHUNKPTR) {
+ prev = get_chunk_address(replacement->sc_title.prev);
+ assert(prev->sc.sc_title.next == get_chunkptr(old_chunk));
+ prev->sc.sc_title.next = replacement_chunkptr;
+ } else {
+ assert(fsi.lru_head == get_item_from_small_title(&old_chunk->sc.sc_title));
+ fsi.lru_head = get_item_from_small_title(&replacement->sc_title);
+ }
+
+ /* edit the next_chunk's prev_chunk link */
+ next_chunk = &(get_chunk_address(replacement->sc_title.next_chunk))->sc;
+ if (next_chunk != NULL) {
+ assert(next_chunk->sc_body.prev_chunk == get_chunkptr(old_chunk));
+ next_chunk->sc_body.prev_chunk = replacement_chunkptr;
+ }
+
+ /* update flags */
+ replacement->flags |= (SMALL_CHUNK_USED | SMALL_CHUNK_TITLE);
+
+ /* do the replacement in the mapping. */
+ assoc_update(new_it);
} else {
- assert(fsi.small_lru_tail == get_item_from_small_title(&old_chunk->sc.sc_title));
- fsi.small_lru_tail = get_item_from_small_title(&replacement->sc_title);
+ /* body block. this is more straightforward */
+ small_chunk_t* prev_chunk = &(get_chunk_address(replacement->sc_body.prev_chunk))->sc;
+ small_chunk_t* next_chunk = &(get_chunk_address(replacement->sc_body.next_chunk))->sc;
+
+ /* update the previous block's next pointer */
+ if (prev_chunk->flags & SMALL_CHUNK_TITLE) {
+ prev_chunk->sc_title.next_chunk = replacement_chunkptr;
+ } else {
+ prev_chunk->sc_body.next_chunk = replacement_chunkptr;
+ }
+
+ /* edit the next_chunk's prev_chunk link */
+ if (next_chunk != NULL) {
+ assert(next_chunk->sc_body.prev_chunk == get_chunkptr(old_chunk));
+ next_chunk->sc_body.prev_chunk = replacement_chunkptr;
+ }
+
+ /* update flags */
+ replacement->flags |= (SMALL_CHUNK_USED);
}
- if (replacement->sc_title.prev != NULL_CHUNKPTR) {
- prev = get_chunk_address(replacement->sc_title.prev);
- assert(prev->sc.sc_title.next == get_chunkptr(old_chunk));
- prev->sc.sc_title.next = replacement_chunkptr;
- } else {
- assert(fsi.small_lru_head == get_item_from_small_title(&old_chunk->sc.sc_title));
- fsi.small_lru_head = get_item_from_small_title(&replacement->sc_title);
- }
-
- /* edit the next_chunk's prev_chunk link */
- next_chunk = &(get_chunk_address(replacement->sc_title.next_chunk))->sc;
- if (next_chunk != NULL) {
- assert(next_chunk->sc_body.prev_chunk == get_chunkptr(old_chunk));
- next_chunk->sc_body.prev_chunk = replacement_chunkptr;
- }
+ /* don't push this onto the free list. if we do, we'll immediately
+ * pick it up when finding a replacement block. instead, just mark
+ * it coalesce-pending. */
+ iter->flags = SMALL_CHUNK_INITIALIZED | SMALL_CHUNK_COALESCE_PENDING;
- /* update flags */
- replacement->flags |= (SMALL_CHUNK_USED | SMALL_CHUNK_TITLE);
-
- /* do the replacement in the mapping. */
- assoc_update(new_it);
- } else {
- /* body block. this is more straightforward */
- small_chunk_t* prev_chunk = &(get_chunk_address(replacement->sc_body.prev_chunk))->sc;
- small_chunk_t* next_chunk = &(get_chunk_address(replacement->sc_body.next_chunk))->sc;
-
- /* update the previous block's next pointer */
- if (prev_chunk->flags & SMALL_CHUNK_TITLE) {
- prev_chunk->sc_title.next_chunk = replacement_chunkptr;
- } else {
- prev_chunk->sc_body.next_chunk = replacement_chunkptr;
- }
-
- /* edit the next_chunk's prev_chunk link */
- if (next_chunk != NULL) {
- assert(next_chunk->sc_body.prev_chunk == get_chunkptr(old_chunk));
- next_chunk->sc_body.prev_chunk = replacement_chunkptr;
- }
-
- /* update flags */
- replacement->flags |= (SMALL_CHUNK_USED);
+ /* decrement the number of blocks allocated */
+ lc->lc_broken.small_chunks_allocated --;
}
-
- /* don't push this onto the free list. if we do, we'll immediately
- * pick it up when finding a replacement block. instead, just mark
- * it coalesce-pending. */
- iter->flags = SMALL_CHUNK_INITIALIZED | SMALL_CHUNK_COALESCE_PENDING;
-
- /* decrement the number of blocks allocated */
- lc->lc_broken.small_chunks_allocated --;
}
}
- }
- /* STATS: update */
- fsi.stats.broken_chunk_histogram[0] ++;
+ /* STATS: update */
+ fsi.stats.broken_chunk_histogram[0] ++;
+
+ unbreak_large_chunk(lc, true);
- unbreak_large_chunk(lc, true);
+ retval = COALESCE_LARGE_CHUNK_FORMED;
+ }
- return COALESCE_LARGE_CHUNK_FORMED;
+ return retval;
}
static bool flat_storage_lru_evict(chunk_type_t chunk_type, size_t nchunks) {
while (1) {
- bool release_small = false, release_large = false;
- rel_time_t * small_lru_item_timestamp, * large_lru_item_timestamp;
- item* small_lru_item = NULL, * large_lru_item = NULL;
- size_t nreleased; /* number of chunks freed as a result
- * of the release. */
-
- /* get the LRU items */
- small_lru_item = get_lru_item(SMALL_CHUNK, NULL);
- large_lru_item = get_lru_item(LARGE_CHUNK, NULL);
-
- /* it is possible that get_lru_item(..) will return NULL, but we'll
- * never use these pointers without checking the return values. this
- * is just an elaborate way to circumvent the compiler's warning that
- * large_lru_item_timestamp may be used uninitialized. */
- small_lru_item_timestamp = &small_lru_item->small_title.time;
- large_lru_item_timestamp = &large_lru_item->large_title.time;
-
- /* have the items, figure out which item to release. */
- if (small_lru_item == NULL &&
- large_lru_item == NULL) {
+ /* release one item from the LRU... */
+ item* lru_item;
+
+ lru_item = get_lru_item();
+ if (lru_item == NULL) {
+ /* nothing to release, so we just fail. */
return false;
- } else if (small_lru_item == NULL && large_lru_item != NULL) {
- release_large = true;
- } else if (small_lru_item != NULL && large_lru_item == NULL) {
- release_small = true;
- } else {
- /* tie goes to large items, because they're easier to deal with. in
- * any case, this is extraordinarily unlikely. */
- if (*small_lru_item_timestamp < *large_lru_item_timestamp) {
- release_small = true;
- } else {
- release_large = true;
- }
}
+ do_item_unlink(lru_item, UNLINK_MAYBE_EVICT);
+ /* do we have enough free chunks to leave this loop? */
switch (chunk_type) {
case SMALL_CHUNK:
- /* several possibilities exist:
- * 1) small chunks exist in LRU but not in the large LRU.
- * release the item at the tail. check if we've released
- * enough to satisfy the request.
- * 2) small chunks do not exist in LRU, but exist in large chunk
- * LRU. release the item at the tail. break up enough to
- * satisfy the request and see if we've released enough to
- * satisfy the request.
- * 3) both LRUs have items. if the item at the tail of the
- * small LRU is accessed less recently than the item at the
- * tail of the large LRU, release the item at the tail of the
- * small LRU. decrement nchunks by the number of chunks
- * we've freed up.
- * 4) both LRUs have items. if the item at the tail of the
- * large LRU is accessed less recently than the item at the
- * tail of the small LRU, release the item at the tail of the
- * large LRU. break up enough to satisfy the request and
- * decrement nchunks.
- */
-
- /* handles case 1 and 3 */
- if (release_small) {
- assert(small_lru_item->small_title.refcount == 0);
- nreleased = chunks_in_item(small_lru_item);
- do_item_unlink(small_lru_item, UNLINK_MAYBE_EVICT);
-
- if (fsi.small_free_list_sz +
- (fsi.large_free_list_sz * SMALL_CHUNKS_PER_LARGE_CHUNK) >=
- nchunks) {
- /* we have enough chunks for the allocation */
- return true;
- }
+ /* this is easier. if we numerically have enough chunks, then
+ * we pass. the caller will break the large chunks as
+ * necessary. */
+ if (((fsi.large_free_list_sz * SMALL_CHUNKS_PER_LARGE_CHUNK) +
+ fsi.small_free_list_sz) >= nchunks) {
+ return true;
}
-
- /* handles case 2 and 4 */
- if (release_large) {
- assert(large_lru_item->large_title.refcount == 0);
- nreleased = chunks_in_item(large_lru_item);
- do_item_unlink(large_lru_item, UNLINK_MAYBE_EVICT);
-
- while (fsi.large_free_list_sz > 0 &&
- fsi.small_free_list_sz < nchunks) {
- chunk_t* chunk = free_list_pop(LARGE_CHUNK);
-
- break_large_chunk(chunk);
- }
-
- if (fsi.small_free_list_sz >= nchunks) {
- return true;
- }
- }
-
- break;
+ break;
case LARGE_CHUNK:
- /* several possibilities exist:
- * 1) small chunks exist in LRU but not in the large LRU.
- * release the item at the tail. free small chunks until we
- * have enough to unbreak a large chunk.
- * 2) small chunks do not exist in LRU, but exist in large chunk
- * LRU. release the item at the tail. check if we've
- * released enough to satisfy the request.
- * 3) both LRUs have items. if the item at the tail of the
- * small LRU is accessed less recently than the item at the
- * tail of the large LRU, release the item at the tail of the
- * small LRU. continue freeing items until we have enough
- * space to unbreak a large chunk OR if the large LRU is
- * accessed less recently than the small LRU.
- * 4) both LRUs have items. if the item at the tail of the
- * large LRU is accessed less recently than the item at the
- * tail of the small LRU, release the item at the tail of the
- * large LRU.
- */
-
- if (release_small) {
- /* this is now painfully complicated. we need to coalesce
- * free small chunks to free up space to unbreak a large
- * chunk. if the large LRU has items, unbreak until the
- * large LRU is accessed less recently than the small LRU.
- * otherwise, just keep unbreaking. */
- rel_time_t stop_time;
- if (large_lru_item == NULL) {
- stop_time = current_time;
- } else {
- stop_time = *large_lru_item_timestamp;
- }
-
- if (coalesce_free_small_chunks(stop_time) == COALESCE_NO_PROGRESS) {
- return false;
- }
-
- if (fsi.large_free_list_sz >= nchunks) {
- return true;
- }
+ /* this is, not surprisingly, more complicated. if we have
+ * sufficient large chunks, pass immediately. if we have
+ * sufficient space, we can try a coalesce. if that succeeds,
+ * we can check again to see if we have enough. */
+ if (fsi.large_free_list_sz >= nchunks) {
+ return true;
}
- if (release_large) {
- assert(large_lru_item->large_title.refcount == 0);
- nreleased = chunks_in_item(large_lru_item);
- do_item_unlink(large_lru_item, UNLINK_MAYBE_EVICT);
+ if (((fsi.large_free_list_sz * SMALL_CHUNKS_PER_LARGE_CHUNK) +
+ fsi.small_free_list_sz) >= (nchunks * SMALL_CHUNKS_PER_LARGE_CHUNK)) {
+ /* try a coalesce */
+ if (coalesce_free_small_chunks() == COALESCE_NO_PROGRESS) {
+ continue;
+ }
+ /* we made progress, do we have what we need? */
if (fsi.large_free_list_sz >= nchunks) {
return true;
}
@@ -977,7 +765,7 @@ void do_try_item_stamp(item* it, rel_time_t now, const struct in_addr addr) {
/* allocates one item capable of storing a key of size nkey and a value field of
* size nbytes. stores the key, flags, and exptime. the value field is not
* initialized. if there is insufficient memory, NULL is returned. */
-item* do_item_alloc(char *key, const size_t nkey, const int flags, const rel_time_t exptime,
+item* do_item_alloc(const char *key, const size_t nkey, const int flags, const rel_time_t exptime,
const size_t nbytes, const struct in_addr addr) {
if (item_size_ok(nkey, flags, nbytes) == false) {
return NULL;
@@ -989,8 +777,8 @@ item* do_item_alloc(char *key, const size_t nkey, const int flags, const rel_tim
/* try various strategies to get a free item:
* 1) free_list
* 2) flat_storage_alloc
- * 3) if we have sufficient small free chunks to constitute a large free
- * chunk, do so.
+ * 3) if we have sufficient small free chunks + large free chunks to
+ * store the item, try a coalesce.
* 4) flat_storage_lru_evict
*/
size_t needed = chunks_needed(nkey, nbytes);
@@ -1009,11 +797,10 @@ item* do_item_alloc(char *key, const size_t nkey, const int flags, const rel_tim
continue;
}
- while (fsi.small_free_list_sz >= SMALL_CHUNKS_PER_LARGE_CHUNK &&
- fsi.large_free_list_sz < needed) {
- if (coalesce_free_small_chunks(current_time) == COALESCE_NO_PROGRESS) {
- break;
- }
+ if (((fsi.large_free_list_sz * SMALL_CHUNKS_PER_LARGE_CHUNK) +
+ fsi.small_free_list_sz) >= (needed * SMALL_CHUNKS_PER_LARGE_CHUNK)) {
+ /* try a coalesce */
+ coalesce_free_small_chunks();
}
if (prev_free != fsi.large_free_list_sz) {
continue;
@@ -1291,75 +1078,39 @@ static void item_link_q(item *it) {
assert(it->empty_header.next == NULL_CHUNKPTR);
assert(it->empty_header.prev == NULL_CHUNKPTR);
- if (is_item_large_chunk(it)) {
- assert( ((fsi.large_lru_head == NULL) ^ (fsi.large_lru_head == NULL)) == 0 );
- if (fsi.large_lru_head != NULL) {
- it->empty_header.next = get_chunkptr((chunk_t*) fsi.large_lru_head);
- fsi.large_lru_head->empty_header.prev = get_chunkptr((chunk_t*) it);
- }
- fsi.large_lru_head = it;
-
- if (fsi.large_lru_tail == NULL) {
- fsi.large_lru_tail = it;
- }
- } else {
- assert( ((fsi.small_lru_head == NULL) ^ (fsi.small_lru_head == NULL)) == 0 );
- if (fsi.small_lru_head != NULL) {
- it->empty_header.next = get_chunkptr((chunk_t*) fsi.small_lru_head);
- fsi.small_lru_head->empty_header.prev = get_chunkptr((chunk_t*) it);
- }
- fsi.small_lru_head = it;
+ assert( ((fsi.lru_head == NULL) ^ (fsi.lru_head == NULL)) == 0 );
+ if (fsi.lru_head != NULL) {
+ it->empty_header.next = get_chunkptr((chunk_t*) fsi.lru_head);
+ fsi.lru_head->empty_header.prev = get_chunkptr((chunk_t*) it);
+ }
+ fsi.lru_head = it;
- if (fsi.small_lru_tail == NULL) {
- fsi.small_lru_tail = it;
- }
+ if (fsi.lru_tail == NULL) {
+ fsi.lru_tail = it;
}
}
static void item_unlink_q(item* it) {
- if (is_item_large_chunk(it)) {
- item* next, * prev;
-
- next = get_item_from_chunk(get_chunk_address(it->empty_header.next));
- prev = get_item_from_chunk(get_chunk_address(it->empty_header.prev));
-
- if (it == fsi.large_lru_head) {
- assert(prev == NULL);
- fsi.large_lru_head = next;
- }
- if (it == fsi.large_lru_tail) {
- assert(next == NULL);
- fsi.large_lru_tail = prev;
- }
-
- if (next) {
- next->empty_header.prev = it->empty_header.prev;
- }
- if (prev) {
- prev->empty_header.next = it->empty_header.next;
- }
- } else {
- item* next, * prev;
+ item* next, * prev;
- next = get_item_from_chunk(get_chunk_address(it->empty_header.next));
- prev = get_item_from_chunk(get_chunk_address(it->empty_header.prev));
+ next = get_item_from_chunk(get_chunk_address(it->empty_header.next));
+ prev = get_item_from_chunk(get_chunk_address(it->empty_header.prev));
- if (it == fsi.small_lru_head) {
- assert(prev == NULL);
- fsi.small_lru_head = next;
- }
- if (it == fsi.small_lru_tail) {
- assert(next == NULL);
- fsi.small_lru_tail = prev;
- }
+ if (it == fsi.lru_head) {
+ assert(prev == NULL);
+ fsi.lru_head = next;
+ }
+ if (it == fsi.lru_tail) {
+ assert(next == NULL);
+ fsi.lru_tail = prev;
+ }
- if (next) {
- next->empty_header.prev = it->empty_header.prev;
- }
- if (prev) {
- prev->empty_header.next = it->empty_header.next;
- }
+ if (next) {
+ next->empty_header.prev = it->empty_header.prev;
+ }
+ if (prev) {
+ prev->empty_header.next = it->empty_header.next;
}
it->empty_header.prev = NULL_CHUNKPTR;
@@ -1422,10 +1173,10 @@ void do_item_unlink(item* it, long flags) {
stats_expire(ITEM_nkey(it) + ITEM_nbytes(it));
}
if (settings.detail_enabled) {
- stats_prefix_record_removal(ITEM_key(it), ITEM_nkey(it) + ITEM_nbytes(it), it->empty_header.time, flags);
+ stats_prefix_record_removal(ITEM_key(it), ITEM_nkey(it), ITEM_nkey(it) + ITEM_nbytes(it), it->empty_header.time, flags);
}
STATS_UNLOCK();
- assoc_delete(ITEM_key(it), ITEM_nkey(it), ITEM_PTR(it));
+ assoc_delete(ITEM_key(it), ITEM_nkey(it));
it->empty_header.h_next = NULL_ITEM_PTR;
item_unlink_q(it);
if (it->empty_header.refcount == 0) {
@@ -1468,19 +1219,11 @@ void do_item_update(item* it) {
int do_item_replace(item* it, item* new_it) {
int retval;
- // if item is already unlinked by another thread, we'd get the
- // current one
- if ((it->empty_header.it_flags & ITEM_LINKED) == 0) {
- it = assoc_find(ITEM_key(it), ITEM_nkey(it));
- }
-
- if (it != NULL) {
- // though there might not be a current one if the other thread did a
- // delete.
- assert((it->empty_header.it_flags & (ITEM_VALID | ITEM_LINKED)) ==
- (ITEM_VALID | ITEM_LINKED));
- do_item_unlink(it, UNLINK_NORMAL);
- }
+ // though there might not be a current one if the other thread did a
+ // delete.
+ assert((it->empty_header.it_flags & (ITEM_VALID | ITEM_LINKED)) ==
+ (ITEM_VALID | ITEM_LINKED));
+ do_item_unlink(it, UNLINK_NORMAL);
assert(new_it->empty_header.it_flags & ITEM_VALID);
retval = do_item_link(new_it);
@@ -1502,11 +1245,7 @@ char* do_item_cachedump(const chunk_type_t type, const unsigned int limit, unsig
if (buffer == 0) return NULL;
bufcurr = 0;
- if (type == LARGE_CHUNK) {
- it = fsi.large_lru_head;
- } else {
- it = fsi.small_lru_head;
- }
+ it = fsi.lru_head;
while (it != NULL && (limit == 0 || shown < limit)) {
memcpy(key_tmp, ITEM_key(it), ITEM_nkey(it));
@@ -1544,7 +1283,7 @@ char* do_item_stats_sizes(int* bytes) {
/* build the histogram */
memset(histogram, 0, (size_t)num_buckets * sizeof(int));
- item* iter = fsi.small_lru_head;
+ item* iter = fsi.lru_head;
while (iter) {
int ntotal = ITEM_ntotal(iter);
int bucket = ntotal / 32;
@@ -1553,7 +1292,7 @@ char* do_item_stats_sizes(int* bytes) {
iter = get_item_from_chunk(get_chunk_address(iter->small_title.next));
}
- iter = fsi.large_lru_head;
+ iter = fsi.lru_head;
while (iter) {
int ntotal = ITEM_ntotal(iter);
int bucket = ntotal / 32;
@@ -1580,7 +1319,7 @@ void do_item_flush_expired(void) {
if (settings.oldest_live == 0)
return;
- for (iter = fsi.small_lru_head;
+ for (iter = fsi.lru_head;
iter != NULL;
iter = next) {
if (iter->small_title.time >= settings.oldest_live) {
@@ -1594,7 +1333,7 @@ void do_item_flush_expired(void) {
}
}
- for (iter = fsi.large_lru_head;
+ for (iter = fsi.lru_head;
iter != NULL;
iter = next) {
if (iter->large_title.time >= settings.oldest_live) {
@@ -1664,8 +1403,7 @@ char* do_flat_allocator_stats(size_t* result_size) {
size_t bufsize = 2048, offset = 0, i;
char* buffer = malloc(bufsize);
char terminator[] = "END\r\n";
- item* small_lru_item = NULL, * large_lru_item = NULL;
- rel_time_t * small_lru_item_timestamp, * large_lru_item_timestamp;
+ item* lru_item = NULL;
rel_time_t oldest_item_lifetime;
if (buffer == NULL) {
@@ -1674,40 +1412,23 @@ char* do_flat_allocator_stats(size_t* result_size) {
}
/* get the LRU items */
- small_lru_item = get_lru_item(SMALL_CHUNK, NULL);
- large_lru_item = get_lru_item(LARGE_CHUNK, NULL);
-
- /* it is possible that get_lru_item(..) will return NULL, but we'll
- * never use these pointers without checking the return values. this
- * is just an elaborate way to circumvent the compiler's warning that
- * large_lru_item_timestamp may be used uninitialized. */
- small_lru_item_timestamp = &small_lru_item->small_title.time;
- large_lru_item_timestamp = &large_lru_item->large_title.time;
-
- /* have the items, figure out which item to release. */
- if (small_lru_item == NULL &&
- large_lru_item == NULL) {
+ lru_item = get_lru_item();
+ if (lru_item == NULL) {
oldest_item_lifetime = 0;
- } else if (small_lru_item == NULL && large_lru_item != NULL) {
- oldest_item_lifetime = current_time - *large_lru_item_timestamp;
- } else if (small_lru_item != NULL && large_lru_item == NULL) {
- oldest_item_lifetime = current_time - *small_lru_item_timestamp;
} else {
- /* tie goes to large items, because they're easier to deal with. in
- * any case, this is extraordinarily unlikely. */
- if (*small_lru_item_timestamp < *large_lru_item_timestamp) {
- oldest_item_lifetime = current_time - *small_lru_item_timestamp;
- } else {
- oldest_item_lifetime = current_time - *large_lru_item_timestamp;
- }
+ oldest_item_lifetime = current_time - lru_item->empty_header.time;
}
offset = append_to_buffer(buffer, bufsize, offset, sizeof(terminator),
+ "STAT large_chunk_sz %d\n"
+ "STAT small_chunk_sz %d\n"
"STAT large_title_chunks %" PRINTF_INT64_MODIFIER "u\n"
"STAT large_body_chunks %" PRINTF_INT64_MODIFIER "u\n"
"STAT large_broken_chunks %" PRINTF_INT64_MODIFIER "u\n"
"STAT small_title_chunks %" PRINTF_INT64_MODIFIER "u\n"
"STAT small_body_chunks %" PRINTF_INT64_MODIFIER "u\n",
+ LARGE_CHUNK_SZ,
+ SMALL_CHUNK_SZ,
fsi.stats.large_title_chunks,
fsi.stats.large_body_chunks,
fsi.stats.large_broken_chunks,
View
55 flat_storage.h
@@ -181,10 +181,8 @@ typedef enum chunk_type_e {
SMALL_TITLE_CHUNK_DATA_SZ : SMALL_BODY_CHUNK_DATA_SZ) /* this is the smallest number of data
* bytes a small chunk can hold. */
-#define LARGE_LRU_SEARCH_DEPTH 50 /* number of items we'll check in the
- * large LRU to find items to evict. */
-#define SMALL_LRU_SEARCH_DEPTH 50 /* number of items we'll check in the
- * small LRU to find items to evict. */
+#define LRU_SEARCH_DEPTH 50 /* number of items we'll check in the
+ * LRU to find items to evict. */
/**
* data types and structures
@@ -358,13 +356,9 @@ struct flat_storage_info_s {
small_chunk_t* small_free_list; // free list head.
size_t small_free_list_sz; // number of small free list chunks.
- // large chunk LRU.
- item* large_lru_head;
- item* large_lru_tail;
-
- // small chunk LRU.
- item* small_lru_head;
- item* small_lru_tail;
+ // LRU.
+ item* lru_head;
+ item* lru_tail;
bool initialized;
@@ -686,7 +680,7 @@ extern char* do_item_cachedump(const chunk_type_t type, const unsigned int limit
DECL_MT_FUNC(char*, flat_allocator_stats, (size_t* bytes));
STATIC_DECL(bool flat_storage_alloc(void));
-STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* start));
+STATIC_DECL(item* get_lru_item(void));
#if !defined(FLAT_STORAGE_MODULE)
#undef STATIC
@@ -696,6 +690,23 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
#endif /* #if !defined(FLAT_STORAGE_MODULE) */
+static inline size_t __fs_MIN(size_t a, size_t b) {
+ if (a < b) {
+ return a;
+ } else {
+ return b;
+ }
+}
+
+static inline size_t __fs_MAX(size_t a, size_t b) {
+ if (a > b) {
+ return a;
+ } else {
+ return b;
+ }
+}
+
+
/* this macro walks over the item and calls applier with the following
* arguments:
* applier(it, ptr, bytes)
@@ -739,7 +750,7 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
end_offset = LARGE_TITLE_CHUNK_DATA_SZ - \
((_it)->empty_header.nkey) - 1; \
} else { \
- end_offset = __fss_MIN((_offset) + (_nbytes), \
+ end_offset = __fs_MIN((_offset) + (_nbytes), \
start_offset + LARGE_TITLE_CHUNK_DATA_SZ - ((_it)->empty_header.nkey)) - 1; \
} \
to_scan = end_offset - start_offset + 1; \
@@ -757,7 +768,7 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
if (next == NULL && (_beyond_item_boundary)) { \
end_offset = LARGE_BODY_CHUNK_DATA_SZ - 1; \
} else { \
- end_offset = __fss_MIN((_offset) + (_nbytes), \
+ end_offset = __fs_MIN((_offset) + (_nbytes), \
LARGE_BODY_CHUNK_DATA_SZ) - 1; \
} \
to_scan = end_offset - start_offset + 1; \
@@ -771,8 +782,8 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
\
size_t work_start, work_end, work_len; \
\
- work_start = __fss_MAX((_offset), start_offset); \
- work_end = __fss_MIN((_offset) + (_nbytes) - 1, end_offset); \
+ work_start = __fs_MAX((_offset), start_offset); \
+ work_end = __fs_MIN((_offset) + (_nbytes) - 1, end_offset); \
work_len = work_end - work_start + 1; \
\
applier((_it), ptr + work_start - start_offset, work_len); \
@@ -793,7 +804,7 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
(_beyond_item_boundary)) { \
end_offset = start_offset + LARGE_BODY_CHUNK_DATA_SZ - 1; \
} else { \
- end_offset = __fss_MIN((_offset) + (_nbytes), \
+ end_offset = __fs_MIN((_offset) + (_nbytes), \
start_offset + LARGE_BODY_CHUNK_DATA_SZ) - 1; \
} \
to_scan = end_offset - start_offset + 1; \
@@ -812,7 +823,7 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
end_offset = SMALL_TITLE_CHUNK_DATA_SZ - \
((_it)->empty_header.nkey) - 1; \
} else { \
- end_offset = __fss_MIN((_offset) + (_nbytes), \
+ end_offset = __fs_MIN((_offset) + (_nbytes), \
start_offset + SMALL_TITLE_CHUNK_DATA_SZ - ((_it)->empty_header.nkey)) - 1; \
} \
to_scan = end_offset - start_offset + 1; \
@@ -830,7 +841,7 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
if (next == NULL && (_beyond_item_boundary)) { \
end_offset = SMALL_BODY_CHUNK_DATA_SZ - 1; \
} else { \
- end_offset = __fss_MIN((_offset) + (_nbytes), \
+ end_offset = __fs_MIN((_offset) + (_nbytes), \
start_offset + SMALL_BODY_CHUNK_DATA_SZ) - 1; \
} \
to_scan = end_offset - start_offset + 1; \
@@ -846,8 +857,8 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
\
size_t work_start, work_end, work_len; \
\
- work_start = __fss_MAX((_offset), start_offset); \
- work_end = __fss_MIN((_offset) + (_nbytes) - 1, end_offset); \
+ work_start = __fs_MAX((_offset), start_offset); \
+ work_end = __fs_MIN((_offset) + (_nbytes) - 1, end_offset); \
work_len = work_end - work_start + 1; \
\
applier((_it), ptr + work_start - start_offset, work_len); \
@@ -870,7 +881,7 @@ STATIC_DECL(item* get_lru_item(chunk_type_t chunk_type, small_title_chunk_t* sta
(_beyond_item_boundary)) { \
end_offset = start_offset + SMALL_BODY_CHUNK_DATA_SZ - 1; \
} else { \
- end_offset = __fss_MIN((_offset) + (_nbytes), \
+ end_offset = __fs_MIN((_offset) + (_nbytes), \
start_offset + SMALL_BODY_CHUNK_DATA_SZ) - 1; \
} \
/* printf(" cycling start_offset = %ld, end_offset = %ld\n", start_offset, end_offset); */ \
View
16 flat_storage_support.h
@@ -21,22 +21,6 @@
#include "memcached.h"
#include "conn_buffer.h"
-static inline size_t __fss_MIN(size_t a, size_t b) {
- if (a < b) {
- return a;
- } else {
- return b;
- }
-}
-
-static inline size_t __fss_MAX(size_t a, size_t b) {
- if (a > b) {
- return a;
- } else {
- return b;
- }
-}
-
static inline int add_item_to_iov(conn *c, const item* it, bool send_cr_lf) {
int retval;
View
2  items.h
@@ -27,7 +27,7 @@
extern void item_init(void);
/*@null@*/
extern void do_try_item_stamp(item* it, rel_time_t now, const struct in_addr addr);
-extern item* do_item_alloc(char *key, const size_t nkey,
+extern item* do_item_alloc(const char *key, const size_t nkey,
const int flags, const rel_time_t exptime, const size_t nbytes,
const struct in_addr addr);
extern bool item_size_ok(const size_t nkey, const int flags, const int nbytes);
View
95 memcached.c
@@ -142,11 +142,7 @@ size_t append_to_buffer(char* const buffer_start,
...) {
va_list ap;
ssize_t written;
- size_t left = buffer_size - buffer_off;
-
- if (left <= reserved) {
- return buffer_off;
- }
+ size_t left = buffer_size - buffer_off - reserved;
va_start(ap, fmt);
written = vsnprintf(&buffer_start[buffer_off], left, fmt, ap);
@@ -943,7 +939,13 @@ int do_store_item(item *it, int comm) {
STATS_LOCK();
if (settings.detail_enabled) {
- stats_prefix_record_byte_total_change(key, ITEM_nkey(it) + ITEM_nbytes(it));
+ int prefix_stats_flags = PREFIX_INCR_ITEM_COUNT;
+
+ if (old_it != NULL) {
+ prefix_stats_flags |= PREFIX_IS_OVERWRITE;
+ }
+ stats_prefix_record_byte_total_change(key, ITEM_nkey(it), ITEM_nkey(it) + ITEM_nbytes(it),
+ prefix_stats_flags);
}
stats_set(ITEM_nkey(it) + ITEM_nbytes(it),
@@ -1126,7 +1128,9 @@ static void process_stat(conn* c, token_t *tokens, const size_t ntokens) {
offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT evictions %" PRINTF_INT64_MODIFIER "u\r\n", stats.evictions);
offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT bytes_read %" PRINTF_INT64_MODIFIER "u\r\n", stats.bytes_read);
offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT bytes_written %" PRINTF_INT64_MODIFIER "u\r\n", stats.bytes_written);
- offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT limit_maxbytes %" PRINTF_INT64_MODIFIER "u\r\n", (uint64_t) settings.maxbytes);
+ offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT limit_maxbytes %zu\r\n", settings.maxbytes);
+ offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT get_bytes %" PRINTF_INT64_MODIFIER "u\r\n", stats.get_bytes);
+ offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT byte_seconds %" PRINTF_INT64_MODIFIER "u\r\n", stats.byte_seconds);
offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT threads %u\r\n", settings.num_threads);
offset = append_thread_stats(temp, bufsize, offset, sizeof(terminator));
#if defined(USE_SLAB_ALLOCATOR)
@@ -1490,8 +1494,9 @@ static inline void process_get_command(conn* c, token_t *tokens, size_t ntokens)
STATS_LOCK();
stats.get_cmds++;
if (settings.detail_enabled) {
- stats_prefix_record_get(key, NULL != it);
+ stats_prefix_record_get(key, nkey, (NULL != it) ? ITEM_nbytes(it) : 0, NULL != it);
}
+ stats.get_bytes += (NULL != it) ? ITEM_nbytes(it) : 0;
STATS_UNLOCK();
if (it) {
@@ -1695,11 +1700,11 @@ static void process_update_command(conn *c, token_t *tokens, const size_t ntoken
return;
}
- STATS_LOCK();
if (settings.detail_enabled) {
- stats_prefix_record_set(key);
+ STATS_LOCK();
+ stats_prefix_record_set(key, nkey);
+ STATS_UNLOCK();
}
- STATS_UNLOCK();
if (settings.managed) {
int bucket = c->bucket;
@@ -1741,7 +1746,6 @@ static void process_update_command(conn *c, token_t *tokens, const size_t ntoken
static void process_arithmetic_command(conn* c, token_t *tokens, const size_t ntokens, const int incr) {
char temp[32];
- item *it;
unsigned int delta;
char *key;
size_t nkey;
@@ -1779,22 +1783,7 @@ static void process_arithmetic_command(conn* c, token_t *tokens, const size_t nt
return;
}
- it = item_get(key, nkey);
- if (!it) {
- STATS_LOCK();
- stats.arith_cmds ++;
- STATS_UNLOCK();
- out_string(c, "NOT_FOUND");
- return;
- }
-
- STATS_LOCK();
- stats.arith_cmds ++;
- stats.arith_hits ++;
- STATS_UNLOCK();
-
- out_string(c, add_delta(it, incr, delta, temp, NULL, get_request_addr(c)));
- item_deref(it); /* release our reference */
+ out_string(c, add_delta(key, nkey, incr, delta, temp, NULL, get_request_addr(c)));
}
/*
@@ -1807,11 +1796,25 @@ static void process_arithmetic_command(conn* c, token_t *tokens, const size_t nt
*
* returns a response string to send back to the client.
*/
-char *do_add_delta(item *it, const int incr, const unsigned int delta, char *buf, uint32_t* res_val,
- const struct in_addr addr) {
+char *do_add_delta(const char* key, const size_t nkey, const int incr, const unsigned int delta,
+ char *buf, uint32_t* res_val, const struct in_addr addr) {
uint32_t value;
int res;
- rel_time_t now = current_time;
+ rel_time_t now;
+ item* it;
+
+ it = do_item_get_notedeleted(key, nkey, NULL);
+ if (!it) {
+ STATS_LOCK();
+ stats.arith_cmds ++;
+ if (settings.detail_enabled) {
+ stats_prefix_record_get(key, nkey, 0, false);
+ }
+ STATS_UNLOCK();
+ return "NOT_FOUND";
+ }
+
+ now = current_time;
/* the opengroup spec says that if we care about errno after strtol/strtoul, we have to zero it
* out beforehard. see http://www.opengroup.org/onlinepubs/000095399/functions/strtoul.html */
@@ -1833,18 +1836,38 @@ char *do_add_delta(item *it, const int incr, const unsigned int delta, char *buf
// arithmetic operations are essentially a set+get operation.
STATS_LOCK();
+ stats.arith_cmds ++;
+ stats.arith_hits ++;
+ stats.get_bytes += res;
stats_set(ITEM_nkey(it) + res, ITEM_nkey(it) + ITEM_nbytes(it));
stats_get(ITEM_nkey(it) + res);
+ if (settings.detail_enabled) {
+ stats_prefix_record_set(key, nkey);
+ stats_prefix_record_get(key, nkey, res, true);
+ if (res != ITEM_nbytes(it)) {
+ stats_prefix_record_byte_total_change(key, nkey, res - ITEM_nbytes(it), PREFIX_IS_OVERWRITE);
+ }
+ }
STATS_UNLOCK();
if (item_need_realloc(it, ITEM_nkey(it), ITEM_flags(it), res) ||
ITEM_refcount(it) > 1) {
/* need to realloc */
item *new_it;
- new_it = do_item_alloc(ITEM_key(it), ITEM_nkey(it),
+
+ if (settings.detail_enabled) {
+ STATS_LOCK();
+ /* because we're replacing an item, we need to bump the item count and
+ * re-add the byte count of the item block we're evicting.. */
+ stats_prefix_record_byte_total_change(key, nkey, ITEM_nkey(it) + ITEM_nbytes(it), PREFIX_INCR_ITEM_COUNT);
+ STATS_UNLOCK();
+ }
+
+ new_it = do_item_alloc(key, nkey,
ITEM_flags(it), ITEM_exptime(it),
res, addr);
if (new_it == 0) {
+ do_item_deref(it);
return "SERVER_ERROR out of memory";
}
item_memcpy_to(new_it, 0, buf, res, false);
@@ -1858,6 +1881,7 @@ char *do_add_delta(item *it, const int incr, const unsigned int delta, char *buf
do_try_item_stamp(it, now, addr);
}
+ do_item_deref(it);
return buf;
}
@@ -1905,7 +1929,7 @@ static void process_delete_command(conn* c, token_t *tokens, const size_t ntoken
STATS_LOCK();
if (settings.detail_enabled) {
- stats_prefix_record_delete(key);
+ stats_prefix_record_delete(key, nkey);
}
STATS_UNLOCK();
@@ -3019,6 +3043,10 @@ static void set_current_time(void) {
current_time = (rel_time_t) (time(0) - stats.started);
}
+static void update_stats(void) {
+ stats.byte_seconds += stats.item_total_size;
+}
+
static void clock_handler(const int fd, const short which, void *arg) {
struct timeval t = {.tv_sec = 1, .tv_usec = 0};
static bool initialized = false;
@@ -3035,6 +3063,7 @@ static void clock_handler(const int fd, const short which, void *arg) {
evtimer_add(&clockevent, &t);
set_current_time();
+ update_stats();
}
static struct event deleteevent;
View
9 memcached.h
@@ -134,6 +134,9 @@ struct stats_s {
uint64_t bytes_read;
uint64_t bytes_written;
+ uint64_t get_bytes;
+ uint64_t byte_seconds;
+
#define MEMORY_POOL(pool_enum, pool_counter, pool_string) uint64_t pool_counter;
#include "memory_pool_classes.h"
@@ -297,7 +300,8 @@ conn *do_conn_from_freelist();
bool do_conn_add_to_freelist(conn* c);
int do_defer_delete(item *item, time_t exptime);
void do_run_deferred_deletes(void);
-char *do_add_delta(item *item, int incr, const unsigned int delta, char *buf, uint32_t* res_val, const struct in_addr addr);
+char *do_add_delta(const char* key, const size_t nkey, const int incr, const unsigned int delta,
+ char *buf, uint32_t* res_val, const struct in_addr addr);
int do_store_item(item *item, int comm);
conn* conn_new(const int sfd, const int init_state, const int event_flags, const int read_buffer_size,
const bool is_udp, const bool is_binary,
@@ -345,7 +349,8 @@ void dispatch_conn_new(int sfd, int init_state, int event_flags,
const struct sockaddr* addr, socklen_t addrlen);
/* Lock wrappers for cache functions that are called from main loop. */
-char *mt_add_delta(item *item, const int incr, const unsigned int delta, char *buf, uint32_t *res_val, const struct in_addr addr);
+char *mt_add_delta(const char* key, const size_t nkey, const int incr, const unsigned int delta,
+ char *buf, uint32_t *res, const struct in_addr addr);
size_t mt_append_thread_stats(char* const buf, const size_t size, const size_t offset, const size_t reserved);
int mt_assoc_expire_regex(char *pattern);
void mt_assoc_move_next_bucket(void);
View
2  slabs.c
@@ -103,7 +103,7 @@ unsigned int slabs_clsid(const size_t size) {
* Given a slab class id, return the size of the chunk.
*/
unsigned int slabs_chunksize(const unsigned int clsid) {
- if (clsid > 0 && clsid < power_largest) {
+ if (clsid > 0 && clsid <= power_largest) {
return slabclass[clsid].size;
}
return 0;
View
21 slabs_items.c
@@ -116,7 +116,7 @@ void do_try_item_stamp(item* it, const rel_time_t now, const struct in_addr addr
/*@null@*/
-item *do_item_alloc(char *key, const size_t nkey, const int flags, const rel_time_t exptime,
+item *do_item_alloc(const char *key, const size_t nkey, const int flags, const rel_time_t exptime,
const size_t nbytes, const struct in_addr addr) {
item *it;
size_t ntotal = stritem_length + nkey + nbytes;
@@ -312,10 +312,10 @@ void do_item_unlink_impl(item *it, long flags, bool to_freelist) {
stats_expire(it->nkey + it->nbytes);
}
if (settings.detail_enabled) {
- stats_prefix_record_removal(ITEM_key(it), it->nkey + it->nbytes, it->time, flags);
+ stats_prefix_record_removal(ITEM_key(it), ITEM_nkey(it), it->nkey + it->nbytes, it->time, flags);
}
STATS_UNLOCK();
- assoc_delete(ITEM_key(it), it->nkey, it);
+ assoc_delete(ITEM_key(it), it->nkey);
item_unlink_q(it);
if (it->refcount == 0) {
item_free(it, to_freelist);
@@ -348,18 +348,9 @@ void do_item_update(item *it) {
}
int do_item_replace(item *it, item *new_it) {
- // If item is already unlinked by another thread, we'd get the current one.
- if ((it->it_flags & ITEM_LINKED) == 0) {
- it = assoc_find(ITEM_key(it), it->nkey);
- }
- // It's possible assoc_find at above finds no item associated with the key
- // any more. For example, when incr and delete is called at the same time,
- // item_get() gets an old item, but item is removed from assoc table in the
- // middle.
- if (it) {
- assert((it->it_flags & ITEM_SLABBED) == 0);
- do_item_unlink(it, UNLINK_NORMAL);
- }
+ assert((it->it_flags & ITEM_SLABBED) == 0);
+
+ do_item_unlink(it, UNLINK_NORMAL);
return do_item_link(new_it);
}
View
151 stats.c
@@ -32,12 +32,14 @@ struct _prefix_stats {
uint32_t num_items;
rel_time_t last_update;
uint64_t num_gets;
+ uint64_t num_hits;
uint64_t num_sets;
uint64_t num_deletes;
- uint64_t num_hits;
uint64_t num_evicts;
+ uint64_t num_overwrites;
+ uint64_t num_expires;
uint64_t num_bytes;
- uint64_t total_lifetime;
+ uint64_t bytes_txed;
uint64_t total_byte_seconds;
PREFIX_STATS *next;
};
@@ -109,25 +111,26 @@ void stats_prefix_clear() {
* in the list.
*/
/*@null@*/
-static PREFIX_STATS *stats_prefix_find(const char *key) {
+static PREFIX_STATS *stats_prefix_find(const char *key, const size_t nkey) {
PREFIX_STATS *pfs;
uint32_t hashval;
size_t length;
assert(key != NULL);
- for (length = 0; key[length] != '\0'; length++)
+ for (length = 0; length < nkey; length++)
if (key[length] == settings.prefix_delimiter)
break;
- if (key[length] == '\0') {
+ if (length == nkey) {
return &wildcard;
}
hashval = hash(key, length, 0) % PREFIX_HASH_SIZE;
for (pfs = prefix_stats[hashval]; NULL != pfs; pfs = pfs->next) {
- if (strncmp(pfs->prefix, key, length) == 0)
+ if (length == pfs->prefix_len &&
+ (strncmp(pfs->prefix, key, length) == 0))
return pfs;
}
@@ -137,7 +140,7 @@ static PREFIX_STATS *stats_prefix_find(const char *key) {
return NULL;
}
- pfs->prefix = pool_malloc_locking(false, length + 1, STATS_PREFIX_POOL);
+ pfs->prefix = pool_malloc_locking(false, length, STATS_PREFIX_POOL);
if (NULL == pfs->prefix) {
perror("Can't allocate space for copy of prefix: malloc");
pool_free_locking(false, pfs, sizeof(PREFIX_STATS) * 1, STATS_PREFIX_POOL);
@@ -145,7 +148,6 @@ static PREFIX_STATS *stats_prefix_find(const char *key) {
}
strncpy(pfs->prefix, key, length);
- pfs->prefix[length] = '\0'; /* because strncpy() sucks */
pfs->prefix_len = length;
pfs->next = prefix_stats[hashval];
@@ -160,14 +162,15 @@ static PREFIX_STATS *stats_prefix_find(const char *key) {
/*
* Records a "get" of a key.
*/
-void stats_prefix_record_get(const char *key, const bool is_hit) {
+void stats_prefix_record_get(const char *key, const size_t nkey, const size_t nbytes, const bool is_hit) {
PREFIX_STATS *pfs;
- pfs = stats_prefix_find(key);
+ pfs = stats_prefix_find(key, nkey);
if (NULL != pfs) {
pfs->num_gets++;
if (is_hit) {
pfs->num_hits++;
+ pfs->bytes_txed += nbytes;
}
}
}
@@ -175,10 +178,10 @@ void stats_prefix_record_get(const char *key, const bool is_hit) {
/*
* Records a "delete" of a key.
*/
-void stats_prefix_record_delete(const char *key) {
+void stats_prefix_record_delete(const char *key, const size_t nkey) {
PREFIX_STATS *pfs;
- pfs = stats_prefix_find(key);
+ pfs = stats_prefix_find(key, nkey);
if (NULL != pfs) {
pfs->num_deletes++;
}
@@ -187,10 +190,10 @@ void stats_prefix_record_delete(const char *key) {
/*
* Records a "set" of a key.
*/
-void stats_prefix_record_set(const char *key) {
+void stats_prefix_record_set(const char *key, const size_t nkey) {
PREFIX_STATS *pfs;
- pfs = stats_prefix_find(key);
+ pfs = stats_prefix_find(key, nkey);
if (NULL != pfs) {
/* item count cannot be incremented here because the set/add/replace may
* yet fail. */
@@ -201,61 +204,66 @@ void stats_prefix_record_set(const char *key) {
/*
* Records the change in byte total due to a "set" of a key.
*/
-void stats_prefix_record_byte_total_change(char *key, long bytes) {
+void stats_prefix_record_byte_total_change(const char *key, const size_t nkey, long bytes, int prefix_stats_flags) {
PREFIX_STATS *pfs;
- pfs = stats_prefix_find(key);
+ pfs = stats_prefix_find(key, nkey);
if (NULL != pfs) {
rel_time_t now = current_time;
- /*
- * increment total byte-seconds to reflect time elapsed since last
- * update.
- */
- pfs->total_byte_seconds += pfs->num_bytes * (now - pfs->last_update);
+ if (now != pfs->last_update) {
+ /*
+ * increment total byte-seconds to reflect time elapsed since last
+ * update.
+ */
+ pfs->total_byte_seconds += pfs->num_bytes * (now - pfs->last_update);
- /*
- * increment total lifetime to reflect time elapsed since last update.
- */
- pfs->total_lifetime += pfs->num_items * (now - pfs->last_update);
- pfs->last_update = now;
+ pfs->last_update = now;
+ }
/* add the byte count of the object that we're booting out. */
pfs->num_bytes += bytes;
- /* increment item count. */
- pfs->num_items ++;
+ if (prefix_stats_flags & PREFIX_INCR_ITEM_COUNT) {
+ /* increment item count. */
+ pfs->num_items ++;
+ }
+ if (prefix_stats_flags & PREFIX_IS_OVERWRITE) {
+ /* increment overwrite count. */
+ pfs->num_overwrites ++;
+ }
}
}
/*
* Records a "removal" of a key.
*/
-void stats_prefix_record_removal(char *key, size_t bytes, rel_time_t time, long flags) {
+void stats_prefix_record_removal(const char *key, const size_t nkey, size_t bytes, rel_time_t time, long flags) {
PREFIX_STATS *pfs;
- pfs = stats_prefix_find(key);
+ pfs = stats_prefix_find(key, nkey);
if (NULL != pfs) {
rel_time_t now = current_time;
if (flags & UNLINK_IS_EVICT) {
- pfs->num_evicts++;
+ pfs->num_evicts ++;
+ } else if (flags & UNLINK_IS_EXPIRED) {
+ pfs->num_expires ++;
}
- /*
- * increment total byte-seconds to reflect time elapsed since last
- * update.
- */
- pfs->total_byte_seconds += pfs->num_bytes * (now - pfs->last_update);
+ if (now != pfs->last_update) {
+ /*
+ * increment total byte-seconds to reflect time elapsed since last
+ * update.
+ */
+ pfs->total_byte_seconds += pfs->num_bytes * (now - pfs->last_update);
- /* increment total lifetime to reflect time elapsed since last update. */
- pfs->total_lifetime += pfs->num_items * (now - pfs->last_update);
- pfs->last_update = now;
+ pfs->last_update = now;
+ }
/* remove the byte count and the lifetime of the object that we're
* booting out. */
pfs->num_bytes -= bytes;
- pfs->total_lifetime -= (now - time);
/* increment item count. */
pfs->num_items --;
@@ -267,18 +275,19 @@ void stats_prefix_record_removal(char *key, size_t bytes, rel_time_t time, long
*/
/*@null@*/
char *stats_prefix_dump(int *length) {
- const char *format = "PREFIX %s item %u get %" PRINTF_INT64_MODIFIER \
+ const char *format = "PREFIX %.*s item %u get %" PRINTF_INT64_MODIFIER \
"u hit %" PRINTF_INT64_MODIFIER "u set %" PRINTF_INT64_MODIFIER \
"u del %" PRINTF_INT64_MODIFIER "u evict %" PRINTF_INT64_MODIFIER \
- "u bytes %" PRINTF_INT64_MODIFIER "u avrg lifetime %" PRINTF_INT64_MODIFIER \
+ "u ov %" PRINTF_INT64_MODIFIER "u exp %" PRINTF_INT64_MODIFIER \
+ "u bytes %" PRINTF_INT64_MODIFIER "u txed %" PRINTF_INT64_MODIFIER \
"u byte-seconds %" PRINTF_INT64_MODIFIER "u\r\n";
PREFIX_STATS *pfs;
char *buf;
int i;
size_t size, offset = 0;
- uint64_t lifetime;
const int format_len = sizeof("%" PRINTF_INT64_MODIFIER "u") - sizeof("");
char terminator[] = "END\r\n";
+ char wildcard_name[] = "*wildcard*";
rel_time_t now = current_time;
/*
@@ -288,10 +297,10 @@ char *stats_prefix_dump(int *length) {
* plus space for the "END" at the end.
*/
STATS_LOCK();
- size = strlen(format) + total_prefix_size +
- (num_prefixes + 1) * (strlen(format) - 2 /* %s */
- + 9 * (20 - format_len)) /* %llu replaced by 20-digit num */
- + sizeof("*wildcard*")
+ size = total_prefix_size +
+ (num_prefixes + 1) * (strlen(format)
+ + 11 * (20 - format_len)) /* %llu replaced by 20-digit num */
+ + sizeof(wildcard_name)
+ sizeof("END\r\n");
buf = malloc(size);
if (NULL == buf) {
@@ -303,57 +312,39 @@ char *stats_prefix_dump(int *length) {
for (i = 0; i < PREFIX_HASH_SIZE; i++) {
for (pfs = prefix_stats[i]; NULL != pfs; pfs = pfs->next) {
/*
- * increment total lifetime to reflect time elapsed since last update.
- * item count cannot be incremented here because the set/add/replace may
- * fail.
+ * increment total byte-seconds to reflect time elapsed since last
+ * update.
*/
- pfs->total_lifetime += pfs->num_items * (now - pfs->last_update);
pfs->total_byte_seconds += pfs->num_bytes * (now - pfs->last_update);
pfs->last_update = now;
- if (pfs->num_items == 0) {
- lifetime = 0;
- } else {
- lifetime = pfs->total_lifetime / pfs->num_items;
- }
-
offset = append_to_buffer(buf, size, offset, sizeof(terminator),
- format,
+ format, (int) pfs->prefix_len,
pfs->prefix, pfs->num_items, pfs->num_gets, pfs->num_hits,
pfs->num_sets, pfs->num_deletes, pfs->num_evicts,
- pfs->num_bytes, lifetime, pfs->total_byte_seconds);
+ pfs->num_overwrites, pfs->num_expires,
+ pfs->num_bytes, pfs->bytes_txed,
+ pfs->total_byte_seconds);
}
}
/*
- * increment total lifetime to reflect time elapsed since last update.
- * item count cannot be incremented here because the set/add/replace may
- * fail.
+ * increment total byte-seconds to reflect time elapsed since last update.
*/
- wildcard.total_lifetime += wildcard.num_items * (now - wildcard.last_update);
wildcard.total_byte_seconds += wildcard.num_bytes * (now - wildcard.last_update);
wildcard.last_update = now;
- if (wildcard.num_items == 0) {
- lifetime = 0;
- } else {
- lifetime = wildcard.total_lifetime / wildcard.num_items;
- }
-
- if (wildcard.num_items != 0 ||
- wildcard.num_gets != 0 ||
- wildcard.num_hits != 0 ||
+ if (wildcard.num_gets != 0 ||
wildcard.num_sets != 0 ||
- wildcard.num_deletes != 0 ||
- wildcard.num_evicts != 0 ||
- wildcard.num_bytes != 0 ||
- lifetime != 0 ||
- wildcard.total_byte_seconds != 0) {
+ wildcard.num_deletes != 0) {
offset = append_to_buffer(buf, size, offset, sizeof(terminator),
- format,
- "*wildcard*", wildcard.num_items, wildcard.num_gets, wildcard.num_hits,
+ format, (int) (sizeof(wildcard_name) - 1),
+ wildcard_name, wildcard.num_items,
+ wildcard.num_gets, wildcard.num_hits,
wildcard.num_sets, wildcard.num_deletes, wildcard.num_evicts,
- wildcard.num_bytes, lifetime, wildcard.total_byte_seconds);
+ wildcard.num_overwrites, wildcard.num_expires,
+ wildcard.num_bytes, wildcard.bytes_txed,
+ wildcard.total_byte_seconds);
}
STATS_UNLOCK();
View
16 stats.h
@@ -5,14 +5,20 @@
#include <assert.h>
+typedef enum prefix_stats_flags_e prefix_stats_flags_t;
+enum prefix_stats_flags_e {
+ PREFIX_INCR_ITEM_COUNT = 0x1,
+ PREFIX_IS_OVERWRITE = 0x2,
+};
+
/* stats */
extern void stats_prefix_init(void);
extern void stats_prefix_clear(void);
-extern void stats_prefix_record_get(const char *key, const bool is_hit);
-extern void stats_prefix_record_delete(const char *key);
-extern void stats_prefix_record_set(const char *key);
-extern void stats_prefix_record_byte_total_change(char *key, long bytes);
-extern void stats_prefix_record_removal(char *key, size_t bytes, rel_time_t time, long flags);
+extern void stats_prefix_record_get(const char *key, const size_t nkey, const size_t nbytes, const bool is_hit);
+extern void stats_prefix_record_delete(const char *key, const size_t nkey);
+extern void stats_prefix_record_set(const char *key, const size_t nkey);
+extern void stats_prefix_record_byte_total_change(const char *key, const size_t nkey, const long bytes, const int prefix_stats_flags);
+extern void stats_prefix_record_removal(const char *key, const size_t nkey, const size_t bytes, const rel_time_t time, const long flags);
/*@null@*/
extern char *stats_prefix_dump(int *length);
View
2  t/lib/MemcachedTest.pm
@@ -82,7 +82,7 @@ sub new_memcached {
my $args = shift || "";
my $port = free_port();
my $udpport = free_port("udp");
- $args .= " -p $port";
+ $args .= " -l 127.0.0.1 -p $port";
if (supports_udp()) {
$args .= " -U $udpport";
}
View
5 thread.c
@@ -469,11 +469,12 @@ int mt_defer_delete(item *item, time_t exptime) {
/*
* Does arithmetic on a numeric item value.
*/
-char *mt_add_delta(item *item, int incr, const unsigned int delta, char *buf, uint32_t *res, const struct in_addr addr) {
+char *mt_add_delta(const char* key, const size_t nkey, const int incr, const unsigned int delta,
+ char *buf, uint32_t *res, const struct in_addr addr) {
char *ret;
pthread_mutex_lock(&cache_lock);
- ret = do_add_delta(item, incr, delta, buf, res, addr);
+ ret = do_add_delta(key, nkey, incr, delta, buf, res, addr);
pthread_mutex_unlock(&cache_lock);
return ret;
}
Please sign in to comment.
Something went wrong with that request. Please try again.