
the flat allocator uses space inefficiently when there are keys that don't fit in a small chunk

Summary: If we size small chunks too large, we waste space on small items.  If we size them too small, keys that don't fit in a small chunk get auto-promoted to a large chunk, which is also wasteful.  So this change allows us to split keys across multiple chunks (a simplified sketch follows the tweak list below).
         
         minor tweaks:
         - use memcmp instead of strncmp, so I can stop having to look up how strncmp treats null termination (see the note after the assoc.c diff below).
         - #defined the format string for stats_prefix_dump so we can get compiler warnings when the format makes no sense (see the note after the stats.c diff below).
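
For orientation, here is a minimal sketch of the chunk-spanning idea, assuming an invented chunk layout (the real one lives in flat_storage.h; CHUNK_DATA_SZ and struct chunk below are hypothetical, not from the tree):

    /* A key longer than one chunk's data area spills into follow-on chunks;
     * comparison walks the chain chunk by chunk, in the spirit of
     * item_key_compare() in this change. */
    #include <stddef.h>
    #include <string.h>

    #define CHUNK_DATA_SZ 16              /* hypothetical per-chunk key capacity */

    struct chunk {
        struct chunk *next;               /* next chunk in the item's chain */
        char data[CHUNK_DATA_SZ];         /* key bytes (not NUL-terminated) */
    };

    static int chunked_key_compare(const struct chunk *c, size_t stored_nkey,
                                   const char *key, size_t nkey) {
        if (nkey != stored_nkey) {
            return (int) stored_nkey - (int) nkey;   /* lengths differ */
        }
        while (nkey > 0) {
            /* compare at most one chunk's worth of bytes per pass. */
            size_t n = nkey < CHUNK_DATA_SZ ? nkey : CHUNK_DATA_SZ;
            int rv = memcmp(c->data, key, n);        /* raw bytes, no NUL needed */
            if (rv != 0) {
                return rv;
            }
            key += n;
            nkey -= n;
            c = c->next;                  /* only dereferenced if bytes remain */
        }
        return 0;
    }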

Reviewed By: ps

Test Plan: libmcc test/test.py passes.
           flat storage unit tests pass.
           stress test passes.
           ran in production for a few days without any issues.

Revert: OK


git-svn-id: http://svn.facebook.com/svnroot/projects/memcached/trunk@112894 2c7ba8d8-a2f7-0310-a573-de162e16dcc7
commit 65292cdcc2baed6e52ee5abbcc4abe93d8063c91 (1 parent: 90e3f18)
ttung authored
127 assoc.c
@@ -505,8 +505,7 @@ item *assoc_find(const char *key, const size_t nkey) {
}
while (iptr) {
- if ((nkey == ITEM_nkey(ITEM(iptr))) &&
- (memcmp(key, ITEM_key(ITEM(iptr)), nkey) == 0)) {
+ if (item_key_compare(ITEM(iptr), key, nkey) == 0) {
return ITEM(iptr);
}
iptr = ITEM_PTR_h_next(iptr);
@@ -530,12 +529,58 @@ static item_ptr_t* _hashitem_before (const char *key, const size_t nkey) {
pos = &primary_hashtable[hv & hashmask(hashpower)];
}
- while (*pos && ((nkey != ITEM_nkey(ITEM(*pos))) || memcmp(key, ITEM_key(ITEM(*pos)), nkey))) {
+ while (*pos && item_key_compare(ITEM(*pos), key, nkey)) {
pos = ITEM_h_next_p(ITEM(*pos));
}
return pos;
}
+
+/* returns the address of the item pointer before it. if *item == 0,
+ the item wasn't found */
+static item_ptr_t* _hashitem_before_item (item* it) {
+ /* this is one of the few times we totally break the storage layer
+ * abstraction. the only way we could do this cleanly is to either:
+ *
+ * 1) Have the flat allocator malloc a block of memory and use that to
+ * duplicate the key. This is inefficient because malloc is an overkill
+ * for this.
+ *
+ * 2) Have the slab allocator copy out the key as well. This is
+ * inefficient because it is totally unnecessary and unfairly punishes
+ * the slab allocator.
+ */
+#if defined(USE_FLAT_ALLOCATOR)
+ char key_temp[KEY_MAX_LENGTH];
+#endif /* #if defined(USE_FLAT_ALLOCATOR) */
+ const char* key;
+ uint32_t hv;
+ item_ptr_t* pos;
+ unsigned int oldbucket;
+
+#if defined(USE_FLAT_ALLOCATOR)
+ key = item_key_copy(it, key_temp);
+#endif /* #if defined(USE_FLAT_ALLOCATOR) */
+#if defined(USE_SLAB_ALLOCATOR)
+ key = ITEM_key(it);
+#endif /* #if defined(USE_SLAB_ALLOCATOR) */
+ hv = hash(key, ITEM_nkey(it), 0);
+
+ if (expanding &&
+ (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
+ {
+ pos = &old_hashtable[oldbucket];
+ } else {
+ pos = &primary_hashtable[hv & hashmask(hashpower)];
+ }
+
+ while (*pos && (ITEM(*pos) != it)) {
+ pos = ITEM_h_next_p(ITEM(*pos));
+ }
+ return pos;
+}
+
+
/* grows the hashtable to the next power of 2. */
static void assoc_expand(void) {
old_hashtable = primary_hashtable;
@@ -558,12 +603,34 @@ static void assoc_expand(void) {
void do_assoc_move_next_bucket(void) {
item_ptr_t iptr, next;
int bucket;
+ /* this is one of the few times we totally break the storage layer
+ * abstraction. the only way we could do this cleanly is to either:
+ *
+ * 1) Have the flat allocator malloc a block of memory and use that to
+ * duplicate the key. This is inefficient because malloc is an overkill
+ * for this.
+ *
+ * 2) Have the slab allocator copy out the key as well. This is
+ * inefficient because it is totally unnecessary and unfairly punishes
+ * the slab allocator.
+ */
+#if defined(USE_FLAT_ALLOCATOR)
+ char key_temp[KEY_MAX_LENGTH];
+#endif /* #if defined(USE_FLAT_ALLOCATOR) */
+ const char* key;
if (expanding) {
for (iptr = old_hashtable[expand_bucket]; ITEM_PTR_IS_NULL(iptr); iptr = next) {
next = ITEM_PTR_h_next(iptr);
- bucket = hash(ITEM_key(ITEM(iptr)), ITEM_nkey(ITEM(iptr)), 0) & hashmask(hashpower);
+#if defined(USE_FLAT_ALLOCATOR)
+ key = item_key_copy(ITEM(iptr), key_temp);
+#endif /* #if defined(USE_FLAT_ALLOCATOR) */
+#if defined(USE_SLAB_ALLOCATOR)
+ key = ITEM_key(ITEM(iptr));
+#endif /* #if defined(USE_SLAB_ALLOCATOR) */
+
+ bucket = hash(key, ITEM_nkey(ITEM(iptr)), 0) & hashmask(hashpower);
ITEM_set_h_next(ITEM(iptr), primary_hashtable[bucket]);
primary_hashtable[bucket] = iptr;
}
@@ -583,13 +650,13 @@ void do_assoc_move_next_bucket(void) {
}
/* Note: this isn't an assoc_update. The key must not already exist to call this */
-int assoc_insert(item *it) {
+int assoc_insert(item *it, const char* key) {
uint32_t hv;
unsigned int oldbucket;
- assert(assoc_find(ITEM_key(it), ITEM_nkey(it)) == 0); /* shouldn't have duplicately named things defined */
+ assert(assoc_find(key, ITEM_nkey(it)) == 0); /* shouldn't have duplicately named things defined */
- hv = hash(ITEM_key(it), ITEM_nkey(it), 0);
+ hv = hash(key, ITEM_nkey(it), 0);
if (expanding &&
(oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
{
@@ -612,17 +679,12 @@ int assoc_insert(item *it) {
/* given item it, replace the mapping from (ITEM_key(it), ITEM_nkey(it)) ->
* old_it with (ITEM_key(it), ITEM_nkey(it)) -> it. returns old_it.
*/
-item* assoc_update(item *it) {
- item_ptr_t* before = _hashitem_before(ITEM_key(it), ITEM_nkey(it));
- item* old_it;
-
- assert(before != NULL);
-
- old_it = ITEM(*before);
+void assoc_update(item* old_it, item *it) {
+ item_ptr_t* before = _hashitem_before_item(old_it);
+ assert(before != NULL &&
+ ITEM(*before) == old_it);
*before = ITEM_PTR(it);
-
- return old_it;
}
@@ -646,12 +708,34 @@ int do_assoc_expire_regex(char *pattern) {
regex_t regex;
int bucket;
item_ptr_t iptr;
+ /* this is one of the few times we totally break the storage layer
+ * abstraction. the only way we could do this cleanly is to either:
+ *
+ * 1) Have the flat allocator malloc a block of memory and use that to
+ * duplicate the key. This is inefficient because malloc is an overkill
+ * for this.
+ *
+ * 2) Have the slab allocator copy out the key as well. This is
+ * inefficient because it is totally unnecessary and unfairly punishes
+ * the slab allocator.
+ */
+#if defined(USE_FLAT_ALLOCATOR)
+ char key_temp[KEY_MAX_LENGTH];
+#endif /* #if defined(USE_FLAT_ALLOCATOR) */
+ const char* key;
if (regcomp(&regex, pattern, REG_EXTENDED | REG_NOSUB))
return 0;
for (bucket = 0; bucket < hashsize(hashpower); bucket++) {
for (iptr = primary_hashtable[bucket]; ITEM_PTR_IS_NULL(iptr); iptr = ITEM_PTR_h_next(iptr)) {
- if (regexec(&regex, ITEM_key(ITEM(iptr)), 0, NULL, 0) == 0) {
+#if defined(USE_FLAT_ALLOCATOR)
+ key = item_key_copy(ITEM(iptr), key_temp);
+#endif /* #if defined(USE_FLAT_ALLOCATOR) */
+#if defined(USE_SLAB_ALLOCATOR)
+ key = ITEM_key(ITEM(iptr));
+#endif /* #if defined(USE_SLAB_ALLOCATOR) */
+
+ if (regexec(&regex, key, 0, NULL, 0) == 0) {
/* the item matches; mark it expired. */
ITEM_set_exptime(ITEM(iptr), 1);
}
@@ -660,7 +744,14 @@ int do_assoc_expire_regex(char *pattern) {
if (expanding) {
for (bucket = expand_bucket; bucket < hashsize(hashpower-1); bucket++) {
for (iptr = old_hashtable[bucket]; ITEM_PTR_IS_NULL(iptr); iptr = ITEM_PTR_h_next(iptr)) {
- if (regexec(&regex, ITEM_key(ITEM(iptr)), 0, NULL, 0) == 0) {
+#if defined(USE_FLAT_ALLOCATOR)
+ key = item_key_copy(ITEM(iptr), key_temp);
+#endif /* #if defined(USE_FLAT_ALLOCATOR) */
+#if defined(USE_SLAB_ALLOCATOR)
+ key = ITEM_key(ITEM(iptr));
+#endif /* #if defined(USE_SLAB_ALLOCATOR) */
+
+ if (regexec(&regex, key, 0, NULL, 0) == 0) {
/* the item matches; mark it expired. */
ITEM_set_exptime(ITEM(iptr), 1);
}
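
A note on the memcmp-over-strncmp tweak used by the key comparisons above: strncmp stops at the first NUL byte, while memcmp compares exactly n raw bytes, which is the right semantic for length-delimited keys that are not NUL-terminated. A small standalone illustration (not from the tree):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        /* two length-4 byte strings that differ only after an embedded NUL. */
        const char a[4] = { 'k', '\0', '1', '2' };
        const char b[4] = { 'k', '\0', '9', '9' };

        printf("strncmp: %d\n", strncmp(a, b, 4)); /* 0: stops at the NUL */
        printf("memcmp:  %d\n", memcmp(a, b, 4));  /* nonzero: compares all 4 bytes */
        return 0;
    }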
4 assoc.h
@@ -7,8 +7,8 @@
/* associative array */
void assoc_init(void);
item *assoc_find(const char *key, const size_t nkey);
-int assoc_insert(item *item);
-item* assoc_update(item *it);
+int assoc_insert(item *item, const char* key);
+void assoc_update(item* old_it, item *it);
void assoc_delete(const char *key, const size_t nkey);
void do_assoc_move_next_bucket(void);
uint32_t hash( const void *key, size_t length, const uint32_t initval);
12 binary_sm.c
@@ -531,7 +531,7 @@ static inline bp_handler_res_t handle_direct_receive(conn* c)
}
if (settings.verbose > 1) {
- fprintf(stderr, ">%d receiving key %*s\n", c->sfd,
+ fprintf(stderr, ">%d receiving key %.*s\n", c->sfd,
c->u.key_value_req.keylen, c->bp_key);
}
@@ -868,13 +868,13 @@ static void handle_get_cmd(conn* c)
ITEM_nbytes(it)); // chop off the '\r\n'
if (add_iov(c, rep, sizeof(value_rep_t), true) ||
- add_item_to_iov(c, it, false /* don't send cr-lf */)) {
+ add_item_value_to_iov(c, it, false /* don't send cr-lf */)) {
bp_write_err_msg(c, "couldn't build response");
return;
}
if (settings.verbose > 1) {
- fprintf(stderr, ">%d sending key %s\n", c->sfd, ITEM_key(it));
+ fprintf(stderr, ">%d sending key %*s\n", c->sfd, (int) nkey, c->bp_key);
}
} else {
if (c->u.key_req.cmd == BP_GET_CMD) {
@@ -944,9 +944,9 @@ static void handle_update_cmd(conn* c)
}
if (settings.verbose > 1) {
- fprintf(stderr, ">%d received key %s\n", c->sfd, c->bp_key);
+ fprintf(stderr, ">%d received key %*s\n", c->sfd, c->u.key_value_req.keylen, c->bp_key);
}
- if (store_item(it, comm)) {
+ if (store_item(it, comm, c->bp_key)) {
rep->status = mcc_res_stored;
} else {
rep->status = mcc_res_notstored;
@@ -1003,7 +1003,7 @@ static void handle_delete_cmd(conn* c)
stats_delete(ITEM_nkey(it) + ITEM_nbytes(it));
STATS_UNLOCK();
- item_unlink(it, UNLINK_NORMAL);
+ item_unlink(it, UNLINK_NORMAL, c->bp_key);
item_deref(it); // release our reference
rep->status = mcc_res_deleted;
} else {
2  configure.ac
@@ -1,5 +1,5 @@
AC_PREREQ(2.52)
-AC_INIT(memcached, 1.2.3d-cr4, brad@danga.com)
+AC_INIT(memcached, 1.2.3d, brad@danga.com)
AC_CANONICAL_SYSTEM
AC_CONFIG_SRCDIR(memcached.c)
AM_INIT_AUTOMAKE(AC_PACKAGE_NAME, AC_PACKAGE_VERSION)
135 flat_storage.c
@@ -551,11 +551,12 @@ static coalesce_progress_t coalesce_free_small_chunks(void) {
replacement_chunkptr = get_chunkptr(_replacement);
if (iter->flags & SMALL_CHUNK_TITLE) {
- item* new_it;
+ item* new_it, * old_it;
chunk_t* next, * prev;
small_chunk_t* next_chunk;
new_it = get_item_from_small_title(&(replacement->sc_title));
+ old_it = get_item_from_small_title(&(iter->sc_title));
/* edit the forward and backward links. */
if (replacement->sc_title.next != NULL_CHUNKPTR) {
@@ -587,7 +588,7 @@ static coalesce_progress_t coalesce_free_small_chunks(void) {
replacement->flags |= (SMALL_CHUNK_USED | SMALL_CHUNK_TITLE);
/* do the replacement in the mapping. */
- assoc_update(new_it);
+ assoc_update(old_it, new_it);
} else {
/* body block. this is more straightforward */
small_chunk_t* prev_chunk = &(get_chunk_address(replacement->sc_body.prev_chunk))->sc;
@@ -643,7 +644,7 @@ static bool flat_storage_lru_evict(chunk_type_t chunk_type, size_t nchunks) {
/* nothing to release, so we just fail. */
return false;
}
- do_item_unlink(lru_item, UNLINK_MAYBE_EVICT);
+ do_item_unlink(lru_item, UNLINK_MAYBE_EVICT, NULL);
/* do we have enough free chunks to leave this loop? */
switch (chunk_type) {
@@ -714,7 +715,7 @@ void item_memcpy_to(item* it, size_t offset, const void* src, size_t nbytes,
memcpy((ptr), src, bytes); \
src += bytes;
- ITEM_WALK(it, offset, nbytes, beyond_item_boundary, MEMCPY_TO_APPLIER, );
+ ITEM_WALK(it, it->empty_header.nkey + offset, nbytes, beyond_item_boundary, MEMCPY_TO_APPLIER, );
#undef MEMCPY_TO_APPLIER
}
@@ -725,12 +726,35 @@ void item_memcpy_from(void* dst, const item* it, size_t offset, size_t nbytes,
memcpy(dst, (ptr), bytes); \
dst += bytes;
- ITEM_WALK(it, offset, nbytes, beyond_item_boundary, MEMCPY_FROM_APPLIER, const);
+ ITEM_WALK(it, it->empty_header.nkey + offset, nbytes, beyond_item_boundary, MEMCPY_FROM_APPLIER, const);
#undef MEMCPY_FROM_APPLIER
}
+int item_key_compare(const item* it, const char* key, const size_t nkey) {
+ if (nkey != it->empty_header.nkey) {
+ return it->empty_header.nkey - nkey;
+ }
+
+#define ITEM_KEY_COMPARE_APPLIER(it, ptr, bytes) \
+ do { \
+ int retval; \
+ \
+ if ((retval = memcmp(ptr, key, bytes)) != 0) { \
+ return retval; \
+ } \
+ \
+ key += bytes; \
+ } while (0);
+
+ ITEM_WALK(it, 0, nkey, 0, ITEM_KEY_COMPARE_APPLIER, const);
+#undef ITEM_KEY_COMPARE_APPLIER
+
+ return 0;
+}
+
+
void do_try_item_stamp(item* it, rel_time_t now, const struct in_addr addr) {
int slack;
size_t offset = 0;
@@ -788,6 +812,7 @@ item* do_item_alloc(const char *key, const size_t nkey, const int flags, const r
large_body_chunk_t* body;
chunkptr_t* prev_next;
size_t write_offset = nkey + nbytes;
+ size_t key_left = nkey, key_write;
while (fsi.large_free_list_sz < needed) {
assert(prev_free != fsi.large_free_list_sz);
@@ -826,11 +851,15 @@ item* do_item_alloc(const char *key, const size_t nkey, const int flags, const r
title->it_flags = ITEM_VALID;
title->nkey = nkey;
title->nbytes = nbytes;
- memcpy(title->data, key, nkey);
title->exptime = exptime;
title->flags = flags;
prev_next = &title->next_chunk;
+ key_write = __fs_MIN(LARGE_TITLE_CHUNK_DATA_SZ, key_left);
+ memcpy(title->data, key, key_write);
+ key_left -= key_write;
+ key += key_write;
+
if (needed == 1) {
title->it_flags |= do_stamp_on_block(title->data, write_offset, LARGE_TITLE_CHUNK_DATA_SZ,
current_time, addr);
@@ -851,6 +880,11 @@ item* do_item_alloc(const char *key, const size_t nkey, const int flags, const r
*(prev_next) = get_chunkptr(temp);
prev_next = &body->next_chunk;
+ key_write = __fs_MIN(LARGE_BODY_CHUNK_DATA_SZ, key_left);
+ memcpy(body->data, key, key_write);
+ key_left -= key_write;
+ key += key_write;
+
if (needed == 1) {
title->it_flags |= do_stamp_on_block(body->data, write_offset, LARGE_BODY_CHUNK_DATA_SZ,
current_time, addr);
@@ -880,6 +914,7 @@ item* do_item_alloc(const char *key, const size_t nkey, const int flags, const r
chunkptr_t prev;
chunkptr_t* prev_next;
size_t write_offset = nkey + nbytes;
+ size_t key_left = nkey, key_write;
while (fsi.small_free_list_sz < needed) {
assert(small_prev_free != fsi.small_free_list_sz ||
@@ -919,12 +954,16 @@ item* do_item_alloc(const char *key, const size_t nkey, const int flags, const r
title->it_flags = ITEM_VALID;
title->nkey = nkey;
title->nbytes = nbytes;
- memcpy(title->data, key, nkey);
title->exptime = exptime;
title->flags = flags;
prev = get_chunkptr(temp);
prev_next = &title->next_chunk;
+ key_write = __fs_MIN(SMALL_TITLE_CHUNK_DATA_SZ, key_left);
+ memcpy(title->data, key, key_write);
+ key_left -= key_write;
+ key += key_write;
+
if (needed == 1) {
title->it_flags |= do_stamp_on_block(title->data, write_offset, SMALL_TITLE_CHUNK_DATA_SZ,
current_time, addr);
@@ -950,6 +989,11 @@ item* do_item_alloc(const char *key, const size_t nkey, const int flags, const r
prev_next = &body->next_chunk;
prev = current_chunkptr;
+ key_write = __fs_MIN(SMALL_BODY_CHUNK_DATA_SZ, key_left);
+ memcpy(body->data, key, key_write);
+ key_left -= key_write;
+ key += key_write;
+
if (needed == 1) {
title->it_flags |= do_stamp_on_block(body->data, write_offset, SMALL_BODY_CHUNK_DATA_SZ,
current_time, addr);
@@ -1121,13 +1165,13 @@ static void item_unlink_q(item* it) {
/**
* adds the item to the LRU.
*/
-int do_item_link(item* it) {
+int do_item_link(item* it, const char* key) {
assert(it->empty_header.it_flags & ITEM_VALID);
assert((it->empty_header.it_flags & ITEM_LINKED) == 0);
it->empty_header.it_flags |= ITEM_LINKED;
it->empty_header.time = current_time;
- assoc_insert(it);
+ assoc_insert(it, key);
STATS_LOCK();
stats.item_total_size += ITEM_nkey(it) + ITEM_nbytes(it);
@@ -1141,7 +1185,18 @@ int do_item_link(item* it) {
}
-void do_item_unlink(item* it, long flags) {
+/*
+ * unlink an item from the LRU and the assoc table. because there is a race
+ * condition between item_get(..) and item_unlink(..) in
+ * process_delete_command(..), we must use the key to look up in the assoc table
+ * to ensure that we are deleting the correct item.
+ */
+void do_item_unlink(item* it, long flags, const char* key) {
+ char key_temp[KEY_MAX_LENGTH];
+ if (key == NULL) {
+ key = item_key_copy(it, key_temp);
+ }
+
assert(it->empty_header.it_flags & ITEM_VALID);
/*
* this test (& ITEM_LINKED) must be here because the cache lock is not held
@@ -1173,10 +1228,10 @@ void do_item_unlink(item* it, long flags) {
stats_expire(ITEM_nkey(it) + ITEM_nbytes(it));
}
if (settings.detail_enabled) {
- stats_prefix_record_removal(ITEM_key(it), ITEM_nkey(it), ITEM_nkey(it) + ITEM_nbytes(it), it->empty_header.time, flags);
+ stats_prefix_record_removal(key, ITEM_nkey(it), ITEM_nkey(it) + ITEM_nbytes(it), it->empty_header.time, flags);
}
STATS_UNLOCK();
- assoc_delete(ITEM_key(it), ITEM_nkey(it));
+ assoc_delete(key, ITEM_nkey(it));
it->empty_header.h_next = NULL_ITEM_PTR;
item_unlink_q(it);
if (it->empty_header.refcount == 0) {
@@ -1216,17 +1271,15 @@ void do_item_update(item* it) {
}
}
-int do_item_replace(item* it, item* new_it) {
+int do_item_replace(item* it, item* new_it, const char* key) {
int retval;
- // though there might not be a current one if the other thread did a
- // delete.
assert((it->empty_header.it_flags & (ITEM_VALID | ITEM_LINKED)) ==
(ITEM_VALID | ITEM_LINKED));
- do_item_unlink(it, UNLINK_NORMAL);
+ do_item_unlink(it, UNLINK_NORMAL, key);
assert(new_it->empty_header.it_flags & ITEM_VALID);
- retval = do_item_link(new_it);
+ retval = do_item_link(new_it, key);
return retval;
}
@@ -1239,7 +1292,8 @@ char* do_item_cachedump(const chunk_type_t type, const unsigned int limit, unsig
unsigned int len;
unsigned int shown = 0;
char temp[512];
- char key_tmp[KEY_MAX_LENGTH + 1 /* for null terminator */];
+ char key_temp[KEY_MAX_LENGTH];
+ const char* key;
buffer = malloc((size_t)memlimit);
if (buffer == 0) return NULL;
@@ -1248,9 +1302,10 @@ char* do_item_cachedump(const chunk_type_t type, const unsigned int limit, unsig
it = fsi.lru_head;
while (it != NULL && (limit == 0 || shown < limit)) {
- memcpy(key_tmp, ITEM_key(it), ITEM_nkey(it));
- key_tmp[ITEM_nkey(it)] = 0; /* null terminate */
- len = snprintf(temp, sizeof(temp), "ITEM %s [%d b; %lu s]\r\n", key_tmp, ITEM_nbytes(it), it->empty_header.time + stats.started);
+ key = item_key_copy(it, key_temp);
+ len = snprintf(temp, sizeof(temp), "ITEM %.*s [%d b; %lu s]\r\n",
+ ITEM_nkey(it), key,
+ ITEM_nbytes(it), it->empty_header.time + stats.started);
if (bufcurr + len + 6 > memlimit) /* 6 is END\r\n\0 */
break;
strcpy(buffer + bufcurr, temp);
@@ -1326,7 +1381,7 @@ void do_item_flush_expired(void) {
next = get_item_from_chunk(get_chunk_address(iter->small_title.next));
assert( (iter->empty_header.it_flags & (ITEM_VALID | ITEM_LINKED)) ==
(ITEM_VALID | ITEM_LINKED) );
- do_item_unlink(iter, UNLINK_IS_EXPIRED);
+ do_item_unlink(iter, UNLINK_IS_EXPIRED, NULL);
} else {
/* We've hit the first old item. Continue to the next queue. */
break;
@@ -1340,7 +1395,7 @@ void do_item_flush_expired(void) {
next = get_item_from_chunk(get_chunk_address(iter->large_title.next));
assert( (iter->empty_header.it_flags & (ITEM_VALID | ITEM_LINKED)) ==
(ITEM_VALID | ITEM_LINKED) );
- do_item_unlink(iter, UNLINK_IS_EXPIRED);
+ do_item_unlink(iter, UNLINK_IS_EXPIRED, NULL);
} else {
/* We've hit the first old item. Continue to the next queue. */
break;
@@ -1368,11 +1423,11 @@ item* do_item_get_notedeleted(const char* key, const size_t nkey, bool* delete_l
}
if (it != NULL && settings.oldest_live != 0 && settings.oldest_live <= current_time &&
it->empty_header.time <= settings.oldest_live) {
- do_item_unlink(it, UNLINK_IS_EXPIRED); /* MTSAFE - cache_lock held */
+ do_item_unlink(it, UNLINK_IS_EXPIRED, key); /* MTSAFE - cache_lock held */
it = NULL;
}
if (it != NULL && it->empty_header.exptime != 0 && it->empty_header.exptime <= current_time) {
- do_item_unlink(it, UNLINK_IS_EXPIRED); /* MTSAFE - cache_lock held */
+ do_item_unlink(it, UNLINK_IS_EXPIRED, key); /* MTSAFE - cache_lock held */
it = NULL;
}
@@ -1399,6 +1454,36 @@ bool item_delete_lock_over(item* it) {
}
+/**
+ * returns a pointer to the key, flattened into a single array. if the key
+ * spans multiple chunks, it is copied into space pointed to by keyptr.
+ * otherwise, the key is returned directly.
+ */
+const char* item_key_copy(const item* it, char* keyptr) {
+ const char* retval = keyptr;
+ size_t title_data_size;
+
+ if (is_item_large_chunk(it)) {
+ title_data_size = LARGE_TITLE_CHUNK_DATA_SZ;
+ if (it->large_title.nkey <= title_data_size) {
+ return &it->large_title.data[0];
+ }
+ } else {
+ title_data_size = SMALL_TITLE_CHUNK_DATA_SZ;
+ if (it->small_title.nkey <= title_data_size) {
+ return &it->small_title.data[0];
+ }
+ }
+#define ITEM_key_copy_applier(it, ptr, bytes) \
+ memcpy(keyptr, ptr, bytes); \
+ keyptr += bytes;
+
+ ITEM_WALK(it, 0, it->empty_header.nkey, false, ITEM_key_copy_applier, const);
+
+ return retval;
+}
+
+
char* do_flat_allocator_stats(size_t* result_size) {
size_t bufsize = 2048, offset = 0, i;
char* buffer = malloc(bufsize);
139 flat_storage.h
@@ -392,10 +392,6 @@ static inline void DEBUG_CLEAR(void* ptr, const size_t bytes) {
static inline bool is_large_chunk(const size_t nkey, const size_t nbytes) {
size_t small_chunks_max_size;
- if (nkey > SMALL_TITLE_CHUNK_DATA_SZ) {
- return true;
- }
-
// calculate how many bytes (SMALL_CHUNKS_PER_LARGE_CHUNK - 1) small chunks
// can hold. any larger and it is simpler and better to use a large chunk.
// note that one of the small chunks is taken up by the header.
@@ -447,24 +443,32 @@ static inline size_t chunks_in_item(const item* it) {
/* returns the number of chunks in the item. */
static inline size_t data_chunks_in_item(const item* it) {
size_t count = chunks_in_item(it);
- size_t title_data_size;
+ size_t key_only_chunks;
+ size_t title_data_sz;
/* if we have no data, return 0. */
if (it->empty_header.nbytes == 0) {
return 0;
}
+ /* exclude chunks taken up entirely by the key */
if (is_item_large_chunk(it)) {
- title_data_size = LARGE_TITLE_CHUNK_DATA_SZ;
+ title_data_sz = LARGE_TITLE_CHUNK_DATA_SZ;
+ if (it->empty_header.nkey < title_data_sz) {
+ key_only_chunks = 0;
+ } else {
+ key_only_chunks = 1 + ((it->empty_header.nkey - LARGE_TITLE_CHUNK_DATA_SZ) / LARGE_BODY_CHUNK_DATA_SZ);
+ }
} else {
- title_data_size = SMALL_TITLE_CHUNK_DATA_SZ;
+ title_data_sz = SMALL_TITLE_CHUNK_DATA_SZ;
+ if (it->empty_header.nkey < title_data_sz) {
+ key_only_chunks = 0;
+ } else {
+ key_only_chunks = 1 + ((it->empty_header.nkey - SMALL_TITLE_CHUNK_DATA_SZ) / SMALL_BODY_CHUNK_DATA_SZ);
+ }
}
- /* if the key takes the entirety of the title block, then we don't count
- * that one. */
- if (title_data_size == it->empty_header.nkey) {
- count --;
- }
+ count -= key_only_chunks;
return count;
}
@@ -612,13 +616,6 @@ static inline const chunk_t* get_chunk_from_small_chunk_const(const small_chunk_
static inline item* ITEM(item_ptr_t iptr) { return get_item_from_chunk(get_chunk_address( (chunkptr_t) iptr)); }
static inline item_ptr_t ITEM_PTR(item* it) { return (item_ptr_t) get_chunkptr(get_chunk_from_item(it)); }
static inline bool ITEM_PTR_IS_NULL(item_ptr_t iptr) { return iptr != NULL_ITEM_PTR; }
-static inline char* ITEM_key(item* it) {
- if (is_item_large_chunk(it)) {
- return it->large_title.data;
- } else {
- return it->small_title.data;
- }
-}
static inline uint8_t ITEM_nkey(item* it) { return it->empty_header.nkey; }
static inline int ITEM_nbytes(item* it) { return it->empty_header.nbytes; }
@@ -676,6 +673,7 @@ static inline void ITEM_clear_has_ip_address(item* it) { it->empty_header.it_f
extern void flat_storage_init(size_t maxbytes);
extern char* do_item_cachedump(const chunk_type_t type, const unsigned int limit, unsigned int *bytes);
+extern const char* item_key_copy(const item* it, char* keyptr);
DECL_MT_FUNC(char*, flat_allocator_stats, (size_t* bytes));
@@ -722,57 +720,28 @@ static inline size_t __fs_MAX(size_t a, size_t b) {
/* these are the offsets to the start of the data value. */ \
size_t start_offset, end_offset; \
size_t left = (_nbytes); /* bytes left to copy */ \
- /* this is a stupid kludge because if we directly test */ \
- /* nkey < LARGE_TITLE_CHUNK_DATA_SZ, it might always return */ \
- /* true. this offends the compiler, so here we go. */ \
- size_t title_data_size; \
\
- assert((_it)->empty_header.nbytes >= (_offset) + (_nbytes) || (_beyond_item_boundary)); \
+ assert((_it)->empty_header.nkey + (_it)->empty_header.nbytes >= \
+ (_offset) + (_nbytes) || (_beyond_item_boundary)); \
\
- /* if we have no bytes in the item and we have no bytes to */ \
- /* copy, then skip. */ \
- if ((_it)->empty_header.nbytes == 0 && \
- left == 0) { \
+ /* if we have nothing to copy, then skip. */ \
+ if (left == 0) { \
break; \
} \
\
if (is_item_large_chunk((_it))) { \
/* large chunk handling code. */ \
\
- title_data_size = LARGE_TITLE_CHUNK_DATA_SZ; \
- /* is there any data in the title block? */ \
- if ((_it)->empty_header.nkey < title_data_size) { \
- /* some data in the title block. */ \
- next = get_chunk_address((_it)->empty_header.next_chunk); \
- ptr = &(_it)->large_title.data[(_it)->empty_header.nkey]; \
- start_offset = 0; \
- if (next == NULL && (_beyond_item_boundary)) { \
- end_offset = LARGE_TITLE_CHUNK_DATA_SZ - \
- ((_it)->empty_header.nkey) - 1; \
- } else { \
- end_offset = __fs_MIN((_offset) + (_nbytes), \
- start_offset + LARGE_TITLE_CHUNK_DATA_SZ - ((_it)->empty_header.nkey)) - 1; \
- } \
- to_scan = end_offset - start_offset + 1; \
+ next = get_chunk_address((_it)->empty_header.next_chunk); \
+ ptr = &(_it)->large_title.data[0]; \
+ start_offset = 0; \
+ if (next == NULL && (_beyond_item_boundary)) { \
+ end_offset = LARGE_TITLE_CHUNK_DATA_SZ - 1; \
} else { \
- /* no data in the title block, that means the key is */ \
- /* exactly the same size as */ \
- /* LARGE_TITLE_CHUNK_DATA_SZ. */ \
- next = get_chunk_address((_it)->empty_header.next_chunk); \
- assert( (LARGE_CHUNK_INITIALIZED | LARGE_CHUNK_USED) == next->lc.flags ); \
- ptr = next->lc.lc_body.data; \
- start_offset = 0; \
- \
- /* move the next pointer. */ \
- next = get_chunk_address(next->lc.lc_body.next_chunk); \
- if (next == NULL && (_beyond_item_boundary)) { \
- end_offset = LARGE_BODY_CHUNK_DATA_SZ - 1; \
- } else { \
- end_offset = __fs_MIN((_offset) + (_nbytes), \
- LARGE_BODY_CHUNK_DATA_SZ) - 1; \
- } \
- to_scan = end_offset - start_offset + 1; \
+ end_offset = __fs_MIN((_offset) + (_nbytes), \
+ start_offset + LARGE_TITLE_CHUNK_DATA_SZ) - 1; \
} \
+ to_scan = end_offset - start_offset + 1; \
\
/* advance over pages writing while doing the requested action. */ \
do { \
@@ -808,46 +777,21 @@ static inline size_t __fs_MAX(size_t a, size_t b) {
start_offset + LARGE_BODY_CHUNK_DATA_SZ) - 1; \
} \
to_scan = end_offset - start_offset + 1; \
- } while (start_offset <= (_it)->empty_header.nbytes); \
+ } while (start_offset <= ((_it)->empty_header.nkey + \
+ (_it)->empty_header.nbytes)); \
} else { \
/* small chunk handling code. */ \
\
- title_data_size = SMALL_TITLE_CHUNK_DATA_SZ; \
- /* is there any data in the title block? */ \
- if ((_it)->empty_header.nkey < title_data_size) { \
- /* some data in the title block. */ \
- next = get_chunk_address((_it)->empty_header.next_chunk); \
- ptr = &(_it)->small_title.data[(_it)->empty_header.nkey]; \
- start_offset = 0; \
- if (next == NULL && (_beyond_item_boundary)) { \
- end_offset = SMALL_TITLE_CHUNK_DATA_SZ - \
- ((_it)->empty_header.nkey) - 1; \
- } else { \
- end_offset = __fs_MIN((_offset) + (_nbytes), \
- start_offset + SMALL_TITLE_CHUNK_DATA_SZ - ((_it)->empty_header.nkey)) - 1; \
- } \
- to_scan = end_offset - start_offset + 1; \
+ next = get_chunk_address((_it)->empty_header.next_chunk); \
+ ptr = &(_it)->small_title.data[0]; \
+ start_offset = 0; \
+ if (next == NULL && (_beyond_item_boundary)) { \
+ end_offset = SMALL_TITLE_CHUNK_DATA_SZ - 1; \
} else { \
- /* no data in the title block, that means the key is */ \
- /* exactly the same size as */ \
- /* SMALL_TITLE_CHUNK_DATA_SZ. */ \
- next = get_chunk_address((_it)->empty_header.next_chunk); \
- assert( (SMALL_CHUNK_INITIALIZED | SMALL_CHUNK_USED) == next->sc.flags ); \
- ptr = next->sc.sc_body.data; \
- start_offset = 0; \
- \
- /* move the next pointer. */ \
- next = get_chunk_address(next->sc.sc_body.next_chunk); \
- if (next == NULL && (_beyond_item_boundary)) { \
- end_offset = SMALL_BODY_CHUNK_DATA_SZ - 1; \
- } else { \
- end_offset = __fs_MIN((_offset) + (_nbytes), \
- start_offset + SMALL_BODY_CHUNK_DATA_SZ) - 1; \
- } \
- to_scan = end_offset - start_offset + 1; \
+ end_offset = __fs_MIN((_offset) + (_nbytes), \
+ start_offset + SMALL_TITLE_CHUNK_DATA_SZ) - 1; \
} \
- \
- /* printf(" leaving head region with end_offset = %ld\n", end_offset); */ \
+ to_scan = end_offset - start_offset + 1; \
\
/* advance over pages writing while doing the requested action. */ \
do { \
@@ -865,8 +809,6 @@ static inline size_t __fs_MAX(size_t a, size_t b) {
left -= work_len; \
} \
\
- /* printf(" left = %lu\n", left); */ \
- \
if (left == 0) { \
break; \
} \
@@ -886,7 +828,8 @@ static inline size_t __fs_MAX(size_t a, size_t b) {
} \
/* printf(" cycling start_offset = %ld, end_offset = %ld\n", start_offset, end_offset); */ \
to_scan = end_offset - start_offset + 1; \
- } while (start_offset <= (_it)->empty_header.nbytes); \
+ } while (start_offset <= ((_it)->empty_header.nkey + \
+ (_it)->empty_header.nbytes)); \
} \
assert(left == 0); \
} while (0);
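
A note on the ITEM_WALK changes above: offsets are now expressed in a combined key+value byte space, which is why callers pass `it->empty_header.nkey + offset` to address value bytes. A hypothetical helper (invented names, not part of the tree) showing how such a combined offset maps to a chunk and an intra-chunk position:

    #include <stddef.h>

    /* Map a combined key+value offset onto (chunk index, offset within that
     * chunk), assuming a title chunk holding title_sz data bytes followed by
     * body chunks of body_sz bytes each. Purely illustrative. */
    static void locate_offset(size_t off, size_t title_sz, size_t body_sz,
                              size_t *chunk_idx, size_t *chunk_off) {
        if (off < title_sz) {
            *chunk_idx = 0;                  /* lands in the title chunk */
            *chunk_off = off;
        } else {
            off -= title_sz;
            *chunk_idx = 1 + off / body_sz;  /* body chunks follow the title */
            *chunk_off = off % body_sz;
        }
    }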
29 flat_storage_support.h
@@ -21,7 +21,7 @@
#include "memcached.h"
#include "conn_buffer.h"
-static inline int add_item_to_iov(conn *c, const item* it, bool send_cr_lf) {
+static inline int add_item_value_to_iov(conn *c, const item* it, bool send_cr_lf) {
int retval;
#define ADD_ITEM_TO_IOV_APPLIER(it, ptr, bytes) \
@@ -29,7 +29,7 @@ static inline int add_item_to_iov(conn *c, const item* it, bool send_cr_lf) {
return retval; \
}
- ITEM_WALK(it, 0, it->empty_header.nbytes, false, ADD_ITEM_TO_IOV_APPLIER, const);
+ ITEM_WALK(it, it->empty_header.nkey, it->empty_header.nbytes, false, ADD_ITEM_TO_IOV_APPLIER, const);
#undef ADD_ITEM_TO_IOV_APPLIER
@@ -41,6 +41,22 @@ static inline int add_item_to_iov(conn *c, const item* it, bool send_cr_lf) {
}
+static inline int add_item_key_to_iov(conn *c, const item* it) {
+ int retval;
+
+#define ADD_ITEM_TO_IOV_APPLIER(it, ptr, bytes) \
+ if ((retval = add_iov(c, (ptr), (bytes), false)) != 0) { \
+ return retval; \
+ }
+
+ ITEM_WALK(it, 0, it->empty_header.nkey, false, ADD_ITEM_TO_IOV_APPLIER, const);
+
+#undef ADD_ITEM_TO_IOV_APPLIER
+
+ return 0;
+}
+
+
static inline size_t item_setup_receive(item* it, conn* c) {
struct iovec* current_iov;
size_t iov_len_required = data_chunks_in_item(it);
@@ -72,7 +88,7 @@ static inline size_t item_setup_receive(item* it, conn* c) {
current_iov->iov_len = bytes; \
current_iov ++;
- ITEM_WALK(it, 0, it->empty_header.nbytes, false, ITEM_SETUP_RECEIVE_APPLIER, )
+ ITEM_WALK(it, it->empty_header.nkey, it->empty_header.nbytes, false, ITEM_SETUP_RECEIVE_APPLIER, )
#undef ITEM_SETUP_RECEIVE_APPLIER
@@ -113,7 +129,7 @@ static inline int item_strtoul(const item* it, int base) {
} \
}
- ITEM_WALK(it, 0, it->empty_header.nbytes, false, ITEM_STRTOUL_APPLIER, const)
+ ITEM_WALK(it, it->empty_header.nkey, it->empty_header.nbytes, false, ITEM_STRTOUL_APPLIER, const)
#undef ITEM_STRTOUL_APPLIER
@@ -125,9 +141,10 @@ static inline void item_memset(item* it, size_t offset, int c, size_t nbytes) {
#define MEMSET_APPLIER(it, ptr, bytes) \
memset((ptr), c, bytes);
- ITEM_WALK(it, offset, nbytes, 0, MEMSET_APPLIER, );
+ ITEM_WALK(it, it->empty_header.nkey + offset, nbytes, 0, MEMSET_APPLIER, );
#undef MEMSET_APPLIER
}
+
#endif /* #if !defined(_flat_storage_support_h_) */
-#endif /* #if defined(USE_SLAB_ALLOCATOR) */
+#endif /* #if defined(USE_FLAT_ALLOCATOR) */
7 items.h
@@ -32,12 +32,12 @@ extern item* do_item_alloc(const char *key, const size_t nkey,
const struct in_addr addr);
extern bool item_size_ok(const size_t nkey, const int flags, const int nbytes);
-extern int do_item_link(item *it); /** may fail if transgresses limits */
-extern void do_item_unlink(item *it, long flags);
+extern int do_item_link(item *it, const char* key); /** may fail if transgresses limits */
+extern void do_item_unlink(item *it, long flags, const char* key);
extern void do_item_unlink_impl(item *it, long flags, bool to_freelist);
extern void do_item_deref(item *it);
extern void do_item_update(item *it); /** update LRU time to current and reposition */
-extern int do_item_replace(item *it, item *new_it);
+extern int do_item_replace(item *it, item *new_it, const char* key);
/*@null@*/
extern char* do_item_stats_sizes(int *bytes);
@@ -58,5 +58,6 @@ extern void item_memcpy_to(item* it, size_t offset, const void* src, size_t nbyt
bool beyond_item_boundary);
extern void item_memcpy_from(void* dst, const item* it, size_t offset, size_t nbytes,
bool beyond_item_boundary);
+extern int item_key_compare(const item* it, const char* key, const size_t nkey);
#endif /* #if !defined(_items_h_) */
43 memcached.c
@@ -898,7 +898,7 @@ static void complete_nread(conn* c) {
if (memcmp("\r\n", c->crlf, 2) != 0) {
out_string(c, "CLIENT_ERROR bad data chunk");
} else {
- if (store_item(it, comm)) {
+ if (store_item(it, comm, c->update_key)) {
out_string(c, "STORED");
} else {
out_string(c, "NOT_STORED");
@@ -915,11 +915,13 @@ static void complete_nread(conn* c) {
*
* Returns true if the item was stored.
*/
-int do_store_item(item *it, int comm) {
- char *key = ITEM_key(it);
+int do_store_item(item *it, int comm, const char* key) {
bool delete_locked = false;
- item *old_it = do_item_get_notedeleted(key, ITEM_nkey(it), &delete_locked);
+ item *old_it;
int stored = 0;
+ size_t nkey = ITEM_nkey(it);
+
+ old_it = do_item_get_notedeleted(key, nkey, &delete_locked);
if (old_it != NULL && comm == NREAD_ADD) {
/* add only adds a nonexistent item, but promote to head of LRU */
@@ -934,7 +936,7 @@ int do_store_item(item *it, int comm) {
that's in the namespace/LRU but wasn't returned by
item_get.... because we need to replace it */
if (delete_locked) {
- old_it = do_item_get_nocheck(key, ITEM_nkey(it));
+ old_it = do_item_get_nocheck(key, nkey);
}
STATS_LOCK();
@@ -944,7 +946,7 @@ int do_store_item(item *it, int comm) {
if (old_it != NULL) {
prefix_stats_flags |= PREFIX_IS_OVERWRITE;
}
- stats_prefix_record_byte_total_change(key, ITEM_nkey(it), ITEM_nkey(it) + ITEM_nbytes(it),
+ stats_prefix_record_byte_total_change(key, nkey, ITEM_nkey(it) + ITEM_nbytes(it),
prefix_stats_flags);
}
@@ -953,9 +955,9 @@ int do_store_item(item *it, int comm) {
STATS_UNLOCK();
if (old_it != NULL) {
- do_item_replace(old_it, it);
+ do_item_replace(old_it, it, key);
} else {
- do_item_link(it);
+ do_item_link(it, key);
}
stored = 1;
@@ -1105,7 +1107,7 @@ static void process_stat(conn* c, token_t *tokens, const size_t ntokens) {
offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT allocator slab\r\n");
#endif /* #if defined(USE_SLAB_ALLOCATOR) */
#if defined(USE_FLAT_ALLOCATOR)
- offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT allocator flat\r\n");
+ offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT allocator flat-sk\r\n");
#endif /* #if defined(USE_FLAT_ALLOCATOR) */
#ifndef WIN32
offset = append_to_buffer(temp, bufsize, offset, sizeof(terminator), "STAT rusage_user %ld.%06d\r\n", usage.ru_utime.tv_sec, (int) usage.ru_utime.tv_usec);
@@ -1530,14 +1532,15 @@ static inline void process_get_command(conn* c, token_t *tokens, size_t ntokens)
* " " + flags + " " + data length + "\r\n" + data (with \r\n)
*/
if (add_iov(c, "VALUE ", 6, true) != 0 ||
- add_iov(c, ITEM_key(it), ITEM_nkey(it), false) != 0 ||
+ add_item_key_to_iov(c, it) != 0 ||
add_iov(c, flags_len_string_start, flags_len_string_len, false) != 0 ||
- add_item_to_iov(c, it, true /* send cr-lf */) != 0)
+ add_item_value_to_iov(c, it, true /* send cr-lf */) != 0)
{
break;
}
- if (settings.verbose > 1)
- fprintf(stderr, ">%d sending key %s\n", c->sfd, ITEM_key(it));
+ if (settings.verbose > 1) {
+ fprintf(stderr, ">%d sending key %*s\n", c->sfd, (int) nkey, key);
+ }
/* item_get() has incremented it->refcount for us */
STATS_LOCK();
@@ -1650,10 +1653,11 @@ static inline void process_metaget_command(conn *c, token_t *tokens, size_t ntok
}
if (add_iov(c, "META ", 5, true) == 0 &&
- add_iov(c, ITEM_key(it), ITEM_nkey(it), false) == 0 &&
+ add_item_key_to_iov(c, it) == 0 &&
add_iov(c, txstart, txcount, false) == 0) {
- if (settings.verbose > 1)
- fprintf(stderr, ">%d sending metadata for key %s\n", c->sfd, ITEM_key(it));
+ if (settings.verbose > 1) {
+ fprintf(stderr, ">%d sending metadata for key %*s\n", c->sfd, (int) nkey, key);
+ }
}
item_deref(it);
@@ -1739,6 +1743,7 @@ static void process_update_command(conn *c, token_t *tokens, const size_t ntoken
* for the CR-LF, we're sure that we're
* not reading stale data. */
+ c->update_key = key;
c->item_comm = comm;
c->item = it;
conn_set_state(c, conn_nread);
@@ -1871,7 +1876,7 @@ char *do_add_delta(const char* key, const size_t nkey, const int incr, const uns
return "SERVER_ERROR out of memory";
}
item_memcpy_to(new_it, 0, buf, res, false);
- do_item_replace(it, new_it);
+ do_item_replace(it, new_it, key);
do_item_deref(new_it); /* release our reference */
} else { /* replace in-place */
ITEM_set_nbytes(it, res); /* update the length field. */
@@ -1940,7 +1945,7 @@ static void process_delete_command(conn* c, token_t *tokens, const size_t ntoken
stats_delete(ITEM_nkey(it) + ITEM_nbytes(it));
STATS_UNLOCK();
- item_unlink(it, UNLINK_NORMAL);
+ item_unlink(it, UNLINK_NORMAL, key);
item_deref(it); /* release our reference */
out_string(c, "DELETED");
} else {
@@ -3096,7 +3101,7 @@ void do_run_deferred_deletes(void)
if (item_delete_lock_over(it)) {
assert(ITEM_refcount(it) > 0);
ITEM_unmark_deleted(it);
- do_item_unlink(it, UNLINK_NORMAL);
+ do_item_unlink(it, UNLINK_NORMAL, NULL);
do_item_deref(it);
} else {
todelete[j++] = it;
12 memcached.h
@@ -230,6 +230,7 @@ struct conn_s {
void *item; /* for commands set/add/replace */
int item_comm; /* which one is it: set/add/replace */
+ const char *update_key;
/* data for the swallow state */
int sbytes; /* how many bytes to swallow */
@@ -302,7 +303,7 @@ int do_defer_delete(item *item, time_t exptime);
void do_run_deferred_deletes(void);
char *do_add_delta(const char* key, const size_t nkey, const int incr, const unsigned int delta,
char *buf, uint32_t* res_val, const struct in_addr addr);
-int do_store_item(item *item, int comm);
+int do_store_item(item *item, int comm, const char* key);
conn* conn_new(const int sfd, const int init_state, const int event_flags, const int read_buffer_size,
const bool is_udp, const bool is_binary,
const struct sockaddr* const addr, const socklen_t addrlen,
@@ -362,12 +363,10 @@ item *mt_item_alloc(char *key, size_t nkey, int flags, rel_time_t exptime, int n
char *mt_item_cachedump(const unsigned int slabs_clsid, const unsigned int limit, unsigned int *bytes);
void mt_item_flush_expired(void);
item *mt_item_get_notedeleted(const char *key, const size_t nkey, bool *delete_locked);
-int mt_item_link(item *it);
void mt_item_deref(item *it);
-int mt_item_replace(item *it, item *new_it);
char *mt_item_stats(int *bytes);
char *mt_item_stats_sizes(int *bytes);
-void mt_item_unlink(item *it, long flags);
+void mt_item_unlink(item *it, long flags, const char* key);
void mt_item_update(item *it);
void mt_run_deferred_deletes(void);
void *mt_slabs_alloc(size_t size);
@@ -377,7 +376,7 @@ void mt_slabs_rebalance();
char *mt_slabs_stats(int *buflen);
void mt_stats_lock(void);
void mt_stats_unlock(void);
-int mt_store_item(item *item, int comm);
+int mt_store_item(item *item, int comm, const char* key);
# define add_delta mt_add_delta
@@ -397,9 +396,7 @@ int mt_store_item(item *item, int comm);
# define item_cachedump mt_item_cachedump
# define item_flush_expired mt_item_flush_expired
# define item_get_notedeleted mt_item_get_notedeleted
-# define item_link mt_item_link
# define item_deref mt_item_deref
-# define item_replace mt_item_replace
# define item_stats mt_item_stats
# define item_stats_sizes mt_item_stats_sizes
# define item_update mt_item_update
@@ -435,7 +432,6 @@ int mt_store_item(item *item, int comm);
# define item_cachedump do_item_cachedump
# define item_flush_expired do_item_flush_expired
# define item_get_notedeleted do_item_get_notedeleted
-# define item_link do_item_link
# define item_deref do_item_deref
# define item_replace do_item_replace
# define item_stats do_item_stats
17 stats.c
@@ -106,6 +106,7 @@ void stats_prefix_clear() {
memset(&wildcard, 0, sizeof(PREFIX_STATS));
}
+
/*
* Returns the stats structure for a prefix, creating it if it's not already
* in the list.
@@ -130,8 +131,9 @@ static PREFIX_STATS *stats_prefix_find(const char *key, const size_t nkey) {
for (pfs = prefix_stats[hashval]; NULL != pfs; pfs = pfs->next) {
if (length == pfs->prefix_len &&
- (strncmp(pfs->prefix, key, length) == 0))
+ (memcmp(pfs->prefix, key, length) == 0)) {
return pfs;
+ }
}
pfs = pool_calloc_locking(false, sizeof(PREFIX_STATS), 1, STATS_PREFIX_POOL);
@@ -147,7 +149,7 @@ static PREFIX_STATS *stats_prefix_find(const char *key, const size_t nkey) {
return NULL;
}
- strncpy(pfs->prefix, key, length);
+ memcpy(pfs->prefix, key, length);
pfs->prefix_len = length;
pfs->next = prefix_stats[hashval];
@@ -275,12 +277,13 @@ void stats_prefix_record_removal(const char *key, const size_t nkey, size_t byte
*/
/*@null@*/
char *stats_prefix_dump(int *length) {
- const char *format = "PREFIX %*s item %u get %" PRINTF_INT64_MODIFIER \
+#define STATS_PREFIX_DUMP_FORMAT \
+ "PREFIX %.*s item %u get %" PRINTF_INT64_MODIFIER \
"u hit %" PRINTF_INT64_MODIFIER "u set %" PRINTF_INT64_MODIFIER \
"u del %" PRINTF_INT64_MODIFIER "u evict %" PRINTF_INT64_MODIFIER \
"u ov %" PRINTF_INT64_MODIFIER "u exp %" PRINTF_INT64_MODIFIER \
"u bytes %" PRINTF_INT64_MODIFIER "u txed %" PRINTF_INT64_MODIFIER \
- "u byte-seconds %" PRINTF_INT64_MODIFIER "u\r\n";
+ "u byte-seconds %" PRINTF_INT64_MODIFIER "u\r\n"
PREFIX_STATS *pfs;
char *buf;
int i;
@@ -298,7 +301,7 @@ char *stats_prefix_dump(int *length) {
*/
STATS_LOCK();
size = total_prefix_size +
- (num_prefixes + 1) * (strlen(format)
+ (num_prefixes + 1) * (strlen(STATS_PREFIX_DUMP_FORMAT)
+ 11 * (20 - format_len)) /* %llu replaced by 20-digit num */
+ sizeof(wildcard_name)
+ sizeof("END\r\n");
@@ -319,7 +322,7 @@ char *stats_prefix_dump(int *length) {
pfs->last_update = now;
offset = append_to_buffer(buf, size, offset, sizeof(terminator),
- format, pfs->prefix_len,
+ STATS_PREFIX_DUMP_FORMAT, (int) pfs->prefix_len,
pfs->prefix, pfs->num_items, pfs->num_gets, pfs->num_hits,
pfs->num_sets, pfs->num_deletes, pfs->num_evicts,
pfs->num_overwrites, pfs->num_expires,
@@ -338,7 +341,7 @@ char *stats_prefix_dump(int *length) {
wildcard.num_sets != 0 ||
wildcard.num_deletes != 0) {
offset = append_to_buffer(buf, size, offset, sizeof(terminator),
- format, sizeof(wildcard_name) - 1,
+ STATS_PREFIX_DUMP_FORMAT, (int) (sizeof(wildcard_name) - 1),
wildcard_name, wildcard.num_items,
wildcard.num_gets, wildcard.num_hits,
wildcard.num_sets, wildcard.num_deletes, wildcard.num_evicts,
32 thread.c
@@ -403,18 +403,6 @@ item *mt_item_get_notedeleted(const char *key, const size_t nkey, bool *delete_l
}
/*
- * Links an item into the LRU and hashtable.
- */
-int mt_item_link(item *item) {
- int ret;
-
- pthread_mutex_lock(&cache_lock);
- ret = do_item_link(item);
- pthread_mutex_unlock(&cache_lock);
- return ret;
-}
-
-/*
* Decrements the reference count on an item and adds it to the freelist if
* needed.
*/
@@ -425,23 +413,11 @@ void mt_item_deref(item *item) {
}
/*
- * Replaces one item with another in the hashtable.
- */
-int mt_item_replace(item *old, item *new) {
- int ret;
-
- pthread_mutex_lock(&cache_lock);
- ret = do_item_replace(old, new);
- pthread_mutex_unlock(&cache_lock);
- return ret;
-}
-
-/*
* Unlinks an item from the LRU and hashtable.
*/
-void mt_item_unlink(item *item, long flags) {
+void mt_item_unlink(item *item, long flags, const char* key) {
pthread_mutex_lock(&cache_lock);
- do_item_unlink(item, flags);
+ do_item_unlink(item, flags, key);
pthread_mutex_unlock(&cache_lock);
}
@@ -482,11 +458,11 @@ char *mt_add_delta(const char* key, const size_t nkey, const int incr, const uns
/*
* Stores an item in the cache (high level, obeys set/add/replace semantics)
*/
-int mt_store_item(item *item, int comm) {
+int mt_store_item(item *item, int comm, const char* key) {
int ret;
pthread_mutex_lock(&cache_lock);
- ret = do_store_item(item, comm);
+ ret = do_store_item(item, comm, key);
pthread_mutex_unlock(&cache_lock);
return ret;
}