/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Hash table
*
* The hash function used here is by Bob Jenkins, 1996:
* <http://burtleburtle.net/bob/hash/doobs.html>
* "By Bob Jenkins, 1996. bob_jenkins@burtleburtle.net.
* You may use this code any way you wish, private, educational,
* or commercial. It's free."
*
* The rest of the file is licensed under the BSD license. See LICENSE.
*
* $Id: assoc.c 337 2006-09-04 05:29:05Z bradfitz $
*/
#include "generic.h"
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/signal.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#ifdef HAVE_REGEX_H
#include <regex.h>
#endif
#include "assoc.h"
#include "memcached.h"
/*
* Since the hash function does bit manipulation, it needs to know
* whether it's big or little-endian. ENDIAN_LITTLE and ENDIAN_BIG
* are set in the configure script.
*/
#if ENDIAN_BIG == 1
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# if ENDIAN_LITTLE == 1
# define HASH_LITTLE_ENDIAN 1
# define HASH_BIG_ENDIAN 0
# else
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 0
# endif
#endif
#define rot(x,k) (((x)<<(k)) ^ ((x)>>(32-(k))))
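/*
 * Note added for clarity: because the two shifted halves in rot() never
 * overlap, the "^" behaves exactly like "|" here, so rot(x,k) is a plain
 * left-rotate of the 32-bit value x by k bits.
 */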
/*
-------------------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.
This is reversible, so any information in (a,b,c) before mix() is
still in (a,b,c) after mix().
If four pairs of (a,b,c) inputs are run through mix(), or through
mix() in reverse, there are at least 32 bits of the output that
are sometimes the same for one pair and different for another pair.
This was tested for:
* pairs that differed by one bit, by two bits, in any combination
of top bits of (a,b,c), or in any combination of bottom bits of
(a,b,c).
* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
is commonly produced by subtraction) look like a single 1-bit
difference.
* the base values were pseudorandom, all zero but one bit set, or
all zero plus a counter that starts at zero.
Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
satisfy this are
4 6 8 16 19 4
9 15 3 18 27 15
14 9 3 7 17 3
Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
for "differ" defined as + with a one-bit base and a two-bit delta. I
used http://burtleburtle.net/bob/hash/avalanche.html to choose
the operations, constants, and arrangements of the variables.
This does not achieve avalanche. There are input bits of (a,b,c)
that fail to affect some output bits of (a,b,c), especially of a. The
most thoroughly mixed value is c, but it doesn't really even achieve
avalanche in c.
This allows some parallelism. Read-after-writes are good at doubling
the number of bits affected, so the goal of mixing pulls in the opposite
direction as the goal of parallelism. I did what I could. Rotates
seem to cost as much as shifts on every machine I could lay my hands
on, and rotates are much kinder to the top and bottom bits, so I used
rotates.
-------------------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
a -= c; a ^= rot(c, 4); c += b; \
b -= a; b ^= rot(a, 6); a += c; \
c -= b; c ^= rot(b, 8); b += a; \
a -= c; a ^= rot(c,16); c += b; \
b -= a; b ^= rot(a,19); a += c; \
c -= b; c ^= rot(b, 4); b += a; \
}
/*
-------------------------------------------------------------------------------
final -- final mixing of 3 32-bit values (a,b,c) into c
Pairs of (a,b,c) values differing in only a few bits will usually
produce values of c that look totally different. This was tested for
* pairs that differed by one bit, by two bits, in any combination
of top bits of (a,b,c), or in any combination of bottom bits of
(a,b,c).
* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
is commonly produced by subtraction) look like a single 1-bit
difference.
* the base values were pseudorandom, all zero but one bit set, or
all zero plus a counter that starts at zero.
These constants passed:
14 11 25 16 4 14 24
12 14 25 16 4 14 24
and these came close:
4 8 15 26 3 22 24
10 8 15 26 3 22 24
11 8 15 26 3 22 24
-------------------------------------------------------------------------------
*/
#define final(a,b,c) \
{ \
c ^= b; c -= rot(b,14); \
a ^= c; a -= rot(c,11); \
b ^= a; b -= rot(a,25); \
c ^= b; c -= rot(b,16); \
a ^= c; a -= rot(c,4); \
b ^= a; b -= rot(a,14); \
c ^= b; c -= rot(b,24); \
}
#if HASH_LITTLE_ENDIAN == 1
uint32_t hash(
const void *key, /* the key to hash */
size_t length, /* length of the key */
const uint32_t initval) /* initval */
{
uint32_t a,b,c; /* internal state */
union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
/* Set up the internal state */
a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
u.ptr = key;
if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
const uint32_t *k = key; /* read 32-bit chunks */
#ifdef VALGRIND
const uint8_t *k8;
#endif /* ifdef VALGRIND */
/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
while (length > 12)
{
a += k[0];
b += k[1];
c += k[2];
mix(a,b,c);
length -= 12;
k += 3;
}
/*----------------------------- handle the last (probably partial) block */
/*
* "k[2]&0xffffff" actually reads beyond the end of the string, but
* then masks off the part it's not allowed to read. Because the
* string is aligned, the masked-off tail is in the same word as the
* rest of the string. Every machine with memory protection I've seen
* does it on word boundaries, so is OK with this. But VALGRIND will
* still catch it and complain. The masking trick does make the hash
noticeably faster for short strings (like English words).
*/
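/*
 * Illustrative example (added, not part of the original comment): with
 * length 10 the last word k[2] covers key bytes 8..11 but only bytes 8 and 9
 * belong to the key; on a little-endian machine "k[2] & 0xffff" keeps exactly
 * those two bytes and discards the two that lie past the end.
 */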
#ifndef VALGRIND
switch(length)
{
case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
case 8 : b+=k[1]; a+=k[0]; break;
case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
case 6 : b+=k[1]&0xffff; a+=k[0]; break;
case 5 : b+=k[1]&0xff; a+=k[0]; break;
case 4 : a+=k[0]; break;
case 3 : a+=k[0]&0xffffff; break;
case 2 : a+=k[0]&0xffff; break;
case 1 : a+=k[0]&0xff; break;
case 0 : return c; /* zero length strings require no mixing */
}
#else /* make valgrind happy */
k8 = (const uint8_t *)k;
switch(length)
{
case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
case 9 : c+=k8[8]; /* fall through */
case 8 : b+=k[1]; a+=k[0]; break;
case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
case 5 : b+=k8[4]; /* fall through */
case 4 : a+=k[0]; break;
case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
case 1 : a+=k8[0]; break;
case 0 : return c; /* zero length strings require no mixing */
}
#endif /* !valgrind */
} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
const uint16_t *k = key; /* read 16-bit chunks */
const uint8_t *k8;
/*--------------- all but last block: aligned reads and different mixing */
while (length > 12)
{
a += k[0] + (((uint32_t)k[1])<<16);
b += k[2] + (((uint32_t)k[3])<<16);
c += k[4] + (((uint32_t)k[5])<<16);
mix(a,b,c);
length -= 12;
k += 6;
}
/*----------------------------- handle the last (probably partial) block */
k8 = (const uint8_t *)k;
switch(length)
{
case 12: c+=k[4]+(((uint32_t)k[5])<<16);
b+=k[2]+(((uint32_t)k[3])<<16);
a+=k[0]+(((uint32_t)k[1])<<16);
break;
case 11: c+=((uint32_t)k8[10])<<16; /* @fallthrough@ */
case 10: c+=k[4]; /* @fallthrough@ */
b+=k[2]+(((uint32_t)k[3])<<16);
a+=k[0]+(((uint32_t)k[1])<<16);
break;
case 9 : c+=k8[8]; /* @fallthrough@ */
case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
a+=k[0]+(((uint32_t)k[1])<<16);
break;
case 7 : b+=((uint32_t)k8[6])<<16; /* @fallthrough@ */
case 6 : b+=k[2];
a+=k[0]+(((uint32_t)k[1])<<16);
break;
case 5 : b+=k8[4]; /* @fallthrough@ */
case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
break;
case 3 : a+=((uint32_t)k8[2])<<16; /* @fallthrough@ */
case 2 : a+=k[0];
break;
case 1 : a+=k8[0];
break;
case 0 : return c; /* zero length strings require no mixing */
}
} else { /* need to read the key one byte at a time */
const uint8_t *k = key;
/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
while (length > 12)
{
a += k[0];
a += ((uint32_t)k[1])<<8;
a += ((uint32_t)k[2])<<16;
a += ((uint32_t)k[3])<<24;
b += k[4];
b += ((uint32_t)k[5])<<8;
b += ((uint32_t)k[6])<<16;
b += ((uint32_t)k[7])<<24;
c += k[8];
c += ((uint32_t)k[9])<<8;
c += ((uint32_t)k[10])<<16;
c += ((uint32_t)k[11])<<24;
mix(a,b,c);
length -= 12;
k += 12;
}
/*-------------------------------- last block: affect all 32 bits of (c) */
switch(length) /* all the case statements fall through */
{
case 12: c+=((uint32_t)k[11])<<24;
case 11: c+=((uint32_t)k[10])<<16;
case 10: c+=((uint32_t)k[9])<<8;
case 9 : c+=k[8];
case 8 : b+=((uint32_t)k[7])<<24;
case 7 : b+=((uint32_t)k[6])<<16;
case 6 : b+=((uint32_t)k[5])<<8;
case 5 : b+=k[4];
case 4 : a+=((uint32_t)k[3])<<24;
case 3 : a+=((uint32_t)k[2])<<16;
case 2 : a+=((uint32_t)k[1])<<8;
case 1 : a+=k[0];
break;
case 0 : return c; /* zero length strings require no mixing */
}
}
final(a,b,c);
return c;
}
#elif HASH_BIG_ENDIAN == 1
/*
* hashbig():
* This is the same as hashword() on big-endian machines. It is different
* from hashlittle() on all machines. hashbig() takes advantage of
* big-endian byte ordering.
*/
uint32_t hash( const void *key, size_t length, const uint32_t initval)
{
uint32_t a,b,c;
union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */
/* Set up the internal state */
a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
u.ptr = key;
if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
const uint32_t *k = key; /* read 32-bit chunks */
#ifdef VALGRIND
const uint8_t *k8;
#endif /* ifdef VALGRIND */
/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
while (length > 12)
{
a += k[0];
b += k[1];
c += k[2];
mix(a,b,c);
length -= 12;
k += 3;
}
/*----------------------------- handle the last (probably partial) block */
/*
* "k[2]<<8" actually reads beyond the end of the string, but
* then shifts out the part it's not allowed to read. Because the
* string is aligned, the illegal read is in the same word as the
* rest of the string. Every machine with memory protection I've seen
* does it on word boundaries, so is OK with this. But VALGRIND will
* still catch it and complain. The masking trick does make the hash
noticeably faster for short strings (like English words).
*/
#ifndef VALGRIND
switch(length)
{
case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
case 8 : b+=k[1]; a+=k[0]; break;
case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
case 4 : a+=k[0]; break;
case 3 : a+=k[0]&0xffffff00; break;
case 2 : a+=k[0]&0xffff0000; break;
case 1 : a+=k[0]&0xff000000; break;
case 0 : return c; /* zero length strings require no mixing */
}
#else /* make valgrind happy */
k8 = (const uint8_t *)k;
switch(length) /* all the case statements fall through */
{
case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
case 11: c+=((uint32_t)k8[10])<<8; /* fall through */
case 10: c+=((uint32_t)k8[9])<<16; /* fall through */
case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */
case 8 : b+=k[1]; a+=k[0]; break;
case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */
case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */
case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */
case 4 : a+=k[0]; break;
case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */
case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */
case 1 : a+=((uint32_t)k8[0])<<24; break;
case 0 : return c;
}
#endif /* !VALGRIND */
} else { /* need to read the key one byte at a time */
const uint8_t *k = key;
/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
while (length > 12)
{
a += ((uint32_t)k[0])<<24;
a += ((uint32_t)k[1])<<16;
a += ((uint32_t)k[2])<<8;
a += ((uint32_t)k[3]);
b += ((uint32_t)k[4])<<24;
b += ((uint32_t)k[5])<<16;
b += ((uint32_t)k[6])<<8;
b += ((uint32_t)k[7]);
c += ((uint32_t)k[8])<<24;
c += ((uint32_t)k[9])<<16;
c += ((uint32_t)k[10])<<8;
c += ((uint32_t)k[11]);
mix(a,b,c);
length -= 12;
k += 12;
}
/*-------------------------------- last block: affect all 32 bits of (c) */
switch(length) /* all the case statements fall through */
{
case 12: c+=k[11];
case 11: c+=((uint32_t)k[10])<<8;
case 10: c+=((uint32_t)k[9])<<16;
case 9 : c+=((uint32_t)k[8])<<24;
case 8 : b+=k[7];
case 7 : b+=((uint32_t)k[6])<<8;
case 6 : b+=((uint32_t)k[5])<<16;
case 5 : b+=((uint32_t)k[4])<<24;
case 4 : a+=k[3];
case 3 : a+=((uint32_t)k[2])<<8;
case 2 : a+=((uint32_t)k[1])<<16;
case 1 : a+=((uint32_t)k[0])<<24;
break;
case 0 : return c;
}
}
final(a,b,c);
return c;
}
#else /* HASH_XXX_ENDIAN == 1 */
#error Must define HASH_BIG_ENDIAN or HASH_LITTLE_ENDIAN
#endif /* HASH_XXX_ENDIAN == 1 */
typedef unsigned long int ub4; /* unsigned 4-byte quantities */
typedef unsigned char ub1; /* unsigned 1-byte quantities */
/* how many powers of 2's worth of buckets we use */
static unsigned int hashpower = 16;
#define hashsize(n) ((ub4)1<<(n))
#define hashmask(n) (hashsize(n)-1)
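/*
 * Illustrative values (added): with the default hashpower of 16,
 * hashsize(16) == 65536 buckets and hashmask(16) == 0xffff, so
 * "hv & hashmask(hashpower)" keeps the low 16 bits of the hash value.
 */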
/* Main hash table. This is where we look except during expansion. */
static item_ptr_t* primary_hashtable = 0;
/*
* Previous hash table. During expansion, we look here for keys that haven't
* been moved over to the primary yet.
*/
static item_ptr_t* old_hashtable = 0;
/* Number of items in the hash table. */
static unsigned int hash_items = 0;
/* Flag: Are we in the middle of expanding now? */
static bool expanding = false;
/*
* During expansion we migrate values with bucket granularity; this is how
* far we've gotten so far. Ranges from 0 .. hashsize(hashpower - 1) - 1.
*/
static unsigned int expand_bucket = 0;
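/*
 * Illustrative example (added): suppose the table is mid-expansion with
 * expand_bucket == 100. A key whose old-table index (hv & hashmask(hashpower - 1))
 * is 5 has already been migrated and is looked up in primary_hashtable; a key
 * whose old-table index is 200 has not, and is still found in old_hashtable.
 * Note that hashpower has already been incremented by the time expanding is
 * true, which is why the old table is indexed with hashmask(hashpower - 1).
 */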
void assoc_init(void) {
unsigned int hash_size = hashsize(hashpower) * sizeof(item_ptr_t);
primary_hashtable = pool_malloc(hash_size, ASSOC_POOL);
if (! primary_hashtable) {
fprintf(stderr, "Failed to init hashtable.\n");
exit(EXIT_FAILURE);
}
memset(primary_hashtable, 0, hash_size);
}
item *assoc_find(const char *key, const size_t nkey) {
uint32_t hv = hash(key, nkey, 0);
item_ptr_t iptr;
unsigned int oldbucket;
if (expanding &&
(oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
{
iptr = old_hashtable[oldbucket];
} else {
iptr = primary_hashtable[hv & hashmask(hashpower)];
}
while (iptr) {
if (item_key_compare(ITEM(iptr), key, nkey) == 0) {
return ITEM(iptr);
}
iptr = ITEM_PTR_h_next(iptr);
}
return 0;
}
/* returns the address of the item pointer before the key. if *item == 0,
the item wasn't found */
static item_ptr_t* _hashitem_before (const char *key, const size_t nkey) {
uint32_t hv = hash(key, nkey, 0);
item_ptr_t* pos;
unsigned int oldbucket;
if (expanding &&
(oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
{
pos = &old_hashtable[oldbucket];
} else {
pos = &primary_hashtable[hv & hashmask(hashpower)];
}
while (*pos && item_key_compare(ITEM(*pos), key, nkey)) {
pos = ITEM_h_next_p(ITEM(*pos));
}
return pos;
}
/* returns the address of the item pointer before it. if *item == 0,
the item wasn't found */
static item_ptr_t* _hashitem_before_item (item* it) {
/* this is one of the few times we totally break the storage layer
* abstraction. the only way we could do this cleanly is to either:
*
* 1) Have the flat allocator malloc a block of memory and use that to
* duplicate the key. This is inefficient because malloc is an overkill
* for this.
*
* 2) Have the slab allocator copy out the key as well. This is
* inefficient because it is totally unnecessary and unfairly punishes
* the slab allocator.
*/
#if defined(USE_FLAT_ALLOCATOR)
char key_temp[KEY_MAX_LENGTH];
#endif /* #if defined(USE_FLAT_ALLOCATOR) */
const char* key;
uint32_t hv;
item_ptr_t* pos;
unsigned int oldbucket;
#if defined(USE_FLAT_ALLOCATOR)
key = item_key_copy(it, key_temp);
#endif /* #if defined(USE_FLAT_ALLOCATOR) */
#if defined(USE_SLAB_ALLOCATOR)
key = ITEM_key(it);
#endif /* #if defined(USE_SLAB_ALLOCATOR) */
hv = hash(key, ITEM_nkey(it), 0);
if (expanding &&
(oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
{
pos = &old_hashtable[oldbucket];
} else {
pos = &primary_hashtable[hv & hashmask(hashpower)];
}
while (*pos && (ITEM(*pos) != it)) {
pos = ITEM_h_next_p(ITEM(*pos));
}
return pos;
}
/* grows the hashtable to the next power of 2. */
static void assoc_expand(void) {
old_hashtable = primary_hashtable;
primary_hashtable = pool_calloc(hashsize(hashpower + 1), sizeof(item_ptr_t), ASSOC_POOL);
if (primary_hashtable) {
if (settings.verbose > 1)
fprintf(stderr, "Hash table expansion starting\n");
hashpower++;
expanding = true;
expand_bucket = 0;
do_assoc_move_next_bucket();
} else {
primary_hashtable = old_hashtable;
/* Bad news, but we can keep running. */
}
}
/* migrates the next bucket to the primary hashtable if we're expanding. */
void do_assoc_move_next_bucket(void) {
item_ptr_t iptr, next;
int bucket;
/* this is one of the few times we totally break the storage layer
* abstraction. the only way we could do this cleanly is to either:
*
* 1) Have the flat allocator malloc a block of memory and use that to
* duplicate the key. This is inefficient because malloc is an overkill
* for this.
*
* 2) Have the slab allocator copy out the key as well. This is
* inefficient because it is totally unnecessary and unfairly punishes
* the slab allocator.
*/
#if defined(USE_FLAT_ALLOCATOR)
char key_temp[KEY_MAX_LENGTH];
#endif /* #if defined(USE_FLAT_ALLOCATOR) */
const char* key;
if (expanding) {
for (iptr = old_hashtable[expand_bucket]; ! ITEM_PTR_IS_NULL(iptr); iptr = next) {
next = ITEM_PTR_h_next(iptr);
#if defined(USE_FLAT_ALLOCATOR)
key = item_key_copy(ITEM(iptr), key_temp);
#endif /* #if defined(USE_FLAT_ALLOCATOR) */
#if defined(USE_SLAB_ALLOCATOR)
key = ITEM_key(ITEM(iptr));
#endif /* #if defined(USE_SLAB_ALLOCATOR) */
bucket = hash(key, ITEM_nkey(ITEM(iptr)), 0) & hashmask(hashpower);
ITEM_set_h_next(ITEM(iptr), primary_hashtable[bucket]);
primary_hashtable[bucket] = iptr;
}
old_hashtable[expand_bucket] = NULL_ITEM_PTR;
expand_bucket++;
if (expand_bucket == hashsize(hashpower - 1)) {
expanding = false;
pool_free(old_hashtable,
(hashsize(hashpower - 1) * sizeof(item_ptr_t)),
ASSOC_POOL);
if (settings.verbose > 1)
fprintf(stderr, "Hash table expansion done\n");
}
}
}
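/*
 * Note (assumption about the surrounding code base, added for clarity): each
 * call migrates exactly one bucket of old_hashtable into the primary table;
 * the caller is expected to keep invoking do_assoc_move_next_bucket() until
 * expanding flips back to false and old_hashtable has been freed.
 */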
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(item *it, const char* key) {
uint32_t hv;
unsigned int oldbucket;
assert(assoc_find(key, ITEM_nkey(it)) == 0); /* the key must not already be present */
hv = hash(key, ITEM_nkey(it), 0);
if (expanding &&
(oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
{
ITEM_set_h_next(it, old_hashtable[oldbucket]);
old_hashtable[oldbucket] = ITEM_PTR(it);
} else {
ITEM_set_h_next(it, primary_hashtable[hv & hashmask(hashpower)]);
primary_hashtable[hv & hashmask(hashpower)] = ITEM_PTR(it);
}
hash_items++;
if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
assoc_expand();
}
return 1;
}
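/*
 * Usage sketch (illustrative only and excluded from the build; the helper
 * name example_assoc_usage is hypothetical). With the default hashpower of
 * 16, assoc_insert() triggers an expansion once more than 98304 items
 * (65536 * 3 / 2) are stored.
 */
#if 0
static void example_assoc_usage(item *it) {
    assoc_init();                                           /* allocate and zero the primary table */
    assoc_insert(it, ITEM_key(it));                         /* the key must not already exist      */
    item *found = assoc_find(ITEM_key(it), ITEM_nkey(it));  /* returns it, or 0 if absent          */
    (void) found;
    assoc_delete(ITEM_key(it), ITEM_nkey(it));              /* unlink it from its hash chain       */
}
#endif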
/* given item it, replace the mapping from (ITEM_key(it), ITEM_nkey(it)) ->
* old_it with (ITEM_key(it), ITEM_nkey(it)) -> it. old_it must currently be
* present in the table; nothing is returned.
*/
void assoc_update(item* old_it, item *it) {
item_ptr_t* before = _hashitem_before_item(old_it);
assert(before != NULL &&
ITEM(*before) == old_it);
*before = ITEM_PTR(it);
}
void assoc_delete(const char *key, const size_t nkey) {
item_ptr_t* before = _hashitem_before(key, nkey);
if (*before) {
item_ptr_t next = ITEM_PTR_h_next(*before);
*before = next;
hash_items--;
return;
}
/* Note: we never actually get here. the callers don't delete things
they can't find. */
assert(*before != 0);
}
/* marks all items whose keys match a regular expression as expired. */
int do_assoc_expire_regex(char *pattern) {
#ifdef HAVE_REGEX_H
regex_t regex;
int bucket;
item_ptr_t iptr;
/* this is one of the few times we totally break the storage layer
* abstraction. the only way we could do this cleanly is to either:
*
* 1) Have the flat allocator malloc a block of memory and use that to
* duplicate the key. This is inefficient because malloc is an overkill
* for this.
*
* 2) Have the slab allocator copy out the key as well. This is
* inefficient because it is totally unnecessary and unfairly punishes
* the slab allocator.
*/
#if defined(USE_FLAT_ALLOCATOR)
char key_temp[KEY_MAX_LENGTH];
#endif /* #if defined(USE_FLAT_ALLOCATOR) */
const char* key;
if (regcomp(&regex, pattern, REG_EXTENDED | REG_NOSUB))
return 0;
for (bucket = 0; bucket < hashsize(hashpower); bucket++) {
for (iptr = primary_hashtable[bucket]; ! ITEM_PTR_IS_NULL(iptr); iptr = ITEM_PTR_h_next(iptr)) {
#if defined(USE_FLAT_ALLOCATOR)
key = item_key_copy(ITEM(iptr), key_temp);
#endif /* #if defined(USE_FLAT_ALLOCATOR) */
#if defined(USE_SLAB_ALLOCATOR)
key = ITEM_key(ITEM(iptr));
#endif /* #if defined(USE_SLAB_ALLOCATOR) */
if (regexec(&regex, key, 0, NULL, 0) == 0) {
/* the item matches; mark it expired. */
ITEM_set_exptime(ITEM(iptr), 1);
}
}
}
if (expanding) {
for (bucket = expand_bucket; bucket < hashsize(hashpower-1); bucket++) {
for (iptr = old_hashtable[bucket]; ! ITEM_PTR_IS_NULL(iptr); iptr = ITEM_PTR_h_next(iptr)) {
#if defined(USE_FLAT_ALLOCATOR)
key = item_key_copy(ITEM(iptr), key_temp);
#endif /* #if defined(USE_FLAT_ALLOCATOR) */
#if defined(USE_SLAB_ALLOCATOR)
key = ITEM_key(ITEM(iptr));
#endif /* #if defined(USE_SLAB_ALLOCATOR) */
if (regexec(&regex, key, 0, NULL, 0) == 0) {
/* the item matches; mark it expired. */
ITEM_set_exptime(ITEM(iptr), 1);
}
}
}
}
return 1; /* success */
#else
return 0;
#endif
}
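/*
 * Illustrative call (added; requires HAVE_REGEX_H): do_assoc_expire_regex("^sess:")
 * marks every item whose key starts with "sess:" as expired by setting its
 * exptime to 1.
 */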