Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

A few locking/concurrency fixes related to replication - do not

expire keys in randomkey and wrap the propagate() call in a server
lock.
  • Loading branch information...
commit 05118a75afe406c09f4bbffd6559a695fac92d73 1 parent e9a3636
@grisha authored
View
7 src/db.c
@@ -153,10 +153,9 @@ robj *dbRandomKey(redisDb *db) {
key = dictGetKey(de);
keyobj = createStringObject(key,sdslen(key));
if (dictFind(db->expires,key)) {
- if (expireIfNeeded(db,keyobj)) {
- decrRefCount(keyobj);
- continue; /* search for another key. This expired. */
- }
+ /* search for another key. This expired. */
+ decrRefCount(keyobj);
+ continue;
}
pthread_mutex_unlock(db->lock);
return keyobj;
View
2  src/networking.c
@@ -1016,7 +1016,7 @@ int processMultibulkBuffer(redisClient *c) {
void processInputBuffer(redisClient *c) {
/* Keep processing while there is something in the input buffer */
- while(sdslen(c->querybuf)) {
+ while(c->querybuf && sdslen(c->querybuf)) {
/* Immediately abort if the client is in the middle of something. */
if (c->flags & REDIS_BLOCKED) {
View
6 src/redis.c
@@ -1535,12 +1535,13 @@ void call(redisClient *c, int flags) {
{
replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc);
}
- pthread_mutex_unlock(server.lock);
/* Call the command. */
- redisOpArrayInit(&server.also_propagate); // THREDIS TODO - is this a problem?
+ redisOpArrayInit(&server.also_propagate);
dirty = server.dirty;
+ pthread_mutex_unlock(server.lock);
c->cmd->proc(c);
+ pthread_mutex_lock(server.lock);
dirty = server.dirty-dirty;
duration = ustime()-start;
@@ -1582,6 +1583,7 @@ void call(redisClient *c, int flags) {
redisOpArrayFree(&server.also_propagate);
}
server.stat_numcommands++;
+ pthread_mutex_unlock(server.lock);
}
int timedEventProcessInputBufferHandler(aeEventLoop *el, long long id, void *clientData) {
View
4 tests/integration/replication-4.tcl
@@ -24,11 +24,11 @@ start_server {tags {"repl"}} {
test {Test replication with parallel clients writing in differnet DBs} {
lappend slave [srv 0 client]
- after 10000
+ after 5000
stop_bg_complex_data $load_handle0
stop_bg_complex_data $load_handle1
stop_bg_complex_data $load_handle2
- set retry 100
+ set retry 10
while {$retry && ([$master debug digest] ne [$slave debug digest])}\
{
after 1000
Please sign in to comment.
Something went wrong with that request. Please try again.