win: fix unsavory rwlock fallback implementation

Before this patch an uv_mutex_t (backed by a critical section) could be
released by a thread different from the one that acquired it, which is
not allowed. This is fixed by using a semaphore instead.

BUG: libuv#515
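
The property the fix relies on: a Win32 CRITICAL_SECTION (and therefore the
uv_mutex_t built on it) is owned by the thread that entered it and must be
left by that same thread, while a Win32 semaphore has no owning thread and
may be released from any thread. A minimal standalone sketch of that
hand-off, not taken from libuv and with illustrative names only:

#include <windows.h>

static HANDLE write_gate;

static DWORD WINAPI release_on_other_thread(LPVOID arg) {
  (void) arg;
  /* Fine for a semaphore; calling LeaveCriticalSection() here for a critical
   * section entered by the main thread would be invalid. */
  if (!ReleaseSemaphore(write_gate, 1, NULL))
    return GetLastError();
  return 0;
}

int main(void) {
  HANDLE thread;

  write_gate = CreateSemaphoreW(NULL, 1, 1, NULL);  /* binary semaphore */
  if (write_gate == NULL)
    return 1;

  /* Acquire on the main thread... */
  if (WaitForSingleObject(write_gate, INFINITE) != WAIT_OBJECT_0)
    return 1;

  /* ...and release from a different thread. */
  thread = CreateThread(NULL, 0, release_on_other_thread, NULL, 0, NULL);
  if (thread == NULL)
    return 1;
  WaitForSingleObject(thread, INFINITE);
  CloseHandle(thread);
  CloseHandle(write_gate);
  return 0;
}

The rwlock fallback depends on exactly this hand-off: the first reader
acquires the write gate, and the last reader, possibly a different thread,
releases it.
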
piscisaureus committed Sep 8, 2015
1 parent e95960c commit 75559e4
Showing 2 changed files with 67 additions and 37 deletions.
12 changes: 10 additions & 2 deletions include/uv-win.h
@@ -250,8 +250,16 @@ typedef union {
   /* windows.h. */
   SRWLOCK srwlock_;
   struct {
-    uv_mutex_t read_mutex_;
-    uv_mutex_t write_mutex_;
+    union {
+      CRITICAL_SECTION cs;
+      /* TODO: remove me in v2.x. */
+      uv_mutex_t unused;
+    } read_lock_;
+    union {
+      HANDLE sem;
+      /* TODO: remove me in v2.x. */
+      uv_mutex_t unused;
+    } write_lock_;
     unsigned int num_readers_;
   } fallback_;
 } uv_rwlock_t;
92 changes: 57 additions & 35 deletions src/win/thread.c
@@ -396,83 +396,105 @@ static void uv__rwlock_srwlock_wrunlock(uv_rwlock_t* rwlock) {
 
 
 static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock) {
-  int err;
-
-  err = uv_mutex_init(&rwlock->fallback_.read_mutex_);
-  if (err)
-    return err;
+  /* Initialize the semaphore that acts as the write lock. */
+  HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL);
+  if (handle == NULL)
+    return uv_translate_sys_error(GetLastError());
+  rwlock->fallback_.write_lock_.sem = handle;
 
-  err = uv_mutex_init(&rwlock->fallback_.write_mutex_);
-  if (err) {
-    uv_mutex_destroy(&rwlock->fallback_.read_mutex_);
-    return err;
-  }
+  /* Initialize the critical section protecting the reader count. */
+  InitializeCriticalSection(&rwlock->fallback_.read_lock_.cs);
 
+  /* Initialize the reader count. */
   rwlock->fallback_.num_readers_ = 0;
 
   return 0;
 }
 
 
 static void uv__rwlock_fallback_destroy(uv_rwlock_t* rwlock) {
-  uv_mutex_destroy(&rwlock->fallback_.read_mutex_);
-  uv_mutex_destroy(&rwlock->fallback_.write_mutex_);
+  DeleteCriticalSection(&rwlock->fallback_.read_lock_.cs);
+  CloseHandle(rwlock->fallback_.write_lock_.sem);
 }
 
 
 static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock) {
-  uv_mutex_lock(&rwlock->fallback_.read_mutex_);
-
-  if (++rwlock->fallback_.num_readers_ == 1)
-    uv_mutex_lock(&rwlock->fallback_.write_mutex_);
+  /* Acquire the lock that protects the reader count. */
+  EnterCriticalSection(&rwlock->fallback_.read_lock_.cs);
+
+  /* Increase the reader count, and lock for write if this is the first
+   * reader.
+   */
+  if (++rwlock->fallback_.num_readers_ == 1) {
+    DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, INFINITE);
+    if (r != WAIT_OBJECT_0)
+      uv_fatal_error(GetLastError(), "WaitForSingleObject");
+  }
 
-  uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
+  /* Release the lock that protects the reader count. */
+  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
 }
 
 
 static int uv__rwlock_fallback_tryrdlock(uv_rwlock_t* rwlock) {
   int err;
 
-  err = uv_mutex_trylock(&rwlock->fallback_.read_mutex_);
-  if (err)
-    goto out;
+  if (!TryEnterCriticalSection(&rwlock->fallback_.read_lock_.cs))
+    return UV_EAGAIN;
 
   err = 0;
-  if (rwlock->fallback_.num_readers_ == 0)
-    err = uv_mutex_trylock(&rwlock->fallback_.write_mutex_);
-
-  if (err == 0)
-    rwlock->fallback_.num_readers_++;
-
-  uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
+  if (rwlock->fallback_.num_readers_ == 0) {
+    DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, 0);
+    if (r == WAIT_OBJECT_0)
+      rwlock->fallback_.num_readers_++;
+    else if (r == WAIT_TIMEOUT)
+      err = UV_EAGAIN;
+    else if (r == WAIT_FAILED)
+      err = uv_translate_sys_error(GetLastError());
+    else
+      err = UV_EIO;
+  } else {
+    /* The write gate is already held for the readers; just add one. */
+    rwlock->fallback_.num_readers_++;
+  }
 
-out:
+  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
   return err;
 }
 
 
 static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock) {
-  uv_mutex_lock(&rwlock->fallback_.read_mutex_);
+  EnterCriticalSection(&rwlock->fallback_.read_lock_.cs);
 
-  if (--rwlock->fallback_.num_readers_ == 0)
-    uv_mutex_unlock(&rwlock->fallback_.write_mutex_);
+  if (--rwlock->fallback_.num_readers_ == 0) {
+    if (!ReleaseSemaphore(rwlock->fallback_.write_lock_.sem, 1, NULL))
+      uv_fatal_error(GetLastError(), "ReleaseSemaphore");
+  }
 
-  uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
+  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
 }
 
 
 static void uv__rwlock_fallback_wrlock(uv_rwlock_t* rwlock) {
-  uv_mutex_lock(&rwlock->fallback_.write_mutex_);
+  DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, INFINITE);
+  if (r != WAIT_OBJECT_0)
+    uv_fatal_error(GetLastError(), "WaitForSingleObject");
 }
 
 
 static int uv__rwlock_fallback_trywrlock(uv_rwlock_t* rwlock) {
-  return uv_mutex_trylock(&rwlock->fallback_.write_mutex_);
+  DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, 0);
+  if (r == WAIT_OBJECT_0)
+    return 0;
+  else if (r == WAIT_TIMEOUT)
+    return UV_EAGAIN;
+  else if (r == WAIT_FAILED)
+    return uv_translate_sys_error(GetLastError());
+  else
+    return UV_EIO;
 }
 
 
 static void uv__rwlock_fallback_wrunlock(uv_rwlock_t* rwlock) {
-  uv_mutex_unlock(&rwlock->fallback_.write_mutex_);
+  if (!ReleaseSemaphore(rwlock->fallback_.write_lock_.sem, 1, NULL))
+    uv_fatal_error(GetLastError(), "ReleaseSemaphore");
 }


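For context, the interleaving that broke the old fallback needs the first and
last readers to be different threads: reader A takes the read lock (its
thread acquires the write gate), reader B takes it as well, A releases, and
B, now the last reader, releases the gate that A's thread acquired. A rough,
hypothetical sketch of that sequence against the public libuv API (not part
of the commit; uv semaphores are used only to force the overlap):

#include <stdio.h>
#include <uv.h>

static uv_rwlock_t lock;
static uv_sem_t a_locked;    /* A holds the read lock */
static uv_sem_t b_locked;    /* B holds the read lock too */
static uv_sem_t a_released;  /* A has dropped its read lock */

static void reader_a(void* arg) {
  (void) arg;
  uv_rwlock_rdlock(&lock);    /* first reader: takes the write gate */
  uv_sem_post(&a_locked);
  uv_sem_wait(&b_locked);     /* keep the overlap alive until B is in */
  uv_rwlock_rdunlock(&lock);  /* not the last reader: gate stays held */
  uv_sem_post(&a_released);
}

static void reader_b(void* arg) {
  (void) arg;
  uv_sem_wait(&a_locked);
  uv_rwlock_rdlock(&lock);    /* overlapping second reader */
  uv_sem_post(&b_locked);
  uv_sem_wait(&a_released);
  uv_rwlock_rdunlock(&lock);  /* last reader: releases the gate A acquired */
}

int main(void) {
  uv_thread_t a, b;

  if (uv_rwlock_init(&lock) != 0 ||
      uv_sem_init(&a_locked, 0) != 0 ||
      uv_sem_init(&b_locked, 0) != 0 ||
      uv_sem_init(&a_released, 0) != 0)
    return 1;

  if (uv_thread_create(&a, reader_a, NULL) != 0 ||
      uv_thread_create(&b, reader_b, NULL) != 0)
    return 1;
  uv_thread_join(&a);
  uv_thread_join(&b);

  uv_rwlock_wrlock(&lock);    /* works only if the write gate was released */
  uv_rwlock_wrunlock(&lock);
  uv_rwlock_destroy(&lock);

  uv_sem_destroy(&a_locked);
  uv_sem_destroy(&b_locked);
  uv_sem_destroy(&a_released);
  puts("ok");
  return 0;
}

On builds where uv_rwlock_t takes the fallback path, the final
uv_rwlock_rdunlock() above used to unlock a critical-section-backed mutex
locked by the other thread; with the semaphore-based write gate the hand-off
is well defined and the subsequent uv_rwlock_wrlock() can succeed.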
