New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add phase-fairness RW lock scheduling #20

Merged
merged 8 commits into from Feb 24, 2018

yamc::fair::shared_(timed_)mutex bugfix

- deadlock on a race condition between unlock and try_lock_* timeout.
- try_lock_* timeout with the PhaseFairness policy overran the 'lockable'
  marking of subsequent shared-lock nodes, causing writer starvation.
  • Loading branch information...
yohhoy committed Feb 18, 2018
commit 560f3ff91ada1b6109704e3eb643f6b17ff73391
Copy path View file
@@ -211,9 +211,9 @@ class shared_mutex_base {
assert(queue_.next == &locked_ && (locked_.status & node_status_mask) == 2);
wq_pop_locknode();
if (!wq_empty()) {
// mark subsequent 'waiting' shared-lock nodes as 'lockable'
// mark subsequent shared-lock nodes as 'lockable'
if (RwLockFairness::phased) {
// PhaseFairness: mark all queued shared-lock nodes
// PhaseFairness: mark all queued shared-lock nodes if the next is (waiting) shared-lock.
if ((queue_.next->status & node_status_mask) == 1) {
for (node* p = queue_.next; p != &queue_; p = p->next) {
if ((p->status & node_status_mask) == 1) {
@@ -222,7 +222,7 @@ class shared_mutex_base {
}
}
} else {
// TaskFairness: mark immediately subsequent shared-lock nodes
// TaskFairness: mark directly subsequent shared-lock nodes group.
node* p = queue_.next;
while (p != &queue_ && (p->status & node_status_mask) == 1) {
p->status |= 2;
@@ -246,25 +246,15 @@ class shared_mutex_base {
if (cv_.wait_until(lk, tp) == std::cv_status::timeout) {
if (queue_.next == &request) // re-check predicate
break;
if (request.prev == &locked_ && (locked_.status & node_status_mask) == 3) {
if ((request.prev->status & node_status_mask) == 3) {
//
// When exclusive-lock are timeouted and previous shared-lock own lock,
// mark subsequent 'waiting' shared-lock nodes as 'lockable'.
// When exclusive-lock timeout and previous shared-lock is 'lockable(-ing)',
// mark directly subsequent 'waiting' shared-lock nodes group as 'lockable'.
//
if (RwLockFairness::phased) {
// PhaseFairness: mark all queued shared-lock nodes
for (node* p = request.next; p != &queue_; p = p->next) {
if ((p->status & node_status_mask) == 1) {
p->status |= 2;
}
}
} else {
// TaskFairness: mark immediately subsequent shared-lock nodes group
node* p = request.next;
while (p != &queue_ && (p->status & node_status_mask) == 1) {
p->status |= 2;
p = p->next;
}
node* p = request.next;
while (p != &queue_ && (p->status & node_status_mask) == 1) {
p->status |= 2;
p = p->next;
}
cv_.notify_all();
}
ProTip! Use n and p to navigate between commits in a pull request.