rcu: make memory barriers more explicit
Prepare for introducing smp_mb_placeholder() and smp_mb_global().
The new smp_mb() in synchronize_rcu() is not strictly necessary, since
the first atomic_mb_set for rcu_gp_ctr provides the required ordering.
However, synchronize_rcu is not performance critical, and it *will* be
necessary to introduce a smp_mb_global before calling wait_for_readers().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
bonzini committed Mar 12, 2018
1 parent 729c0dd commit 77a8b84
Showing 2 changed files with 22 additions and 5 deletions.
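The ordering argument in the commit message is the classic store-buffering pattern: the reader publishes its counter and then loads an RCU-protected pointer, while the updater publishes the pointer and then loads the reader's counter. Below is a minimal stand-alone sketch of that pairing, using C11 atomics and pthreads as stand-ins for QEMU's atomic_*() accessors and smp_mb(); it is an illustration only, not QEMU code, and all names in it are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int reader_ctr;     /* stands in for p_rcu_reader->ctr */
static atomic_int protected_ptr;  /* stands in for an RCU-protected pointer */
static int reader_saw_ptr, updater_saw_ctr;

static void *reader(void *arg)
{
    (void)arg;
    /* rcu_read_lock(): write the counter, full barrier, then read
     * RCU-protected data. */
    atomic_store_explicit(&reader_ctr, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    reader_saw_ptr = atomic_load_explicit(&protected_ptr, memory_order_relaxed);
    return NULL;
}

static void *updater(void *arg)
{
    (void)arg;
    /* synchronize_rcu(): the caller already wrote the new pointer; a full
     * barrier orders that write before the reads of reader counters. */
    atomic_store_explicit(&protected_ptr, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    updater_saw_ctr = atomic_load_explicit(&reader_ctr, memory_order_relaxed);
    return NULL;
}

int main(void)
{
    pthread_t r, u;
    pthread_create(&r, NULL, reader, NULL);
    pthread_create(&u, NULL, updater, NULL);
    pthread_join(r, NULL);
    pthread_join(u, NULL);
    /* With both fences in place, the outcome where both variables are 0 is
     * forbidden: either the updater sees the reader's counter, or the reader
     * sees the new pointer. */
    printf("reader_saw_ptr=%d updater_saw_ctr=%d\n",
           reader_saw_ptr, updater_saw_ctr);
    return 0;
}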
include/qemu/rcu.h: 15 changes (13 additions, 2 deletions)
@@ -79,7 +79,10 @@ static inline void rcu_read_lock(void)
     }
 
     ctr = atomic_read(&rcu_gp_ctr);
-    atomic_xchg(&p_rcu_reader->ctr, ctr);
+    atomic_set(&p_rcu_reader->ctr, ctr);
+
+    /* Write p_rcu_reader->ctr before reading RCU-protected pointers. */
+    smp_mb();
 }
 
 static inline void rcu_read_unlock(void)
@@ -91,7 +94,15 @@ static inline void rcu_read_unlock(void)
         return;
     }
 
-    atomic_xchg(&p_rcu_reader->ctr, 0);
+    /* Ensure that the critical section is seen to precede the
+     * store to p_rcu_reader->ctr. Together with the following
+     * smp_mb(), this ensures writes to p_rcu_reader->ctr
+     * are sequentially consistent.
+     */
+    atomic_store_release(&p_rcu_reader->ctr, 0);
+
+    /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
+    smp_mb();
     if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
         atomic_set(&p_rcu_reader->waiting, false);
         qemu_event_set(&rcu_gp_event);
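For reference, the new reader-side fast paths amount to roughly the following, rewritten here with C11 atomics as an approximation (atomic_set(), atomic_store_release() and smp_mb() map loosely onto relaxed stores, release stores and seq_cst fences; the struct and function names are invented for illustration, not QEMU's):

#include <stdatomic.h>
#include <stdbool.h>

struct reader_state {            /* loosely mirrors the per-thread reader data */
    atomic_ulong ctr;
    atomic_bool  waiting;
};

static atomic_ulong gp_ctr = 1;  /* stands in for rcu_gp_ctr */

static void read_lock_sketch(struct reader_state *r)
{
    unsigned long ctr = atomic_load_explicit(&gp_ctr, memory_order_relaxed);

    atomic_store_explicit(&r->ctr, ctr, memory_order_relaxed);
    /* Order the store to r->ctr before later loads of RCU-protected
     * pointers; pairs with the updater's barrier before it scans counters. */
    atomic_thread_fence(memory_order_seq_cst);
}

static bool read_unlock_sketch(struct reader_state *r)
{
    /* Release: accesses inside the critical section cannot be reordered
     * after the counter is cleared. */
    atomic_store_explicit(&r->ctr, 0, memory_order_release);
    /* Order the store to r->ctr before the load of r->waiting; pairs with
     * the updater storing ->waiting before loading ->ctr. */
    atomic_thread_fence(memory_order_seq_cst);
    /* The caller wakes the grace-period waiter when this returns true. */
    return atomic_load_explicit(&r->waiting, memory_order_relaxed);
}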
util/rcu.c: 12 changes (9 additions, 3 deletions)
@@ -92,8 +92,9 @@ static void wait_for_readers(void)
             atomic_set(&index->waiting, true);
         }
 
-        /* Here, order the stores to index->waiting before the
-         * loads of index->ctr.
+        /* Here, order the stores to index->waiting before the loads of
+         * index->ctr. Pairs with smp_mb() in rcu_read_unlock(),
+         * ensuring that the loads of index->ctr are sequentially consistent.
          */
         smp_mb();
 
@@ -142,8 +143,13 @@ static void wait_for_readers(void)
 void synchronize_rcu(void)
 {
     qemu_mutex_lock(&rcu_sync_lock);
-    qemu_mutex_lock(&rcu_registry_lock);
 
+    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
+     * Pairs with smp_mb() in rcu_read_lock().
+     */
+    smp_mb();
+
+    qemu_mutex_lock(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
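The updater side pairs with the sketch above; in the same illustrative C11 style (the simplified spin loop and the names are assumptions: the real wait_for_readers() compares the counter against rcu_gp_ctr and sleeps on rcu_gp_event rather than yielding):

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

struct reader_state {            /* same illustrative type as the reader sketch */
    atomic_ulong ctr;
    atomic_bool  waiting;
};

static void wait_for_reader_sketch(struct reader_state *r)
{
    for (;;) {
        atomic_store_explicit(&r->waiting, true, memory_order_relaxed);
        /* Order the store to r->waiting before the load of r->ctr; pairs
         * with the fence after the release store in the reader's unlock. */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&r->ctr, memory_order_relaxed) == 0) {
            return;      /* reader has left its critical section */
        }
        sched_yield();   /* the real code waits on rcu_gp_event instead */
    }
}

static void synchronize_sketch(struct reader_state *r)
{
    /* The caller already published new RCU-protected pointers; order those
     * stores before reading the reader's counter, pairing with the fence in
     * the reader's lock path. */
    atomic_thread_fence(memory_order_seq_cst);
    wait_for_reader_sketch(r);
    /* Old copies of the data may be freed once this returns. */
}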
