Commit 6e288b0

rcu: remove qatomic_mb_set, expand comments
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
bonzini committed May 8, 2023
1 parent 792f77f commit 6e288b0
Showing 2 changed files with 15 additions and 14 deletions.
5 changes: 4 additions & 1 deletion include/qemu/rcu.h
@@ -87,7 +87,10 @@ static inline void rcu_read_lock(void)
     ctr = qatomic_read(&rcu_gp_ctr);
     qatomic_set(&p_rcu_reader->ctr, ctr);
 
-    /* Write p_rcu_reader->ctr before reading RCU-protected pointers. */
+    /*
+     * Read rcu_gp_ctr and write p_rcu_reader->ctr before reading
+     * RCU-protected pointers.
+     */
     smp_mb_placeholder();
 }
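
The expanded comment describes the reader-side ordering: rcu_gp_ctr is read and p_rcu_reader->ctr published before any RCU-protected pointer is dereferenced. A minimal sketch of a reader relying on that guarantee; struct Config, shared_cfg and read_config_value are hypothetical, only rcu_read_lock(), rcu_read_unlock() and qatomic_rcu_read() come from QEMU:

#include "qemu/osdep.h"
#include "qemu/rcu.h"

struct Config { int value; };
static struct Config *shared_cfg;   /* assumed RCU-protected pointer */

static int read_config_value(void)
{
    struct Config *cfg;
    int ret;

    rcu_read_lock();                 /* publishes p_rcu_reader->ctr, then barrier */
    cfg = qatomic_rcu_read(&shared_cfg);
    ret = cfg ? cfg->value : 0;      /* cfg remains valid until the unlock */
    rcu_read_unlock();
    return ret;
}

Without the barrier in rcu_read_lock(), the load of shared_cfg could be reordered before the store to p_rcu_reader->ctr, letting a concurrent synchronize_rcu() conclude that no reader is active and free the object while cfg is still in use.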

24 changes: 11 additions & 13 deletions util/rcu.c
@@ -83,19 +83,17 @@ static void wait_for_readers(void)
          */
         qemu_event_reset(&rcu_gp_event);
 
-        /* Instead of using qatomic_mb_set for index->waiting, and
-         * qatomic_mb_read for index->ctr, memory barriers are placed
-         * manually since writes to different threads are independent.
-         * qemu_event_reset has acquire semantics, so no memory barrier
-         * is needed here.
-         */
         QLIST_FOREACH(index, &registry, node) {
             qatomic_set(&index->waiting, true);
         }
 
         /* Here, order the stores to index->waiting before the loads of
          * index->ctr.  Pairs with smp_mb_placeholder() in rcu_read_unlock(),
          * ensuring that the loads of index->ctr are sequentially consistent.
+         *
+         * If this is the last iteration, this barrier also prevents
+         * frees from seeping upwards, and orders the two wait phases
+         * on architectures with 32-bit longs; see synchronize_rcu().
          */
         smp_mb_global();
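
The pairing this comment describes is the classic Dekker (store-buffering) pattern: each side stores to its own flag, executes a full barrier, then loads the other side's flag. A self-contained sketch using C11 atomics instead of QEMU's qatomic_* wrappers, with smp_mb_global() and smp_mb_placeholder() approximated by seq_cst fences; writer_sees_quiescent/reader_must_wake_writer are illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong ctr;      /* stands in for index->ctr */
static atomic_bool  waiting;  /* stands in for index->waiting */

/* Writer, as in wait_for_readers(): store waiting, fence, load ctr. */
static bool writer_sees_quiescent(void)
{
    atomic_store_explicit(&waiting, true, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* ~ smp_mb_global() */
    return atomic_load_explicit(&ctr, memory_order_relaxed) == 0;
}

/* Reader, as in rcu_read_unlock(): store ctr, fence, load waiting. */
static bool reader_must_wake_writer(void)
{
    atomic_store_explicit(&ctr, 0, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* ~ smp_mb_placeholder() */
    return atomic_load_explicit(&waiting, memory_order_relaxed);
}

With both fences in place, at least one side observes the other's store: either the writer sees ctr == 0, or the reader sees waiting == true and signals the event, so a quiescent reader can never be missed.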

@@ -104,7 +102,7 @@ static void wait_for_readers(void)
                 QLIST_REMOVE(index, node);
                 QLIST_INSERT_HEAD(&qsreaders, index, node);
 
-                /* No need for mb_set here, worst of all we
+                /* No need for memory barriers here, worst of all we
                  * get some extra futex wakeups.
                  */
                 qatomic_set(&index->waiting, false);
@@ -149,26 +147,26 @@ void synchronize_rcu(void)

     /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
      * Pairs with smp_mb_placeholder() in rcu_read_lock().
+     *
+     * Also orders write to RCU-protected pointers before
+     * write to rcu_gp_ctr.
      */
     smp_mb_global();
 
     QEMU_LOCK_GUARD(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
-        /* In either case, the qatomic_mb_set below blocks stores that free
-         * old RCU-protected pointers.
-         */
         if (sizeof(rcu_gp_ctr) < 8) {
             /* For architectures with 32-bit longs, a two-subphases algorithm
              * ensures we do not encounter overflow bugs.
              *
              * Switch parity: 0 -> 1, 1 -> 0.
              */
-            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
             wait_for_readers();
-            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
         } else {
             /* Increment current grace period. */
-            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
         }
 
         wait_for_readers();
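
The two-subphase branch exists because on 32-bit longs an ever-incrementing rcu_gp_ctr could wrap around and make a stale reader snapshot compare equal to the current counter. A sketch of the per-reader quiescence test that wait_for_readers() applies, using the RCU_GP_* values from util/rcu.c; gp_ongoing is an illustrative restatement, not the file's rcu_gp_ongoing():

#include <stdbool.h>

#define RCU_GP_LOCKED (1UL << 0)   /* values as in util/rcu.c */
#define RCU_GP_CTR    (1UL << 1)

/* A reader blocks the grace period iff its snapshot is nonzero (it is
 * inside a critical section) and differs from the current global value
 * (its critical section began before the counter update above).
 */
static bool gp_ongoing(unsigned long reader_snapshot, unsigned long gp_ctr)
{
    return reader_snapshot != 0 && reader_snapshot != gp_ctr;
}

Toggling the single RCU_GP_CTR bit twice, with a wait after each toggle, never depends on the counter's magnitude, so wraparound cannot defeat it; the 64-bit path can simply add RCU_GP_CTR, since a 63-bit grace-period count will not wrap in practice.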
