Skip to content
This repository

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse code

rcu: Allow nesting of rcu_idle_enter() and rcu_idle_exit()

Use of RCU in the idle loop is incorrect, quite a few instances of
just that have made their way into mainline, primarily event tracing.
The problem with RCU read-side critical sections on CPUs that RCU believes
to be idle is that RCU is completely ignoring the CPU, along with any
attempts at RCU read-side critical sections.

The approaches of eliminating the offending uses and of pushing the
definition of idle down beyond the offending uses have both proved
impractical.  The new approach is to encapsulate offending uses of RCU
with rcu_idle_exit() and rcu_idle_enter(), but this requires nesting
for code that is invoked both during idle and during normal execution.
Therefore, this commit modifies rcu_idle_enter() and rcu_idle_exit() to
permit nesting.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Acked-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
  • Loading branch information...
commit 29e37d814188ac8d60f2120583704d3ef6d634b4 1 parent ce5df97
Paul E. McKenney paulmck authored

Showing 3 changed files with 46 additions and 12 deletions. Show diff stats Hide diff stats

  1. +20 1 kernel/rcu.h
  2. +12 4 kernel/rcutiny.c
  3. +14 7 kernel/rcutree.c
21 kernel/rcu.h
@@ -33,8 +33,27 @@
33 33 * Process-level increment to ->dynticks_nesting field. This allows for
34 34 * architectures that use half-interrupts and half-exceptions from
35 35 * process context.
  36 + *
  37 + * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
  38 + * that counts the number of process-based reasons why RCU cannot
  39 + * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
  40 + * is the value used to increment or decrement this field.
  41 + *
  42 + * The rest of the bits could in principle be used to count interrupts,
  43 + * but this would mean that a negative-one value in the interrupt
  44 + * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
  45 + * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
  46 + * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
  47 + * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
  48 + * initial exit from idle.
36 49 */
37   -#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)
  50 +#define DYNTICK_TASK_NEST_WIDTH 7
  51 +#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
  52 +#define DYNTICK_TASK_NEST_MASK (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
  53 +#define DYNTICK_TASK_FLAG ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
  54 +#define DYNTICK_TASK_MASK ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
  55 +#define DYNTICK_TASK_EXIT_IDLE (DYNTICK_TASK_NEST_VALUE + \
  56 + DYNTICK_TASK_FLAG)
38 57
39 58 /*
40 59 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
16 kernel/rcutiny.c
@@ -53,7 +53,7 @@ static void __call_rcu(struct rcu_head *head,
53 53
54 54 #include "rcutiny_plugin.h"
55 55
56   -static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
  56 +static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
57 57
58 58 /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
59 59 static void rcu_idle_enter_common(long long oldval)
@@ -88,7 +88,12 @@ void rcu_idle_enter(void)
88 88
89 89 local_irq_save(flags);
90 90 oldval = rcu_dynticks_nesting;
91   - rcu_dynticks_nesting = 0;
  91 + WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
  92 + if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
  93 + DYNTICK_TASK_NEST_VALUE)
  94 + rcu_dynticks_nesting = 0;
  95 + else
  96 + rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
92 97 rcu_idle_enter_common(oldval);
93 98 local_irq_restore(flags);
94 99 }
@@ -140,8 +145,11 @@ void rcu_idle_exit(void)
140 145
141 146 local_irq_save(flags);
142 147 oldval = rcu_dynticks_nesting;
143   - WARN_ON_ONCE(oldval != 0);
144   - rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
  148 + WARN_ON_ONCE(rcu_dynticks_nesting < 0);
  149 + if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
  150 + rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
  151 + else
  152 + rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
145 153 rcu_idle_exit_common(oldval);
146 154 local_irq_restore(flags);
147 155 }
21 kernel/rcutree.c
@@ -198,7 +198,7 @@ void rcu_note_context_switch(int cpu)
198 198 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
199 199
200 200 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
201   - .dynticks_nesting = DYNTICK_TASK_NESTING,
  201 + .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
202 202 .dynticks = ATOMIC_INIT(1),
203 203 };
204 204
@@ -394,7 +394,11 @@ void rcu_idle_enter(void)
394 394 local_irq_save(flags);
395 395 rdtp = &__get_cpu_var(rcu_dynticks);
396 396 oldval = rdtp->dynticks_nesting;
397   - rdtp->dynticks_nesting = 0;
  397 + WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
  398 + if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
  399 + rdtp->dynticks_nesting = 0;
  400 + else
  401 + rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
398 402 rcu_idle_enter_common(rdtp, oldval);
399 403 local_irq_restore(flags);
400 404 }
@@ -467,7 +471,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
467 471 * Exit idle mode, in other words, -enter- the mode in which RCU
468 472 * read-side critical sections can occur.
469 473 *
470   - * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
  474 + * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
471 475 * allow for the possibility of usermode upcalls messing up our count
472 476 * of interrupt nesting level during the busy period that is just
473 477 * now starting.
@@ -481,8 +485,11 @@ void rcu_idle_exit(void)
481 485 local_irq_save(flags);
482 486 rdtp = &__get_cpu_var(rcu_dynticks);
483 487 oldval = rdtp->dynticks_nesting;
484   - WARN_ON_ONCE(oldval != 0);
485   - rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
  488 + WARN_ON_ONCE(oldval < 0);
  489 + if (oldval & DYNTICK_TASK_NEST_MASK)
  490 + rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
  491 + else
  492 + rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
486 493 rcu_idle_exit_common(rdtp, oldval);
487 494 local_irq_restore(flags);
488 495 }
@@ -2253,7 +2260,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
2253 2260 rdp->qlen_lazy = 0;
2254 2261 rdp->qlen = 0;
2255 2262 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
2256   - WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
  2263 + WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
2257 2264 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
2258 2265 rdp->cpu = cpu;
2259 2266 rdp->rsp = rsp;
@@ -2281,7 +2288,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
2281 2288 rdp->qlen_last_fqs_check = 0;
2282 2289 rdp->n_force_qs_snap = rsp->n_force_qs;
2283 2290 rdp->blimit = blimit;
2284   - rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
  2291 + rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
2285 2292 atomic_set(&rdp->dynticks->dynticks,
2286 2293 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
2287 2294 rcu_prepare_for_idle_init(cpu);

0 comments on commit 29e37d8

Please sign in to comment.
Something went wrong with that request. Please try again.