Skip to content

Commit

Permalink
tasks-rcu: Track blocked RCU Tasks Trace readers
Browse files Browse the repository at this point in the history
This commit places any task that has ever blocked within an RCU
Tasks Trace read-side critical section on a per-CPU list within the
rcu_tasks_percpu structure.  Tasks are removed from this list when they
exit by the exit_tasks_rcu_finish_trace() function.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: KP Singh <kpsingh@kernel.org>
  • Loading branch information
paulmckrcu committed May 17, 2022
1 parent 52bc714 commit 7c98805
Show file tree
Hide file tree
Showing 3 changed files with 34 additions and 9 deletions.
20 changes: 13 additions & 7 deletions include/linux/rcupdate.h
Expand Up @@ -170,13 +170,19 @@ void synchronize_rcu_tasks(void);
# endif

# ifdef CONFIG_TASKS_TRACE_RCU
# define rcu_tasks_trace_qs(t) \
do { \
if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
!unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
smp_store_release(&(t)->trc_reader_checked, true); \
smp_mb(); /* Readers partitioned by store. */ \
} \
/* Record @t as a reader newly blocked within an RCU Tasks Trace read-side critical section. */
void rcu_tasks_trace_qs_blkd(struct task_struct *t);
/*
 * Report a quiescent state for RCU Tasks Trace on behalf of task @t
 * (invoked from context-switch-time hooks — see rcu_tasks_qs() callers).
 * If @t is outside any reader (nesting count of zero) and not yet marked
 * checked, mark it checked with release semantics.  Otherwise, if @t is
 * inside a reader and not already recorded as blocked, enqueue it on its
 * CPU's blocked-readers list via rcu_tasks_trace_qs_blkd().
 */
# define rcu_tasks_trace_qs(t) \
do { \
int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); /* snapshot nesting once */ \
\
if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
!unlikely(___rttq_nesting)) { \
smp_store_release(&(t)->trc_reader_checked, true); \
smp_mb(); /* Readers partitioned by store. */ \
} else if (___rttq_nesting && \
!READ_ONCE((t)->trc_reader_special.b.blocked)) { \
rcu_tasks_trace_qs_blkd(t); /* first time this reader has blocked */ \
} \
} while (0)
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
Expand Down
21 changes: 20 additions & 1 deletion kernel/rcu/tasks.h
Expand Up @@ -1239,6 +1239,23 @@ void rcu_read_unlock_trace_special(struct task_struct *t)
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/*
 * Add a newly blocked reader task to its CPU's list.
 *
 * Called the first time @t blocks inside an RCU Tasks Trace read-side
 * critical section.  With interrupts disabled and the per-CPU lock held,
 * records the blocking CPU, enqueues @t on that CPU's rtp_blkd_tasks
 * list, and only then marks @t as blocked.  Per the commit message,
 * tasks are removed from this list at exit by
 * exit_tasks_rcu_finish_trace().
 */
void rcu_tasks_trace_qs_blkd(struct task_struct *t)
{
unsigned long flags;
struct rcu_tasks_percpu *rtpcp;

/* Disable irqs before the per-CPU lookup so the rtpcp pointer stays
 * that of the current CPU until the lock below is acquired. */
local_irq_save(flags);
rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
/* Remember which CPU's list holds this task. */
t->trc_blkd_cpu = smp_processor_id();
/* Lazily initialize this CPU's blocked-tasks list on first use. */
if (!rtpcp->rtp_blkd_tasks.next)
INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
/* Set the blocked flag only after the enqueue, still under the lock. */
t->trc_reader_special.b.blocked = true;
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
Expand Down Expand Up @@ -1599,10 +1616,12 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
/*
 * Report any needed quiescent state for this exiting task.
 *
 * Snapshots @t->trc_reader_special before clearing the nesting count,
 * marks the task checked, and — if a quiescent state had been requested
 * (need_qs, which warns, as an exiting task should not owe one) or the
 * task had blocked within a reader — performs the unlock-time special
 * processing, which per the commit message dequeues @t from its CPU's
 * blocked-readers list.
 */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
union rcu_special trs = READ_ONCE(t->trc_reader_special);

WRITE_ONCE(t->trc_reader_checked, true);
/* An exiting task must not still be inside a reader. */
WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
WRITE_ONCE(t->trc_reader_nesting, 0);
/*
 * NOTE(review): the two "if" lines below are the pre-change and
 * post-change variants rendered by this diff page; only the second
 * (using the snapshotted "trs") is part of the resulting code.
 */
if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
if (WARN_ON_ONCE(trs.b.need_qs) || trs.b.blocked)
rcu_read_unlock_trace_special(t);
}

Expand Down
2 changes: 1 addition & 1 deletion kernel/rcu/tree_plugin.h
Expand Up @@ -899,8 +899,8 @@ void rcu_note_context_switch(bool preempt)
this_cpu_write(rcu_data.rcu_urgent_qs, false);
if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
rcu_momentary_dyntick_idle();
rcu_tasks_qs(current, preempt);
out:
rcu_tasks_qs(current, preempt);
trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
Expand Down

0 comments on commit 7c98805

Please sign in to comment.