rcu-tasks: Manual revert of 4b2a6af
4b2a6af ("rcu-tasks: Add detailed debugging facility to RCU Tasks Trace CPU stall warning")

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
paulmckrcu committed Jun 1, 2022
Parent: 8d0e772, commit: 4b3564b
Showing 5 changed files with 6 additions and 57 deletions.
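
For context, the reverted facility was controlled by a single module parameter, documented in the kernel-parameters.txt hunk below. A minimal usage sketch for trees that still contain 4b2a6af (such as the parent commit 8d0e772); the boot-time form is taken from the removed documentation, while the sysfs path is an assumption based on the usual layout for rcupdate.-prefixed built-in parameters:

	# At boot, on the kernel command line:
	#   rcupdate.rcu_task_trc_detail_stall=1
	# At runtime, since module_param(..., 0644) makes the knob writable:
	echo 1 > /sys/module/rcupdate/parameters/rcu_task_trc_detail_stall

After this commit the parameter no longer exists, so both forms apply only to kernels that still carry the debugging facility.
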
Documentation/admin-guide/kernel-parameters.txt: 0 additions & 4 deletions
@@ -5030,10 +5030,6 @@
 			A change in value does not take effect until
 			the beginning of the next grace period.

-	rcupdate.rcu_task_trc_detail_stall= [KNL]
-			Print detailed per-task information prior to a
-			RCU Tasks Trace CPU stall warning.
-
 	rcupdate.rcu_self_test= [KNL]
 			Run the RCU early boot self tests

include/linux/sched.h: 0 additions & 1 deletion
@@ -846,7 +846,6 @@ struct task_struct {
 	int trc_ipi_to_cpu;
 	union rcu_special trc_reader_special;
 	struct list_head trc_holdout_list;
-	bool trc_needreport;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

 	struct sched_info sched_info;
init/init_task.c: 0 additions & 1 deletion
@@ -157,7 +157,6 @@ struct task_struct init_task
 	.trc_reader_nesting = 0,
 	.trc_reader_special.s = 0,
 	.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
-	.trc_needreport = false,
 #endif
 #ifdef CONFIG_CPUSETS
 	.mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
kernel/fork.c: 0 additions & 1 deletion
@@ -1811,7 +1811,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 	p->trc_reader_nesting = 0;
 	p->trc_reader_special.s = 0;
 	INIT_LIST_HEAD(&p->trc_holdout_list);
-	p->trc_needreport = false;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }

kernel/rcu/tasks.h: 6 additions & 50 deletions
@@ -161,9 +161,6 @@ module_param(rcu_task_contend_lim, int, 0444);
 static int rcu_task_collapse_lim __read_mostly = 10;
 module_param(rcu_task_collapse_lim, int, 0444);

-static int rcu_task_trc_detail_stall;
-module_param(rcu_task_trc_detail_stall, int, 0644);
-
 /* RCU tasks grace-period state for debugging. */
 #define RTGS_INIT 0
 #define RTGS_WAIT_WAIT_CBS 1
@@ -1241,8 +1238,6 @@ u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
 	trs_new.b.need_qs = new;
 	ret = cmpxchg(&t->trc_reader_special, trs_old, trs_new);
 	realnew = READ_ONCE(t->trc_reader_special.b.need_qs);
-	if (READ_ONCE(t->trc_needreport))
-		pr_info("%s(P%d/%d, %d/%d, %d)->%d.\n", __func__, t->pid, task_cpu(t), old, ret.b.need_qs, new, realnew);
 	return ret.b.need_qs;
 }

@@ -1291,41 +1286,29 @@ static void trc_read_check_handler(void *t_in)
 	struct task_struct *texp = t_in;

 	// If the task is no longer running on this CPU, leave.
-	if (unlikely(texp != t)) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) IPIed, but P%d running.\n", __func__, texp->pid, task_cpu(texp), t->pid);
+	if (unlikely(texp != t))
 		goto reset_ipi; // Already on holdout list, so will check later.
-	}

 	// If the task is not in a read-side critical section, and
 	// if this is the last reader, awaken the grace-period kthread.
 	nesting = READ_ONCE(t->trc_reader_nesting);
 	if (likely(!nesting)) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) in quiescent state.\n", __func__, t->pid, task_cpu(t));
 		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
 		goto reset_ipi;
 	}
 	// If we are racing with an rcu_read_unlock_trace(), try again later.
-	if (unlikely(nesting < 0)) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) exiting read-side critical section.\n", __func__, t->pid, task_cpu(t));
+	if (unlikely(nesting < 0))
 		goto reset_ipi;
-	}

 	// Get here if the task is in a read-side critical section. Set
 	// its state so that it will awaken the grace-period kthread upon
 	// exit from that critical section.
-	if (READ_ONCE(t->trc_needreport))
-		pr_info("%s(P%d/%d) in read-side critical section.\n", __func__, t->pid, task_cpu(t));
 	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);

 reset_ipi:
 	// Allow future IPIs to be sent on CPU and for task.
 	// Also order this IPI handler against any later manipulations of
 	// the intended task.
-	if (READ_ONCE(t->trc_needreport))
-		pr_info("%s(P%d/%d) resetting IPIs.\n", __func__, texp->pid, task_cpu(texp));
 	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
 	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
 }
@@ -1339,14 +1322,9 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
 	bool ofl = cpu_is_offline(cpu);

 	if (task_curr(t) && !ofl) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) task is running.\n", __func__, t->pid, task_cpu(t));
 		// If no chance of heavyweight readers, do it the hard way.
-		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
-			if (READ_ONCE(t->trc_needreport))
-				pr_info("%s(P%d/%d) task is running and cannot remotely sample.\n", __func__, t->pid, task_cpu(t));
+		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
 			return -EINVAL;
-		}

 		// If heavyweight readers are enabled on the remote task,
 		// we can inspect its state despite its currently running.
@@ -1361,8 +1339,6 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
 		nesting = 0;
 	} else {
 		// The task is not running, so C-language access is safe.
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) task is not running.\n", __func__, t->pid, task_cpu(t));
 		nesting = t->trc_reader_nesting;
 		WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
 	}
@@ -1371,8 +1347,6 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
 	// so that the grace-period kthread will remove it from the
 	// holdout list.
 	if (nesting <= 0) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) in QS or exiting towards one.\n", __func__, t->pid, task_cpu(t));
 		if (!nesting)
 			rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
 		return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later.
@@ -1381,8 +1355,6 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
 	// The task is in a read-side critical section, so set up its
 	// state so that it will awaken the grace-period kthread upon exit
 	// from that critical section.
-	if (READ_ONCE(t->trc_needreport))
-		pr_info("%s(P%d/%d) in read-side critical section.\n", __func__, t->pid, task_cpu(t));
 	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
 		trc_add_holdout(t, bhp);
 	return 0;
@@ -1395,16 +1367,11 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 	int cpu;

 	// If a previous IPI is still in flight, let it complete.
-	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) { // Order IPI
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) IPI to task still in flight.\n", __func__, t->pid, task_cpu(t));
+	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
 		return;
-	}

 	// The current task had better be in a quiescent state.
 	if (t == current) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) is currently running task.\n", __func__, t->pid, task_cpu(t));
 		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
 		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
 		return;
@@ -1413,8 +1380,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 	// Attempt to nail down the task for inspection.
 	get_task_struct(t);
 	if (!task_call_func(t, trc_inspect_reader, bhp)) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) task_call_func() succeeded.\n", __func__, t->pid, task_cpu(t));
 		put_task_struct(t);
 		return;
 	}
@@ -1434,19 +1399,13 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 	cpu = task_cpu(t);

 	// If there is already an IPI outstanding, let it happen.
-	if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) IPI to CPU still in flight.\n", __func__, t->pid, task_cpu(t));
+	if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
 		return;
-	}

 	per_cpu(trc_ipi_to_cpu, cpu) = true;
 	t->trc_ipi_to_cpu = cpu;
 	rcu_tasks_trace.n_ipis++;
-	if (!smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
-		if (READ_ONCE(t->trc_needreport))
-			pr_info("%s(P%d/%d) smp_call_function_single to CPU %d launched.\n", __func__, t->pid, task_cpu(t), cpu);
-	} else {
+	if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
 		// Just in case there is some other reason for
 		// failure than the target CPU being offline.
 		WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
@@ -1578,8 +1537,6 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,

 	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
 		// If safe and needed, try to check the current task.
-		if (READ_ONCE(rcu_task_trc_detail_stall))
-			t->trc_needreport = needreport;
 		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
 		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
 			trc_wait_for_one_reader(t, hop);
@@ -1590,7 +1547,6 @@
 			trc_del_holdout(t);
 		else if (needreport)
 			show_stalled_task_trace(t, firstreport);
-		t->trc_needreport = false;
 	}

 	// Re-enable CPU hotplug now that the holdout list scan has completed.
