Revert "workqueue: RCU protect wq->dfl_pwq and implement accessors fo…
Browse files Browse the repository at this point in the history
…r it"

This reverts commit bd31fb9 which is
commit 9f66cff upstream.

The workqueue patches backported to 6.6.y caused some reported
regressions, so revert them for now.

Reported-by: Thorsten Leemhuis <regressions@leemhuis.info>
Cc: Tejun Heo <tj@kernel.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Audra Mitchell <audra@redhat.com>
Link: https://lore.kernel.org/all/ce4c2f67-c298-48a0-87a3-f933d646c73b@leemhuis.info/
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
gregkh committed Apr 4, 2024
1 parent bfb429f commit f3c11cb
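
For context, the change being reverted had marked wq->dfl_pwq as __rcu and funneled every access through two helpers, unbound_pwq_slot() and unbound_pwq(), so that the per-CPU slots and the default slot could be handled uniformly (cpu < 0 selecting dfl_pwq). The following is a minimal user-space sketch of that slot/accessor pattern, not kernel code: C11 atomics stand in for rcu_assign_pointer()/rcu_dereference_check(), NR_CPUS and the struct fields are illustrative, and only the helper names are taken from the diff below.

/*
 * User-space analogue of the reverted accessor pattern. A release store
 * models rcu_assign_pointer(); an acquire load models the reader side.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4	/* illustrative only */

struct pool_workqueue { int pool_id; };

struct workqueue_struct {
	struct pool_workqueue *_Atomic cpu_pwq[NR_CPUS];
	struct pool_workqueue *_Atomic dfl_pwq;
};

/* One slot per CPU, plus the default slot for cpu < 0. */
static struct pool_workqueue *_Atomic *
unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
{
	return cpu >= 0 ? &wq->cpu_pwq[cpu] : &wq->dfl_pwq;
}

/* Lookup; the kernel helper used rcu_dereference_check() here. */
static struct pool_workqueue *
unbound_pwq(struct workqueue_struct *wq, int cpu)
{
	return atomic_load_explicit(unbound_pwq_slot(wq, cpu),
				    memory_order_acquire);
}

/* Publication; the kernel used rcu_assign_pointer() here. */
static void install_pwq(struct workqueue_struct *wq, int cpu,
			struct pool_workqueue *pwq)
{
	atomic_store_explicit(unbound_pwq_slot(wq, cpu), pwq,
			      memory_order_release);
}

int main(void)
{
	static struct pool_workqueue dfl = { .pool_id = -1 };
	static struct pool_workqueue p0 = { .pool_id = 0 };
	struct workqueue_struct wq = {0};

	install_pwq(&wq, -1, &dfl);	/* cpu < 0 hits the default slot */
	install_pwq(&wq, 0, &p0);
	printf("cpu0 -> pool %d, dfl -> pool %d\n",
	       unbound_pwq(&wq, 0)->pool_id,
	       unbound_pwq(&wq, -1)->pool_id);
	return 0;
}

The point of the slot indirection is that install, lookup, and teardown all go through one code path whether a per-CPU or the default pwq is involved; the revert below returns to open-coding the dfl_pwq case.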
Showing 1 changed file with 24 additions and 40 deletions.
kernel/workqueue.c
@@ -304,7 +304,7 @@ struct workqueue_struct {
 	int saved_max_active;	/* WQ: saved max_active */
 
 	struct workqueue_attrs *unbound_attrs;	/* PW: only for unbound wqs */
-	struct pool_workqueue __rcu *dfl_pwq;	/* PW: only for unbound wqs */
+	struct pool_workqueue *dfl_pwq;	/* PW: only for unbound wqs */
 
 #ifdef CONFIG_SYSFS
 	struct wq_device *wq_dev;	/* I: for sysfs interface */
@@ -629,23 +629,6 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	return ret;
 }
 
-static struct pool_workqueue __rcu **
-unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
-{
-	if (cpu >= 0)
-		return per_cpu_ptr(wq->cpu_pwq, cpu);
-	else
-		return &wq->dfl_pwq;
-}
-
-/* @cpu < 0 for dfl_pwq */
-static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
-{
-	return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
-				     lockdep_is_held(&wq_pool_mutex) ||
-				     lockdep_is_held(&wq->mutex));
-}
-
 static unsigned int work_color_to_flags(int color)
 {
 	return color << WORK_STRUCT_COLOR_SHIFT;
@@ -4335,11 +4318,10 @@ static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
 			"possible intersect\n");
 }
 
-/* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */
+/* install @pwq into @wq's cpu_pwq and return the old pwq */
 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
 					int cpu, struct pool_workqueue *pwq)
 {
-	struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu);
 	struct pool_workqueue *old_pwq;
 
 	lockdep_assert_held(&wq_pool_mutex);
@@ -4348,8 +4330,8 @@ static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
 	/* link_pwq() can handle duplicate calls */
 	link_pwq(pwq);
 
-	old_pwq = rcu_access_pointer(*slot);
-	rcu_assign_pointer(*slot, pwq);
+	old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
+	rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
 	return old_pwq;
 }

@@ -4449,11 +4431,14 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
 
 	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
 
-	/* save the previous pwqs and install the new ones */
+	/* save the previous pwq and install the new one */
 	for_each_possible_cpu(cpu)
 		ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
 							ctx->pwq_tbl[cpu]);
-	ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq);
+
+	/* @dfl_pwq might not have been used, ensure it's linked */
+	link_pwq(ctx->dfl_pwq);
+	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
 
 	mutex_unlock(&ctx->wq->mutex);
 }
@@ -4576,7 +4561,9 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu,
 
 	/* nothing to do if the target cpumask matches the current pwq */
 	wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
-	if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs))
+	pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
+					lockdep_is_held(&wq_pool_mutex));
+	if (wqattrs_equal(target_attrs, pwq->pool->attrs))
 		return;
 
 	/* create a new pwq */
@@ -4594,11 +4581,10 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu,
 
 use_dfl_pwq:
 	mutex_lock(&wq->mutex);
-	pwq = unbound_pwq(wq, -1);
-	raw_spin_lock_irq(&pwq->pool->lock);
-	get_pwq(pwq);
-	raw_spin_unlock_irq(&pwq->pool->lock);
-	old_pwq = install_unbound_pwq(wq, cpu, pwq);
+	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
+	get_pwq(wq->dfl_pwq);
+	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+	old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
 out_unlock:
 	mutex_unlock(&wq->mutex);
 	put_pwq_unlocked(old_pwq);
@@ -4636,13 +4622,10 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 
 	cpus_read_lock();
 	if (wq->flags & __WQ_ORDERED) {
-		struct pool_workqueue *dfl_pwq;
-
 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
 		/* there should only be single pwq for ordering guarantee */
-		dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
-		WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node ||
-			      wq->pwqs.prev != &dfl_pwq->pwqs_node),
+		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
 		     "ordering guarantee broken for workqueue %s\n", wq->name);
 	} else {
 		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
@@ -4873,7 +4856,7 @@ static bool pwq_busy(struct pool_workqueue *pwq)
 		if (pwq->nr_in_flight[i])
 			return true;
 
-	if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1))
+	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
 		return true;
 	if (!pwq_is_empty(pwq))
 		return true;
@@ -4957,12 +4940,13 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		rcu_read_lock();
 
 		for_each_possible_cpu(cpu) {
-			put_pwq_unlocked(unbound_pwq(wq, cpu));
-			RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL);
+			pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
+			RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
+			put_pwq_unlocked(pwq);
 		}
 
-		put_pwq_unlocked(unbound_pwq(wq, -1));
-		RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL);
+		put_pwq_unlocked(wq->dfl_pwq);
+		wq->dfl_pwq = NULL;
 
 		rcu_read_unlock();
 	}
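
A detail visible in the last hunk: the restored destroy_workqueue() path snapshots each published pointer, clears the slot, and only then drops the reference. Below is a minimal user-space sketch of that snapshot/unpublish/put order, with a plain C11 atomic and an illustrative refcount standing in for rcu_access_pointer(), RCU_INIT_POINTER() and put_pwq_unlocked(); all names are illustrative.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pwq {
	atomic_int refcnt;	/* illustrative stand-in for pwq->refcnt */
};

/* Drop one reference; free on the last one (models put_pwq_unlocked()). */
static void put_pwq(struct pwq *pwq)
{
	if (pwq && atomic_fetch_sub(&pwq->refcnt, 1) == 1) {
		puts("pwq freed");
		free(pwq);
	}
}

/* Snapshot the pointer, unpublish the slot, then drop the reference. */
static void teardown_slot(struct pwq *_Atomic *slot)
{
	struct pwq *old = atomic_load(slot);

	atomic_store(slot, NULL);
	put_pwq(old);
}

int main(void)
{
	struct pwq *_Atomic slot;
	struct pwq *pwq = malloc(sizeof(*pwq));

	if (!pwq)
		return 1;
	atomic_init(&pwq->refcnt, 1);
	atomic_init(&slot, pwq);
	teardown_slot(&slot);	/* prints "pwq freed" */
	return 0;
}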
