kthread: Stop abusing TASK_UNINTERRUPTIBLE (INCOMPLETE)

Instead of having the new kthread sleep in TASK_UNINTERRUPTIBLE until
its first wake_up_process(), leave it as a new, never-scheduled task
and require the caller to start it with wake_up_new_task().
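
For illustration only (this sketch is not part of the patch;
my_thread_fn, my_data, and cpu are placeholders), the caller-side
pattern after this change becomes:

	struct task_struct *task;

	task = kthread_create(my_thread_fn, my_data, "example/%u", cpu);
	if (IS_ERR(task))
		return PTR_ERR(task);
	/* Binding is safe here: the task has never been scheduled. */
	kthread_bind(task, cpu);
	/* The caller, not the kthread machinery, issues the first wakeup. */
	wake_up_new_task(task);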

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
ebiederm authored and intel-lab-lkp committed Jun 26, 2022
commit 950f492 (parent 26f2f82)
Showing 16 changed files with 72 additions and 77 deletions.
2 changes: 1 addition & 1 deletion arch/arm/common/bL_switcher.c
@@ -311,7 +311,7 @@ static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
 				      cpu_to_node(cpu), "kswitcher_%d", cpu);
 	if (!IS_ERR(task)) {
 		kthread_bind(task, cpu);
-		wake_up_process(task);
+		wake_up_new_task(task);
 	} else
 		pr_err("%s failed for CPU %d\n", __func__, cpu);
 	return task;
4 changes: 2 additions & 2 deletions arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1206,7 +1206,7 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
 		goto out;
 	}
 	kthread_bind(thread, cpu);
-	wake_up_process(thread);
+	wake_up_new_task(thread);

 	ret = wait_event_interruptible(plr->lock_thread_wq,
 				       plr->thread_done == 1);
@@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
 	}

 	kthread_bind(thread, plr->cpu);
-	wake_up_process(thread);
+	wake_up_new_task(thread);

 	ret = wait_event_interruptible(plr->lock_thread_wq,
 				       plr->thread_done == 1);
2 changes: 1 addition & 1 deletion drivers/block/mtip32xx/mtip32xx.c
@@ -3649,7 +3649,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 		rv = -EFAULT;
 		goto kthread_run_error;
 	}
-	wake_up_process(dd->mtip_svc_handler);
+	wake_up_new_task(dd->mtip_svc_handler);
 	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
 		rv = wait_for_rebuild;

2 changes: 1 addition & 1 deletion drivers/firmware/psci/psci_checker.c
@@ -418,7 +418,7 @@ static int suspend_tests(void)
 	 * wait for the completion of suspend_threads_started.
 	 */
 	for (i = 0; i < nb_threads; ++i)
-		wake_up_process(threads[i]);
+		wake_up_new_task(threads[i]);
 	complete_all(&suspend_threads_started);

 	wait_for_completion(&suspend_threads_done);
4 changes: 2 additions & 2 deletions drivers/firmware/stratix10-svc.c
@@ -581,7 +581,7 @@ static int svc_get_sh_memory(struct platform_device *pdev,
 		return -EINVAL;
 	}

-	wake_up_process(sh_memory_task);
+	wake_up_new_task(sh_memory_task);

 	if (!wait_for_completion_timeout(&sh_memory->sync_complete, 10 * HZ)) {
 		dev_err(dev,
@@ -834,7 +834,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
 			return -EINVAL;
 		}
 		kthread_bind(chan->ctrl->task, cpu);
-		wake_up_process(chan->ctrl->task);
+		wake_up_new_task(chan->ctrl->task);
 	}

 	pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
2 changes: 1 addition & 1 deletion drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2622,7 +2622,7 @@ static int bnx2fc_cpu_online(unsigned int cpu)
 	/* bind thread to the cpu */
 	kthread_bind(thread, cpu);
 	p->iothread = thread;
-	wake_up_process(thread);
+	wake_up_new_task(thread);
 	return 0;
 }

2 changes: 1 addition & 1 deletion drivers/scsi/bnx2i/bnx2i_init.c
@@ -424,7 +424,7 @@ static int bnx2i_cpu_online(unsigned int cpu)
 	/* bind thread to the cpu */
 	kthread_bind(thread, cpu);
 	p->iothread = thread;
-	wake_up_process(thread);
+	wake_up_new_task(thread);
 	return 0;
 }

2 changes: 1 addition & 1 deletion drivers/scsi/qedi/qedi_main.c
@@ -1967,7 +1967,7 @@ static int qedi_cpu_online(unsigned int cpu)

 	kthread_bind(thread, cpu);
 	p->iothread = thread;
-	wake_up_process(thread);
+	wake_up_new_task(thread);
 	return 0;
 }

4 changes: 2 additions & 2 deletions include/linux/kthread.h
@@ -53,7 +53,7 @@ bool kthread_is_per_cpu(struct task_struct *k);
 	struct task_struct *__k						\
 		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
 	if (!IS_ERR(__k))						\
-		wake_up_process(__k);					\
+		wake_up_new_task(__k);					\
 	__k;								\
 })

@@ -77,7 +77,7 @@ kthread_run_on_cpu(int (*threadfn)(void *data), void *data,

 	p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
 	if (!IS_ERR(p))
-		wake_up_process(p);
+		wake_up_new_task(p);

 	return p;
 }
2 changes: 1 addition & 1 deletion kernel/bpf/cpumap.c
@@ -475,7 +475,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,

 	/* Make sure kthread runs on a single CPU */
 	kthread_bind(rcpu->kthread, cpu);
-	wake_up_process(rcpu->kthread);
+	wake_up_new_task(rcpu->kthread);

 	return rcpu;

2 changes: 1 addition & 1 deletion kernel/dma/map_benchmark.c
@@ -134,7 +134,7 @@ static int do_map_benchmark(struct map_benchmark_data *map)

 	for (i = 0; i < threads; i++) {
 		get_task_struct(tsk[i]);
-		wake_up_process(tsk[i]);
+		wake_up_new_task(tsk[i]);
 	}

 	msleep_interruptible(map->bparam.seconds * 1000);
114 changes: 54 additions & 60 deletions kernel/kthread.c
@@ -329,51 +329,12 @@ EXPORT_SYMBOL(kthread_complete_and_exit);

 static int kthread(void *_create)
 {
-	static const struct sched_param param = { .sched_priority = 0 };
-	/* Copy data: it's on kthread's stack */
-	struct kthread_create_info *create = _create;
-	int (*threadfn)(void *data) = create->threadfn;
-	void *data = create->data;
-	struct completion *done;
-	struct kthread *self;
-	int ret;
-
-	self = to_kthread(current);
+	struct kthread *self = to_kthread(current);
+	int ret = -EINTR;

-	/* If user was SIGKILLed, I release the structure. */
-	done = xchg(&create->done, NULL);
-	if (!done) {
-		kfree(create);
-		kthread_exit(-EINTR);
-	}
-
-	self->threadfn = threadfn;
-	self->data = data;
-
-	/*
-	 * The new thread inherited kthreadd's priority and CPU mask. Reset
-	 * back to default in case they have been changed.
-	 */
-	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
-	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
-
-	/* OK, tell user we're spawned, wait for stop or wakeup */
-	__set_current_state(TASK_UNINTERRUPTIBLE);
-	create->result = current;
-	/*
-	 * Thread is going to call schedule(), do not preempt it,
-	 * or the creator may spend more time in wait_task_inactive().
-	 */
-	preempt_disable();
-	complete(done);
-	schedule_preempt_disabled();
-	preempt_enable();
-
-	ret = -EINTR;
 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 		cgroup_kthread_ready();
 		__kthread_parkme(self);
-		ret = threadfn(data);
+		ret = self->threadfn(self->data);
 	}
 	kthread_exit(ret);
 }
@@ -391,25 +352,41 @@ int tsk_fork_get_node(struct task_struct *tsk)
 static void create_kthread(struct kthread_create_info *create)
 {
 	struct task_struct *new;
+	struct completion *done;

 #ifdef CONFIG_NUMA
 	current->pref_node_fork = create->node;
 #endif
 	/* We want our own signal handler (we take no signals by default). */
 	new = new_kthread(kthread, create, NUMA_NO_NODE);
+	create->result = new;
+	/* Claim the completion */
+	done = xchg(&create->done, NULL);
 	if (IS_ERR(new)) {
-		/* If user was SIGKILLed, I release the structure. */
-		struct completion *done = xchg(&create->done, NULL);
-
-		if (!done) {
-			kfree(create);
-			return;
-		}
-		create->result = ERR_CAST(new);
-		complete(done);
-	} else {
-		wake_up_new_task(new);
+		if (done)
+			complete(done);
+	} else if (done) {
+		static const struct sched_param param = { .sched_priority = 0 };
+		struct kthread *kthread = to_kthread(new);
+
+		kthread->threadfn = create->threadfn;
+		kthread->data = create->data;
+
+		/*
+		 * The new thread inherited kthreadd's priority and CPU mask. Reset
+		 * back to default in case they have been changed.
+		 */
+		sched_setscheduler_nocheck(new, SCHED_NORMAL, &param);
+		set_cpus_allowed_ptr(new, housekeeping_cpumask(HK_TYPE_KTHREAD));
+
+		/* OK, tell user we're spawned, wait for stop or wakeup */
+		//wake_up_new_task(new);
 	}
+	/* If user was SIGKILLed, release the structure. */
+	if (!done)
+		kfree(create);
+	else
+		complete(done);
 }

 static __printf(4, 0)
@@ -518,11 +495,11 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);

-static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
+static void kthread_bind_mask_parked(struct task_struct *p, const struct cpumask *mask)
 {
 	unsigned long flags;

-	if (!wait_task_inactive(p, state)) {
+	if (!wait_task_inactive(p, TASK_PARKED)) {
 		WARN_ON(1);
 		return;
 	}
@@ -534,14 +511,31 @@ static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 }

-static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
+static void kthread_bind_mask_new(struct task_struct *new, const struct cpumask *mask)
 {
-	__kthread_bind_mask(p, cpumask_of(cpu), state);
+	unsigned long flags;
+
+	/*
+	 * FIXME: verify that p is a new task that
+	 * has not yet been passed through
+	 * wake_up_new_task
+	 */
+
+	/* It's safe because new has never been scheduled. */
+	raw_spin_lock_irqsave(&new->pi_lock, flags);
+	do_set_cpus_allowed(new, mask);
+	new->flags |= PF_NO_SETAFFINITY;
+	raw_spin_unlock_irqrestore(&new->pi_lock, flags);
+}
+
+static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	kthread_bind_mask_new(p, cpumask_of(cpu));
 }

 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 {
-	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
+	kthread_bind_mask_new(p, mask);
 }

 /**
@@ -555,7 +549,7 @@ void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
+	__kthread_bind(p, cpu);
 }
 EXPORT_SYMBOL(kthread_bind);

@@ -629,7 +623,7 @@ void kthread_unpark(struct task_struct *k)
 	 * The binding was lost and we need to set it again.
 	 */
 	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-		__kthread_bind(k, kthread->cpu, TASK_PARKED);
+		kthread_bind_mask_parked(k, cpumask_of(kthread->cpu));

 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 	/*
@@ -863,7 +857,7 @@ __kthread_create_worker(int cpu, unsigned int flags,

 	worker->flags = flags;
 	worker->task = task;
-	wake_up_process(task);
+	wake_up_new_task(task);
 	return worker;

 fail_task:
1 change: 1 addition & 0 deletions kernel/smpboot.c
@@ -192,6 +192,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 	 * Park the thread so that it could start right on the CPU
 	 * when it is available.
 	 */
+	wake_up_new_task(tsk);
 	kthread_park(tsk);
 	get_task_struct(tsk);
 	*per_cpu_ptr(ht->store, cpu) = tsk;
2 changes: 1 addition & 1 deletion kernel/workqueue.c
@@ -1961,7 +1961,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	raw_spin_lock_irq(&pool->lock);
 	worker->pool->nr_workers++;
 	worker_enter_idle(worker);
-	wake_up_process(worker->task);
+	wake_up_new_task(worker->task);
 	raw_spin_unlock_irq(&pool->lock);

 	return worker;
2 changes: 1 addition & 1 deletion net/core/pktgen.c
@@ -3864,7 +3864,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)

 	t->net = pn;
 	get_task_struct(p);
-	wake_up_process(p);
+	wake_up_new_task(p);
 	wait_for_completion(&t->start_done);

 	return 0;
2 changes: 1 addition & 1 deletion net/sunrpc/svc.c
@@ -769,7 +769,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 		svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

 		svc_sock_update_bufs(serv);
-		wake_up_process(task);
+		wake_up_new_task(task);
 	} while (nrservs > 0);

 	return 0;
