diff --git a/include/linux/sched.h b/include/linux/sched.h
index 710d58bdec53..ce10c2dbed27 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1866,6 +1866,11 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
 }
 #endif
 
+void sched_migrate_to_cpumask_start(struct cpumask *old_mask,
+				    const struct cpumask *dest);
+void sched_migrate_to_cpumask_end(const struct cpumask *old_mask,
+				  const struct cpumask *dest);
+
 #ifndef cpu_relax_yield
 #define cpu_relax_yield() cpu_relax()
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 58235f55963c..ffeca1398987 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2043,6 +2043,47 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
 	return ret;
 }
 
+/*
+ * Calls to sched_migrate_to_cpumask_start() cannot nest. This can only be used
+ * in process context.
+ */
+void sched_migrate_to_cpumask_start(struct cpumask *old_mask,
+				    const struct cpumask *dest)
+{
+	struct task_struct *p = current;
+
+	raw_spin_lock_irq(&p->pi_lock);
+	*cpumask_bits(old_mask) = *cpumask_bits(&p->cpus_allowed);
+	raw_spin_unlock_irq(&p->pi_lock);
+
+	/*
+	 * This will force the current task onto the destination cpumask. It
+	 * will sleep when a migration to another CPU is actually needed.
+	 */
+	set_cpus_allowed_ptr(p, dest);
+}
+
+void sched_migrate_to_cpumask_end(const struct cpumask *old_mask,
+				  const struct cpumask *dest)
+{
+	struct task_struct *p = current;
+
+	/*
+	 * Check that cpus_allowed didn't change from what it was temporarily
+	 * set to earlier. If so, we can go ahead and lazily restore the old
+	 * cpumask. There's no need to immediately migrate right now.
+	 */
+	raw_spin_lock_irq(&p->pi_lock);
+	if (*cpumask_bits(&p->cpus_allowed) == *cpumask_bits(dest)) {
+		struct rq *rq = this_rq();
+
+		raw_spin_lock(&rq->lock);
+		do_set_cpus_allowed(p, old_mask);
+		raw_spin_unlock(&rq->lock);
+	}
+	raw_spin_unlock_irq(&p->pi_lock);
+}
+
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
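
For reviewers, a minimal usage sketch of the intended start/end pairing
(not part of the patch itself; the caller and its work are hypothetical
and made up for illustration):

	/*
	 * Hypothetical caller: temporarily pin the current task to the
	 * CPUs in @dest, do some work there, then lazily restore the
	 * original affinity. Must run in process context, and the
	 * start/end pair cannot nest, per the comments in the patch.
	 */
	static void do_work_on_dest_cpus(const struct cpumask *dest)
	{
		struct cpumask old_mask;

		/* Saves current->cpus_allowed into old_mask, then pins to dest. */
		sched_migrate_to_cpumask_start(&old_mask, dest);

		/* ... work that must execute on a CPU in @dest ... */

		/*
		 * Restores old_mask lazily: if cpus_allowed still equals
		 * dest, the task is not migrated immediately and keeps
		 * running where it is until the scheduler moves it.
		 */
		sched_migrate_to_cpumask_end(&old_mask, dest);
	}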