Revert "cpuset: Convert callback_lock to raw_spinlock_t"

Since the mm/slub rework the code is no longer invoked with disabled
interrupts, so there is no need for the lock to be a raw_spinlock_t.

Revert it back to a spinlock_t.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Sebastian Andrzej Siewior committed Sep 22, 2021
1 parent dbef0d6 commit 6e3039e
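
The change below is mechanical: every accessor on callback_lock switches from the raw-spinlock API back to the regular spinlock API, which under PREEMPT_RT is a sleeping lock; on non-RT kernels the two behave the same. A minimal sketch of the resulting pattern, assuming a hypothetical helper update_example() that is not part of cpuset.c:

#include <linux/spinlock.h>

/* After the revert: a plain spinlock_t is sufficient, because callback_lock
 * is no longer taken from a context that runs with interrupts disabled. */
static DEFINE_SPINLOCK(callback_lock);

static void update_example(void)	/* hypothetical, for illustration only */
{
	spin_lock_irq(&callback_lock);		/* was: raw_spin_lock_irq() */
	/* ... modify cpuset masks/flags under the lock ... */
	spin_unlock_irq(&callback_lock);	/* was: raw_spin_unlock_irq() */
}

Call sites that previously used raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() move to spin_lock_irqsave()/spin_unlock_irqrestore() in the same way.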
Showing 1 changed file with 41 additions and 41 deletions.
82 changes: 41 additions & 41 deletions kernel/cgroup/cpuset.c
@@ -358,7 +358,7 @@ void cpuset_read_unlock(void)
percpu_up_read(&cpuset_rwsem);
}

-static DEFINE_RAW_SPINLOCK(callback_lock);
+static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

@@ -1308,7 +1308,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
* Newly added CPUs will be removed from effective_cpus and
* newly deleted ones will be added back to effective_cpus.
*/
-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
if (adding) {
cpumask_or(parent->subparts_cpus,
parent->subparts_cpus, tmp->addmask);
@@ -1331,7 +1331,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
if (old_prs != new_prs)
cpuset->partition_root_state = new_prs;

-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
notify_partition_change(cpuset, old_prs, new_prs);

return cmd == partcmd_update;
@@ -1435,7 +1435,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
continue;
rcu_read_unlock();

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);

cpumask_copy(cp->effective_cpus, tmp->new_cpus);
if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
@@ -1469,7 +1469,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
if (new_prs != old_prs)
cp->partition_root_state = new_prs;

-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
notify_partition_change(cp, old_prs, new_prs);

WARN_ON(!is_in_v2_mode() &&
@@ -1588,7 +1588,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
return -EINVAL;
}

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);

/*
@@ -1599,7 +1599,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
cs->cpus_allowed);
cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
}
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);

update_cpumasks_hier(cs, &tmp);

@@ -1798,9 +1798,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
continue;
rcu_read_unlock();

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cp->effective_mems = *new_mems;
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);

WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1868,9 +1868,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
goto done;

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cs->mems_allowed = trialcs->mems_allowed;
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);

/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1961,9 +1961,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cs->flags = trialcs->flags;
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);

if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
@@ -2054,9 +2054,9 @@ static int update_prstate(struct cpuset *cs, int new_prs)
rebuild_sched_domains_locked();
out:
if (!err) {
-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cs->partition_root_state = new_prs;
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
notify_partition_change(cs, old_prs, new_prs);
}

@@ -2471,7 +2471,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);

switch (type) {
case FILE_CPULIST:
@@ -2493,7 +2493,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
ret = -EINVAL;
}

-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
return ret;
}

@@ -2811,14 +2811,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)

cpuset_inc();

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
cs->use_parent_ecpus = true;
parent->child_ecpus_count++;
}
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);

if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
@@ -2845,12 +2845,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
}
rcu_read_unlock();

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
out_unlock:
percpu_up_write(&cpuset_rwsem);
cpus_read_unlock();
@@ -2906,7 +2906,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
percpu_down_write(&cpuset_rwsem);
-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);

if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2917,7 +2917,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}

-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
percpu_up_write(&cpuset_rwsem);
}

@@ -3014,12 +3014,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
{
bool is_empty;

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, new_cpus);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->mems_allowed = *new_mems;
cs->effective_mems = *new_mems;
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);

/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -3056,10 +3056,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;

-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->effective_mems = *new_mems;
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);

if (cpus_updated)
update_tasks_cpumask(cs);
@@ -3126,10 +3126,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
(parent->partition_root_state == PRS_ERROR))) {
if (cs->nr_subparts_cpus) {
-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cs->nr_subparts_cpus = 0;
cpumask_clear(cs->subparts_cpus);
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
compute_effective_cpumask(&new_cpus, cs, parent);
}

@@ -3147,9 +3147,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
NULL, tmp);
old_prs = cs->partition_root_state;
if (old_prs != PRS_ERROR) {
-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
cs->partition_root_state = PRS_ERROR;
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
notify_partition_change(cs, old_prs, PRS_ERROR);
}
}
@@ -3231,7 +3231,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)

/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
/*
@@ -3251,17 +3251,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
}
}
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
/* we don't mess with cpumasks of tasks in top_cpuset */
}

/* synchronize mems_allowed to N_MEMORY */
if (mems_updated) {
-raw_spin_lock_irq(&callback_lock);
+spin_lock_irq(&callback_lock);
if (!on_dfl)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
-raw_spin_unlock_irq(&callback_lock);
+spin_unlock_irq(&callback_lock);
update_tasks_nodemask(&top_cpuset);
}

@@ -3362,9 +3362,9 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;

-raw_spin_lock_irqsave(&callback_lock, flags);
+spin_lock_irqsave(&callback_lock, flags);
guarantee_online_cpus(tsk, pmask);
-raw_spin_unlock_irqrestore(&callback_lock, flags);
+spin_unlock_irqrestore(&callback_lock, flags);
}

/**
@@ -3435,11 +3435,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
nodemask_t mask;
unsigned long flags;

-raw_spin_lock_irqsave(&callback_lock, flags);
+spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_mems(task_cs(tsk), &mask);
rcu_read_unlock();
-raw_spin_unlock_irqrestore(&callback_lock, flags);
+spin_unlock_irqrestore(&callback_lock, flags);

return mask;
}
@@ -3531,14 +3531,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
return true;

/* Not hardwall and node outside mems_allowed: scan up cpusets */
-raw_spin_lock_irqsave(&callback_lock, flags);
+spin_lock_irqsave(&callback_lock, flags);

rcu_read_lock();
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
rcu_read_unlock();

-raw_spin_unlock_irqrestore(&callback_lock, flags);
+spin_unlock_irqrestore(&callback_lock, flags);
return allowed;
}
