lib/cpumask: replace cpumask_weight with cpumask_empty where appropriate
In many cases, kernel code calls cpumask_weight() to check if any bit of
a given cpumask is set. We can do it more efficiently with cpumask_empty().
The efficiency comes from the fact that cpumask_empty() stops traversing
the cpumask as soon as it finds the first set bit, while cpumask_weight()
counts all bits unconditionally.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
YuryNorov committed Dec 8, 2021
1 parent 8dbe288 commit 80216b2d5ee90bb6dea0aaa7224948f6deb3ae83
Showing 18 changed files with 26 additions and 27 deletions.
@@ -125,7 +125,7 @@ common_shutdown_1(void *generic_ptr)
/* Wait for the secondaries to halt. */
set_cpu_present(boot_cpuid, false);
set_cpu_possible(boot_cpuid, false);
-while (cpumask_weight(cpu_present_mask))
+while (!cpumask_empty(cpu_present_mask))
barrier();
#endif

@@ -572,7 +572,7 @@ setup_arch (char **cmdline_p)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
#endif
-per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+per_cpu_scan_finalize((cpumask_empty(&early_cpu_possible_map) ?
32 : cpumask_weight(&early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
#endif /* CONFIG_ACPI_NUMA */
@@ -341,14 +341,14 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,

/* Check whether cpus belong to parent ctrl group */
cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
-if (cpumask_weight(tmpmask)) {
+if (!cpumask_empty(tmpmask)) {
rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
return -EINVAL;
}

/* Check whether cpus are dropped from this group */
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
-if (cpumask_weight(tmpmask)) {
+if (!cpumask_empty(tmpmask)) {
/* Give any dropped cpus to parent rdtgroup */
cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
update_closid_rmid(tmpmask, prgrp);
@@ -359,7 +359,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
* and update per-cpu rmid
*/
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
-if (cpumask_weight(tmpmask)) {
+if (!cpumask_empty(tmpmask)) {
head = &prgrp->mon.crdtgrp_list;
list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
if (crgrp == rdtgrp)
@@ -394,7 +394,7 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,

/* Check whether cpus are dropped from this group */
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
-if (cpumask_weight(tmpmask)) {
+if (!cpumask_empty(tmpmask)) {
/* Can't drop from default group */
if (rdtgrp == &rdtgroup_default) {
rdt_last_cmd_puts("Can't drop CPUs from default group\n");
@@ -413,12 +413,12 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
* and update per-cpu closid/rmid.
*/
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
-if (cpumask_weight(tmpmask)) {
+if (!cpumask_empty(tmpmask)) {
list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
if (r == rdtgrp)
continue;
cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
-if (cpumask_weight(tmpmask1))
+if (!cpumask_empty(tmpmask1))
cpumask_rdtgrp_clear(r, tmpmask1);
}
update_closid_rmid(tmpmask, rdtgrp);
@@ -488,7 +488,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,

/* check that user didn't specify any offline cpus */
cpumask_andnot(tmpmask, newmask, cpu_online_mask);
-if (cpumask_weight(tmpmask)) {
+if (!cpumask_empty(tmpmask)) {
ret = -EINVAL;
rdt_last_cmd_puts("Can only assign online CPUs\n");
goto unlock;
@@ -400,7 +400,7 @@ static void leave_uniprocessor(void)
int cpu;
int err;

-if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
+if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
return;
pr_notice("Re-enabling CPUs...\n");
for_each_cpu(cpu, downed_cpus) {
@@ -985,7 +985,7 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)

/* Clear global flags */
if (master) {
-if (cpumask_weight(uv_nmi_cpu_mask))
+if (!cpumask_empty(uv_nmi_cpu_mask))
uv_nmi_cleanup_mask();
atomic_set(&uv_nmi_cpus_in_nmi, -1);
atomic_set(&uv_nmi_cpu, -1);
@@ -482,7 +482,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
}

qcom_get_related_cpus(index, policy->cpus);
-if (!cpumask_weight(policy->cpus)) {
+if (cpumask_empty(policy->cpus)) {
dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
ret = -ENOENT;
goto error;
@@ -154,7 +154,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
* table and opp-shared.
*/
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
-if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
+if (ret || cpumask_empty(priv->opp_shared_cpus)) {
/*
* Either opp-table is not set or no opp-shared was found.
* Use the CPU mask from SCMI to designate CPUs sharing an OPP
@@ -1048,7 +1048,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
GEM_BUG_ON(!pmu->base.event_init);

/* Select the first online CPU as a designated reader. */
-if (!cpumask_weight(&i915_pmu_cpumask))
+if (cpumask_empty(&i915_pmu_cpumask))
cpumask_set_cpu(cpu, &i915_pmu_cpumask);

return 0;
@@ -667,7 +667,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
* engines, use the same CPU cores as general/control
* context.
*/
-if (cpumask_weight(&entry->def_intr.mask) == 0)
+if (cpumask_empty(&entry->def_intr.mask))
cpumask_copy(&entry->def_intr.mask,
&entry->general_intr_mask);
}
@@ -687,7 +687,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
* vectors, use the same CPU core as the general/control
* context.
*/
-if (cpumask_weight(&entry->comp_vect_mask) == 0)
+if (cpumask_empty(&entry->comp_vect_mask))
cpumask_copy(&entry->comp_vect_mask,
&entry->general_intr_mask);
}
@@ -193,8 +193,7 @@ int siw_get_tx_cpu(struct siw_device *sdev)
else
tx_cpumask = siw_cpu_info.tx_valid_cpus[node];

-num_cpus = cpumask_weight(tx_cpumask);
-if (!num_cpus) {
+if (cpumask_empty(tx_cpumask)) {
/* no CPU on this NUMA node */
tx_cpumask = cpu_online_mask;
num_cpus = cpumask_weight(tx_cpumask);
@@ -315,7 +315,7 @@ static int __init bcm6345_l1_of_init(struct device_node *dn,
cpumask_set_cpu(idx, &intc->cpumask);
}

-if (!cpumask_weight(&intc->cpumask)) {
+if (cpumask_empty(&intc->cpumask)) {
ret = -ENODEV;
goto out_free;
}
@@ -258,7 +258,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
nodemask_t nodemsk = NODE_MASK_NONE;
struct node_vectors *node_vectors;

-if (!cpumask_weight(cpu_mask))
+if (cpumask_empty(cpu_mask))
return 0;

nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
@@ -181,7 +181,7 @@ int padata_do_parallel(struct padata_shell *ps,
goto out;

if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
-if (!cpumask_weight(pd->cpumask.cbcpu))
+if (cpumask_empty(pd->cpumask.cbcpu))
goto out;

/* Select an alternate fallback CPU and notify the caller. */
@@ -1169,7 +1169,7 @@ void __init rcu_init_nohz(void)
struct rcu_data *rdp;

#if defined(CONFIG_NO_HZ_FULL)
-if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

@@ -1215,7 +1215,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
cpu != outgoingcpu)
cpumask_set_cpu(cpu, cm);
cpumask_and(cm, cm, housekeeping_cpumask(HK_FLAG_RCU));
-if (cpumask_weight(cm) == 0)
+if (cpumask_empty(cm))
cpumask_copy(cm, housekeeping_cpumask(HK_FLAG_RCU));
set_cpus_allowed_ptr(t, cm);
free_cpumask_var(cm);
@@ -74,7 +74,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
break;
}

-if (!cpumask_weight(sched_group_span(group))) {
+if (cpumask_empty(sched_group_span(group))) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: empty group\n");
break;
@@ -337,7 +337,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
cpus_read_lock();
preempt_disable();
clocksource_verify_choose_cpus();
-if (cpumask_weight(&cpus_chosen) == 0) {
+if (cpumask_empty(&cpus_chosen)) {
preempt_enable();
cpus_read_unlock();
pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
@@ -2032,7 +2032,7 @@ static void __init init_cpu_node_state(void)
int node;

for_each_online_node(node) {
-if (cpumask_weight(cpumask_of_node(node)) > 0)
+if (!cpumask_empty(cpumask_of_node(node)))
node_set_state(node, N_CPU);
}
}
@@ -2059,7 +2059,7 @@ static int vmstat_cpu_dead(unsigned int cpu)

refresh_zone_stat_thresholds();
node_cpus = cpumask_of_node(node);
-if (cpumask_weight(node_cpus) > 0)
+if (!cpumask_empty(node_cpus))
return 0;

node_clear_state(node, N_CPU);
