ice: Change assigning method of the CPU affinity masks
With the introduction of sched_numa_hop_mask() and
for_each_numa_hop_mask(), the affinity masks for queue vectors can be
conveniently set by preferring the CPUs that are closest to the NUMA node
of the parent PCI device.
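
For reference, a minimal sketch (not part of this patch) of how the two iterators compose: each step of for_each_numa_hop_mask() yields a cpumask covering one more NUMA hop from the given node, each a superset of the previous mask, so for_each_cpu_andnot() against the previously seen mask visits only the CPUs added at that hop. The wrapper name, the set_affinity callback and the num_vectors parameter below are illustrative placeholders, not driver API:

/*
 * Illustrative sketch only -- the function and callback names are made up.
 * Walks CPUs in order of increasing NUMA distance from @node and hands
 * them out to vectors 0..num_vectors-1, mirroring the loop added below.
 */
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/topology.h>
#include <linux/types.h>

static void spread_vectors_by_numa_hop(int node, u16 num_vectors,
				       void (*set_affinity)(u16 v_idx, int cpu))
{
	const struct cpumask *mask, *prev = cpu_none_mask;
	u16 v_idx = 0;
	int cpu;

	rcu_read_lock();	/* sched_numa_hop_mask() reads RCU-protected data */
	for_each_numa_hop_mask(mask, node) {
		/* CPUs introduced at this hop: mask minus everything seen so far */
		for_each_cpu_andnot(cpu, mask, prev) {
			if (v_idx >= num_vectors)
				goto out;
			if (cpu_online(cpu))
				set_affinity(v_idx, cpu);
			v_idx++;
		}
		prev = mask;
	}
out:
	rcu_read_unlock();
}

The hunk in ice_vsi_alloc_q_vectors() below applies the same pattern directly, writing into the affinity_mask of each queue vector.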

Signed-off-by: Pawel Chmielewski <pawel.chmielewski@intel.com>
pawelchm-intel authored and intel-lab-lkp committed Feb 8, 2023
1 parent 5444bf1 commit 33971c3
Showing 1 changed file with 14 additions and 3 deletions.

drivers/net/ethernet/intel/ice/ice_base.c
@@ -122,8 +122,6 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
 	if (vsi->type == ICE_VSI_VF)
 		goto out;
-	/* only set affinity_mask if the CPU is online */
-	if (cpu_online(v_idx))
-		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
 
 	/* This will not be called in the driver load path because the netdev
 	 * will not be created yet. All other cases with register the NAPI
@@ -659,8 +657,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
  */
 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 {
+	cpumask_t *aff_mask, *last_aff_mask = cpu_none_mask;
 	struct device *dev = ice_pf_to_dev(vsi->back);
-	u16 v_idx;
+	int numa_node = dev->numa_node;
+	u16 v_idx, cpu = 0;
 	int err;
 
 	if (vsi->q_vectors[0]) {
@@ -674,6 +674,17 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 			goto err_out;
 	}
 
+	v_idx = 0;
+	for_each_numa_hop_mask(aff_mask, numa_node) {
+		for_each_cpu_andnot(cpu, aff_mask, last_aff_mask)
+			if (v_idx < vsi->num_q_vectors) {
+				if (cpu_online(cpu))
+					cpumask_set_cpu(cpu, &vsi->q_vectors[v_idx]->affinity_mask);
+				v_idx++;
+			}
+		last_aff_mask = aff_mask;
+	}
+
 	return 0;
 
 err_out:
