blk-mq: build default queue map via group_cpus_evenly()
The default queue mapping builder in blk_mq_map_queues() doesn't take NUMA
topology into account, so the resulting mapping can be poor: CPUs belonging
to different NUMA nodes get assigned to the same queue. With null_blk, IOPS
drops by ~30% when two jobs run on the same hctx from CPUs on two different
NUMA nodes, compared with running them from CPUs on the same node.

Address the issue by reusing group_cpus_evenly() to build the default map,
since it groups CPUs according to CPU and NUMA locality.
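
For illustration only (hypothetical topology and values, not part of the
patch): with two NUMA nodes, CPUs 0-3 on node 0 and CPUs 4-7 on node 1,
nr_queues == 4 and queue_offset == 0, the new builder is expected to behave
roughly like this:

        /* group_cpus_evenly() keeps each group inside one NUMA node, e.g. */
        masks = group_cpus_evenly(4);
        /* masks[0] = {0,1}, masks[1] = {2,3}, masks[2] = {4,5}, masks[3] = {6,7} */
        for (queue = 0; queue < 4; queue++)
                for_each_cpu(cpu, &masks[queue])
                        qmap->mq_map[cpu] = queue;
        /*
         * Resulting map: mq_map[0..1] = 0, mq_map[2..3] = 1,
         *                mq_map[4..5] = 2, mq_map[6..7] = 3,
         * so no hctx is shared by CPUs from different NUMA nodes.
         */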

Many drivers may benefit from the change, such as nvme pci poll queues,
nvme tcp and others; a driver-side sketch follows the diff below.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Ming Lei authored and intel-lab-lkp committed Aug 14, 2021
1 parent 5cd330f commit 46b1d0e
Showing 1 changed file with 13 additions and 51 deletions.
64 changes: 13 additions & 51 deletions block/blk-mq-cpumap.c
@@ -10,67 +10,29 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#include <linux/group_cpus.h>
 
 #include <linux/blk-mq.h>
 #include "blk.h"
 #include "blk-mq.h"
 
-static int queue_index(struct blk_mq_queue_map *qmap,
-                       unsigned int nr_queues, const int q)
-{
-        return qmap->queue_offset + (q % nr_queues);
-}
-
-static int get_first_sibling(unsigned int cpu)
-{
-        unsigned int ret;
-
-        ret = cpumask_first(topology_sibling_cpumask(cpu));
-        if (ret < nr_cpu_ids)
-                return ret;
-
-        return cpu;
-}
-
 int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-        unsigned int *map = qmap->mq_map;
-        unsigned int nr_queues = qmap->nr_queues;
-        unsigned int cpu, first_sibling, q = 0;
-
-        for_each_possible_cpu(cpu)
-                map[cpu] = -1;
+        const struct cpumask *masks;
+        unsigned int queue, cpu;
 
-        /*
-         * Spread queues among present CPUs first for minimizing
-         * count of dead queues which are mapped by all un-present CPUs
-         */
-        for_each_present_cpu(cpu) {
-                if (q >= nr_queues)
-                        break;
-                map[cpu] = queue_index(qmap, nr_queues, q++);
-        }
+        masks = group_cpus_evenly(qmap->nr_queues);
+        if (!masks)
+                goto fallback;
 
-        for_each_possible_cpu(cpu) {
-                if (map[cpu] != -1)
-                        continue;
-                /*
-                 * First do sequential mapping between CPUs and queues.
-                 * In case we still have CPUs to map, and we have some number of
-                 * threads per cores then map sibling threads to the same queue
-                 * for performance optimizations.
-                 */
-                if (q < nr_queues) {
-                        map[cpu] = queue_index(qmap, nr_queues, q++);
-                } else {
-                        first_sibling = get_first_sibling(cpu);
-                        if (first_sibling == cpu)
-                                map[cpu] = queue_index(qmap, nr_queues, q++);
-                        else
-                                map[cpu] = map[first_sibling];
-                }
+        for (queue = 0; queue < qmap->nr_queues; queue++) {
+                for_each_cpu(cpu, &masks[queue])
+                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
         }
-
+        return 0;
+fallback:
+        for_each_possible_cpu(cpu)
+                qmap->mq_map[cpu] = qmap->queue_offset;
         return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
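
A rough driver-side sketch (hypothetical callback name, not part of this
commit): any driver whose ->map_queues implementation builds its default map
with blk_mq_map_queues() picks up the NUMA-aware spreading automatically.

        static int example_map_queues(struct blk_mq_tag_set *set)
        {
                /* Default read/write queues now get the NUMA-aware map. */
                return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
        }

The same applies to queue maps that are not tied to managed IRQ affinity,
e.g. nvme-pci poll queues or nvme-tcp queues, which is why those drivers are
expected to benefit.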