arm64/numa: support HAVE_SETUP_PER_CPU_AREA
Make each percpu area be allocated from its local NUMA node. Without this
patch, all percpu areas are allocated from the node to which cpu0 belongs.
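For context: every possible CPU gets its own copy of each per-cpu variable inside its percpu area, so placing that area on the CPU's own node keeps the common accessor path node-local. A minimal sketch of such an accessor (hypothetical module code, not part of this patch):

#include <linux/percpu.h>

/* Each possible CPU owns a private copy of this counter inside its
 * percpu area; with this patch the backing memory for a given CPU's
 * copy comes from that CPU's NUMA node rather than cpu0's node. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_touch(void)
{
	/* this_cpu_inc() applies the running CPU's __per_cpu_offset to
	 * the variable's link-time address to reach its private copy. */
	this_cpu_inc(demo_counter);
}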

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Zhen Lei authored and wildea01 committed Sep 9, 2016
1 parent f11c7ba commit 7af3a0a
Showing 2 changed files with 60 additions and 0 deletions.
8 changes: 8 additions & 0 deletions arch/arm64/Kconfig
@@ -600,6 +600,14 @@ config USE_PERCPU_NUMA_NODE_ID
 	def_bool y
 	depends on NUMA
 
+config HAVE_SETUP_PER_CPU_AREA
+	def_bool y
+	depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+	def_bool y
+	depends on NUMA
+
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz

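Defining HAVE_SETUP_PER_CPU_AREA takes arm64 off the generic percpu setup path, since the fallback in mm/percpu.c is compiled only when an architecture does not supply its own setup_per_cpu_areas(); NEED_PER_CPU_EMBED_FIRST_CHUNK in turn makes the pcpu_embed_first_chunk() helper available to the override. The generic path passes no CPU-distance callback and allocates with no node hint, which is why all percpu areas previously landed on cpu0's node. A condensed sketch of that fallback (paraphrased from mm/percpu.c of this era; exact details may differ):

/* mm/percpu.c (simplified): built only when the architecture does not
 * select CONFIG_HAVE_SETUP_PER_CPU_AREA. */
#if defined(CONFIG_SMP) && !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	/* No nid argument: memblock picks any suitable range, which in
	 * practice sits on the boot CPU's node. */
	return memblock_virt_alloc_from_nopanic(size, align,
						__pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/* NULL cpu_distance_fn: all CPUs are treated as equidistant, so
	 * percpu units are not grouped by node. */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif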
52 changes: 52 additions & 0 deletions arch/arm64/mm/numa.c
@@ -26,6 +26,7 @@
 #include <linux/of.h>
 
 #include <asm/acpi.h>
+#include <asm/sections.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
@@ -131,6 +132,57 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
 	cpu_to_node_map[cpu] = nid;
 }
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init early_cpu_to_node(int cpu)
+{
+	return cpu_to_node_map[cpu];
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+	return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+				   size_t align)
+{
+	int nid = early_cpu_to_node(cpu);
+
+	return memblock_virt_alloc_try_nid(size, align,
+			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
+
+	/*
+	 * Always reserve area for module percpu variables. That's
+	 * what the legacy allocator did.
+	 */
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+				    pcpu_cpu_distance,
+				    pcpu_fc_alloc, pcpu_fc_free);
+	if (rc < 0)
+		panic("Failed to initialize percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
 /**
  * numa_add_memblk - Set node id to memblk
  * @nid: NUMA node ID of the new memblk
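Two details in the hunk above are easy to miss. First, node_distance() expects node ids rather than cpu numbers, hence the early_cpu_to_node() translation inside pcpu_cpu_distance(); pcpu_embed_first_chunk() uses those distances to decide which CPUs may share an allocation group. Second, the closing delta loop is what makes static per-cpu accessors work: a CPU's copy of a static per-cpu variable lives at its link-time address plus __per_cpu_offset[cpu]. A sketch of the equivalent pointer arithmetic (illustrative only; demo_stat and demo_slot() are hypothetical, and the real accessor is per_cpu_ptr()):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_stat);

/* Morally what per_cpu_ptr(&demo_stat, cpu) computes for a static
 * per-cpu variable: link-time address plus the cpu's unit offset. */
static unsigned long *demo_slot(unsigned int cpu)
{
	unsigned long addr = (unsigned long)&demo_stat;

	return (unsigned long *)(addr + __per_cpu_offset[cpu]);
}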
