Skip to content

Commit

Permalink
From patchwork series 395855
Browse files Browse the repository at this point in the history
  • Loading branch information
Fox Snowpatch committed Feb 21, 2024
1 parent 88333f7 commit 6c48cad
Show file tree
Hide file tree
Showing 32 changed files with 176 additions and 74 deletions.
2 changes: 1 addition & 1 deletion arch/arm/kernel/irq.c
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ static void __init init_irq_stacks(void)
THREAD_SIZE_ORDER);
else
stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN,
THREADINFO_GFP, NUMA_NO_NODE,
THREADINFO_GFP, 0, NUMA_NO_NODE,
__builtin_return_address(0));

if (WARN_ON(!stack))
Expand Down
11 changes: 9 additions & 2 deletions arch/arm64/include/asm/pgalloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define __HAVE_ARCH_ADDR_COND_PMD
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

Expand Down Expand Up @@ -74,10 +75,16 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
* of the mm address space.
*/
/*
 * Populate a kernel PMD with a PTE table.
 *
 * Kernel page tables are always marked UXN.  When the PMD-sized range
 * being mapped lies entirely inside the data portion of the vmalloc
 * space (i.e. it does not intersect the [code_region_start,
 * code_region_end) window reserved for module/BPF code), the table is
 * additionally marked PXN so nothing it maps is kernel-executable.
 *
 * Both ends of the PMD-sized span are checked so a range straddling the
 * code region keeps execute permission.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep,
		unsigned long address)
{
	pmdval_t pmd = PMD_TYPE_TABLE | PMD_TABLE_UXN;

	/* Only init_mm (or NULL from early/trans_pgd callers) is valid here. */
	VM_BUG_ON(mm && mm != &init_mm);
	if (IS_DATA_VMALLOC_ADDR(address) &&
	    IS_DATA_VMALLOC_ADDR(address + PMD_SIZE)) {
		pmd |= PMD_TABLE_PXN;
	}
	__pmd_populate(pmdp, __pa(ptep), pmd);
}

static inline void
Expand Down
8 changes: 8 additions & 0 deletions arch/arm64/include/asm/vmalloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,4 +31,12 @@ static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
return pgprot_tagged(prot);
}

/*
 * Bounds of the window inside the vmalloc space that is reserved for
 * executable code (modules, BPF, kprobes); initialized in
 * arch/arm64/kernel/module.c and fixed after init.
 */
extern unsigned long code_region_start __ro_after_init;
extern unsigned long code_region_end __ro_after_init;

/*
 * True when @vaddr is a vmalloc address that lies outside the code
 * region, i.e. a data-only vmalloc address.
 * NOTE: @vaddr is evaluated more than once — do not pass an expression
 * with side effects.
 */
#define IS_DATA_VMALLOC_ADDR(vaddr) (((vaddr) < code_region_start || \
(vaddr) > code_region_end) && \
((vaddr) >= VMALLOC_START && \
(vaddr) < VMALLOC_END))

#endif /* _ASM_ARM64_VMALLOC_H */
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/vmap_stack.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)

BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));

p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, 0, node,
__builtin_return_address(0));
return kasan_reset_tag(p);
}
Expand Down
2 changes: 1 addition & 1 deletion arch/arm64/kernel/efi.c
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,7 @@ static int __init arm64_efi_rt_init(void)
return 0;

p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
NUMA_NO_NODE, &&l);
0, NUMA_NO_NODE, &&l);
l: if (!p) {
pr_warn("Failed to allocate EFI runtime stack\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
Expand Down
7 changes: 7 additions & 0 deletions arch/arm64/kernel/module.c
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,10 @@
static u64 module_direct_base __ro_after_init = 0;
static u64 module_plt_base __ro_after_init = 0;

/*
 * For pre-init vmalloc, assume the worst-case code range: any 2 GiB
 * bounding box that contains the kernel image must start at or above
 * _end - 2G and end at or below _text + 2G, so this interval covers
 * every placement module_init_limits() could later choose.
 */
unsigned long code_region_start __ro_after_init = (u64) (_end - SZ_2G);
unsigned long code_region_end __ro_after_init = (u64) (_text + SZ_2G);

/*
* Choose a random page-aligned base address for a window of 'size' bytes which
* entirely contains the interval [start, end - 1].
Expand Down Expand Up @@ -101,6 +105,9 @@ static int __init module_init_limits(void)
module_plt_base = random_bounding_box(SZ_2G, min, max);
}

code_region_start = module_plt_base;
code_region_end = module_plt_base + SZ_2G;

pr_info("%llu pages in range for non-PLT usage",
module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
pr_info("%llu pages in range for PLT usage",
Expand Down
2 changes: 1 addition & 1 deletion arch/arm64/kernel/probes/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)

void *alloc_insn_page(void)
{
return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
return __vmalloc_node_range(PAGE_SIZE, 1, code_region_start, code_region_end,
GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
NUMA_NO_NODE, __builtin_return_address(0));
}
Expand Down
3 changes: 2 additions & 1 deletion arch/arm64/mm/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o pageattr.o fixmap.o
context.o proc.o pageattr.o fixmap.o \
vmalloc.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
Expand Down
2 changes: 1 addition & 1 deletion arch/arm64/mm/trans_pgd.c
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
dst_ptep = trans_alloc(info);
if (!dst_ptep)
return -ENOMEM;
pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
pmd_populate_kernel_at(NULL, dst_pmdp, dst_ptep, addr);
dst_ptep = pte_offset_kernel(dst_pmdp, start);

src_ptep = pte_offset_kernel(src_pmdp, start);
Expand Down
57 changes: 57 additions & 0 deletions arch/arm64/mm/vmalloc.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/vmalloc.h>
#include <linux/mm.h>

/*
 * Allocate from [start, end) while skipping the hole
 * [exclusion_start, exclusion_end): first try the window below the
 * excluded region, then fall back to the window above it.
 */
static void *__vmalloc_node_range_split(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end,
		unsigned long exclusion_start, unsigned long exclusion_end,
		gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	void *addr;

	addr = __vmalloc_node_range(size, align, start, exclusion_start,
				    gfp_mask, prot, vm_flags, node, caller);
	if (addr)
		return addr;

	return __vmalloc_node_range(size, align, exclusion_end, end,
				    gfp_mask, prot, vm_flags, node, caller);
}

/*
 * arm64 override of __vmalloc_node(): place plain data allocations in
 * the vmalloc space while keeping them out of the
 * [code_region_start, code_region_end) window reserved for code.
 */
void *__vmalloc_node(unsigned long size, unsigned long align,
		gfp_t gfp_mask, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc_node_range_split(size, align,
					  VMALLOC_START, VMALLOC_END,
					  code_region_start, code_region_end,
					  gfp_mask, PAGE_KERNEL, vm_flags,
					  node, caller);
}

/*
 * vmalloc_huge - allocate @size bytes with huge-page mappings allowed
 * (VM_ALLOW_HUGE_VMAP), taken from the data portion of the vmalloc
 * space; the code region window is skipped.
 */
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
{
return __vmalloc_node_range_split(size, 1, VMALLOC_START, VMALLOC_END,
code_region_start, code_region_end,
gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
NUMA_NO_NODE, __builtin_return_address(0));
}

/*
 * vmalloc_user - allocate zeroed, user-mappable (VM_USERMAP) memory,
 * SHMLBA-aligned, from the data portion of the vmalloc space; the code
 * region window is skipped.
 */
void *vmalloc_user(unsigned long size)
{
return __vmalloc_node_range_split(size, SHMLBA, VMALLOC_START, VMALLOC_END,
code_region_start, code_region_end,
GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
VM_USERMAP, NUMA_NO_NODE,
__builtin_return_address(0));
}

/*
 * vmalloc_32_user - like vmalloc_user() but backed by 32-bit
 * addressable pages (GFP_VMALLOC32); also avoids the code region
 * window of the vmalloc space.
 */
void *vmalloc_32_user(unsigned long size)
{
return __vmalloc_node_range_split(size, SHMLBA, VMALLOC_START, VMALLOC_END,
code_region_start, code_region_end,
GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
VM_USERMAP, NUMA_NO_NODE,
__builtin_return_address(0));
}

5 changes: 3 additions & 2 deletions arch/arm64/net/bpf_jit_comp.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/moduleloader.h>

#include <asm/asm-extable.h>
#include <asm/byteorder.h>
Expand Down Expand Up @@ -1690,12 +1691,12 @@ u64 bpf_jit_alloc_exec_limit(void)
/*
 * Allocate executable memory for a JIT image via module_alloc() so it
 * lands in the dedicated code region rather than the general vmalloc
 * data space.  Memory is intended to be executable, reset the pointer
 * tag for KASAN.
 */
void *bpf_jit_alloc_exec(unsigned long size)
{
	return kasan_reset_tag(module_alloc(size));
}

/*
 * Free a JIT image obtained from bpf_jit_alloc_exec().  Plain call
 * rather than "return module_memfree(...)": returning a void expression
 * from a void function is a C constraint violation (C11 6.8.6.4).
 */
void bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/irq.c
Original file line number Diff line number Diff line change
Expand Up @@ -308,7 +308,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
/*
 * Allocate a vmalloc'ed IRQ stack.  The extra 0 argument is the new
 * vm_flags parameter taken by the extended __vmalloc_node() signature.
 */
static void *__init alloc_vm_stack(void)
{
	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
			      0, NUMA_NO_NODE, (void *)_RET_IP_);
}

static void __init vmap_irqstack_init(void)
Expand Down
2 changes: 1 addition & 1 deletion arch/riscv/include/asm/irq_stack.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
{
void *p;

p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, 0, node,
__builtin_return_address(0));
return kasan_reset_tag(p);
}
Expand Down
2 changes: 1 addition & 1 deletion arch/s390/hypfs/hypfs_diag.c
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ void *diag204_get_buffer(enum diag204_format fmt, int *pages)
return ERR_PTR(-EOPNOTSUPP);
}
diag204_buf = __vmalloc_node(array_size(*pages, PAGE_SIZE),
PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
PAGE_SIZE, GFP_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (!diag204_buf)
return ERR_PTR(-ENOMEM);
Expand Down
6 changes: 3 additions & 3 deletions arch/s390/kernel/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ static void __init conmode_default(void)
cpcmd("QUERY TERM", query_buffer, 1024, NULL);
ptr = strstr(query_buffer, "CONMODE");
/*
* Set the conmode to 3215 so that the device recognition
* Set the conmode to 3215 so that the device recognition
* will set the cu_type of the console to 3215. If the
* conmode is 3270 and we don't set it back then both
* 3215 and the 3270 driver will try to access the console
Expand Down Expand Up @@ -314,7 +314,7 @@ static inline void setup_zfcpdump(void) {}

/*
* Reboot, halt and power_off stubs. They just call _machine_restart,
* _machine_halt or _machine_power_off.
* _machine_halt or _machine_power_off.
*/

void machine_restart(char *command)
Expand Down Expand Up @@ -364,7 +364,7 @@ unsigned long stack_alloc(void)
void *ret;

ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
NUMA_NO_NODE, __builtin_return_address(0));
0, NUMA_NO_NODE, __builtin_return_address(0));
kmemleak_not_leak(ret);
return (unsigned long)ret;
#else
Expand Down
2 changes: 1 addition & 1 deletion arch/s390/kernel/sthyi.c
Original file line number Diff line number Diff line change
Expand Up @@ -318,7 +318,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
return;

diag204_buf = __vmalloc_node(array_size(pages, PAGE_SIZE),
PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
PAGE_SIZE, GFP_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (!diag204_buf)
return;
Expand Down
18 changes: 18 additions & 0 deletions include/asm-generic/pgalloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,24 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
}
#endif

/*
 * Architectures that define __HAVE_ARCH_ADDR_COND_PMD take the mapped
 * virtual address in pmd_populate_kernel() so they can pick PMD
 * protection bits based on it; all others keep the legacy
 * three-argument prototype.
 */
#ifdef __HAVE_ARCH_ADDR_COND_PMD
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
pte_t *ptep, unsigned long address);
#else
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
pte_t *ptep);
#endif

/*
 * pmd_populate_kernel_at - address-carrying wrapper that compiles
 * against either prototype.  Callers that know the virtual address
 * being mapped should use this so address-aware architectures see it;
 * on other architectures @address is simply ignored.
 */
static inline void pmd_populate_kernel_at(struct mm_struct *mm, pmd_t *pmdp,
pte_t *ptep, unsigned long address)
{
#ifdef __HAVE_ARCH_ADDR_COND_PMD
pmd_populate_kernel(mm, pmdp, ptep, address);
#else
pmd_populate_kernel(mm, pmdp, ptep);
#endif
}

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
Expand Down
4 changes: 2 additions & 2 deletions include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -2782,7 +2782,7 @@ static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

#if defined(CONFIG_MMU)

Expand Down Expand Up @@ -2977,7 +2977,7 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

/*
 * pte_alloc_kernel - ensure a PTE table backs @pmd, then return the
 * kernel PTE for @address (NULL if the table allocation failed).  The
 * address is forwarded to __pte_alloc_kernel() so address-aware
 * architectures can choose protection bits for the new table.
 */
#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address)) ? \
		NULL: pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS
Expand Down
15 changes: 14 additions & 1 deletion include/linux/vmalloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,8 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller) __alloc_size(1);
unsigned long vm_flags, int node, const void *caller)
__alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
Expand Down Expand Up @@ -295,4 +296,16 @@ bool vmalloc_dump_obj(void *object);
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
/*
* 64b systems should always have either DMA or DMA32 zones. For others
* GFP_DMA32 should do the right thing and use the normal zone.
*/
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#endif

#endif /* _LINUX_VMALLOC_H */
4 changes: 2 additions & 2 deletions kernel/bpf/syscall.c
Original file line number Diff line number Diff line change
Expand Up @@ -303,8 +303,8 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
return area;
}

return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
return __vmalloc_node(size, align,
gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL,
flags, numa_node, __builtin_return_address(0));
}

Expand Down
4 changes: 1 addition & 3 deletions kernel/fork.c
Original file line number Diff line number Diff line change
Expand Up @@ -304,10 +304,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
* so memcg accounting is performed manually on assigning/releasing
* stacks to tasks. Drop __GFP_ACCOUNT.
*/
stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
VMALLOC_START, VMALLOC_END,
stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN,
THREADINFO_GFP & ~__GFP_ACCOUNT,
PAGE_KERNEL,
0, node, __builtin_return_address(0));
if (!stack)
return -ENOMEM;
Expand Down
3 changes: 1 addition & 2 deletions kernel/scs.c
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,7 @@ static void *__scs_alloc(int node)
}
}

s = __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
GFP_SCS, PAGE_KERNEL, 0, node,
s = __vmalloc_node(SCS_SIZE, 1, GFP_SCS, 0, node,
__builtin_return_address(0));

out:
Expand Down
2 changes: 1 addition & 1 deletion lib/objpool.c
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
else
slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
cpu_to_node(i), __builtin_return_address(0));
0, cpu_to_node(i), __builtin_return_address(0));
if (!slot)
return -ENOMEM;
memset(slot, 0, size);
Expand Down
6 changes: 3 additions & 3 deletions lib/test_vmalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ static int random_size_align_alloc_test(void)
size = ((rnd % 10) + 1) * PAGE_SIZE;

ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0,
__builtin_return_address(0));
0, __builtin_return_address(0));
if (!ptr)
return -1;

Expand All @@ -120,7 +120,7 @@ static int align_shift_alloc_test(void)
align = ((unsigned long) 1) << i;

ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
__builtin_return_address(0));
0, __builtin_return_address(0));
if (!ptr)
return -1;

Expand All @@ -138,7 +138,7 @@ static int fix_align_alloc_test(void)
for (i = 0; i < test_loop_count; i++) {
ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1,
GFP_KERNEL | __GFP_ZERO, 0,
__builtin_return_address(0));
0, __builtin_return_address(0));
if (!ptr)
return -1;

Expand Down

0 comments on commit 6c48cad

Please sign in to comment.