Skip to content
Permalink
Browse files
elf/vdso: Reuse arch_setup_additional_pages() parameters
Both parameters of arch_setup_additional_pages() are currently unused.
Commit fc5243d ("[S390] arch_setup_additional_pages arguments")
tried to introduce useful arguments, but they are still not used.

Remove the old parameters and introduce a sysinfo_ehdr argument that will
be used to return the vDSO address to put as the AT_SYSINFO_EHDR tag in
the auxiliary vector. The reason to do this is that many architectures
have a vDSO pointer saved in their mm->context with the only purpose of
using it later in ARCH_DLINFO — the macro the ELF loader uses to set up
the sysinfo_ehdr tag.

Return the sysinfo_ehdr address through this argument so that it can
later be used by ARCH_DLINFO. That will allow dropping the vDSO pointer
from mm->context, along with any code responsible for tracking the vDSO
position, on platforms that don't use the vDSO as a landing point in
userspace (arm/s390/sparc).

Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Dmitry Safonov <dima@arista.com>
  • Loading branch information
0x7f454c46 authored and intel-lab-lkp committed Nov 9, 2020
1 parent 41e4cc3 commit 04586680978b048abe74dd892c5b1fcde7c486a3
Show file tree
Hide file tree
Showing 19 changed files with 84 additions and 57 deletions.
@@ -10,13 +10,15 @@ struct mm_struct;

#ifdef CONFIG_VDSO

void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
void arm_install_vdso(struct mm_struct *mm, unsigned long addr,
unsigned long *sysinfo_ehdr);

extern unsigned int vdso_total_pages;

#else /* CONFIG_VDSO */

static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr,
unsigned long *sysinfo_ehdr)
{
}

@@ -389,7 +389,7 @@ static const struct vm_special_mapping sigpage_mapping = {
.mremap = sigpage_mremap,
};

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -430,7 +430,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* to be fatal to the process, so no error check needed
* here.
*/
arm_install_vdso(mm, addr + PAGE_SIZE);
arm_install_vdso(mm, addr + PAGE_SIZE, sysinfo_ehdr);

up_fail:
mmap_write_unlock(mm);
@@ -233,7 +233,8 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr)
}

/* assumes mmap_lock is write-locked */
void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
void arm_install_vdso(struct mm_struct *mm, unsigned long addr,
unsigned long *sysinfo_ehdr)
{
struct vm_area_struct *vma;
unsigned long len;
@@ -254,7 +255,10 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
&vdso_text_mapping);

if (!IS_ERR(vma))
mm->context.vdso = addr;
if (IS_ERR(vma))
return;

mm->context.vdso = addr;
*sysinfo_ehdr = addr;
}

@@ -213,8 +213,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,

static int __setup_additional_pages(enum vdso_abi abi,
struct mm_struct *mm,
struct linux_binprm *bprm,
int uses_interp)
unsigned long *sysinfo_ehdr)
{
unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
unsigned long gp_flags = 0;
@@ -250,6 +249,8 @@ static int __setup_additional_pages(enum vdso_abi abi,
if (IS_ERR(ret))
goto up_fail;

*sysinfo_ehdr = vdso_base;

return 0;

up_fail:
@@ -401,8 +402,7 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
static int aarch32_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
int ret;
@@ -412,8 +412,7 @@ static int aarch32_setup_additional_pages(struct linux_binprm *bprm,
return ret;

if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
uses_interp);
ret = __setup_additional_pages(VDSO_ABI_AA32, mm, sysinfo_ehdr);
if (ret)
return ret;
}
@@ -447,7 +446,7 @@ static int __init vdso_init(void)
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
int ret;
@@ -456,9 +455,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return -EINTR;

if (is_compat_task())
ret = aarch32_setup_additional_pages(bprm, uses_interp);
ret = aarch32_setup_additional_pages(sysinfo_ehdr);
else
ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
ret = __setup_additional_pages(VDSO_ABI_AA64, mm, sysinfo_ehdr);

mmap_write_unlock(mm);

@@ -44,7 +44,7 @@ static int __init init_vdso(void)
}
subsys_initcall(init_vdso);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
int ret;
unsigned long addr;
@@ -68,6 +68,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;

mm->context.vdso = (void *)addr;
*sysinfo_ehdr = addr;

up_fail:
mmap_write_unlock(mm);
@@ -46,7 +46,7 @@ arch_initcall(vdso_init);
/*
* Called from binfmt_elf. Create a VMA for the vDSO page.
*/
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
int ret;
unsigned long vdso_base;
@@ -74,6 +74,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;

mm->context.vdso = (void *)vdso_base;
*sysinfo_ehdr = vdso_base;

up_fail:
mmap_write_unlock(mm);
@@ -86,7 +86,7 @@ static unsigned long vdso_base(void)
return base;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
@@ -184,6 +184,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
}

mm->context.vdso = (void *)vdso_addr;
*sysinfo_ehdr = vdso_addr;
ret = 0;

out:
@@ -111,7 +111,7 @@ unsigned long inline vdso_random_addr(unsigned long vdso_mapping_len)
return addr;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
@@ -176,6 +176,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/*Map vdso to user space */
vdso_base += PAGE_SIZE;
mm->context.vdso = (void *)vdso_base;
*sysinfo_ehdr = vdso_base;
vma = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
@@ -104,7 +104,7 @@ static int alloc_kuser_page(void)
}
arch_initcall(alloc_kuser_page);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
int ret;
@@ -122,7 +122,7 @@ struct lib64_elfinfo
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
struct page **vdso_pagelist;
@@ -211,6 +211,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
}

mmap_write_unlock(mm);
*sysinfo_ehdr = vdso_base;
return 0;

fail_mmapsem:
@@ -56,11 +56,10 @@ static int __init vdso_init(void)
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_len;
unsigned long vdso_base, vvar_base, vdso_len;
int ret;

vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
@@ -89,12 +88,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
goto end;
}

vdso_base += (vdso_pages << PAGE_SHIFT);
vvar_base = vdso_base + (vdso_pages << PAGE_SHIFT);
ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
(VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);

if (unlikely(ret))
mm->context.vdso = NULL;
else
*sysinfo_ehdr = vdso_base;
end:
mmap_write_unlock(mm);
return ret;
@@ -150,7 +150,7 @@ void vdso_free_per_cpu(struct lowcore *lowcore)
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -205,6 +205,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
}

current->mm->context.vdso_base = vdso_base;
*sysinfo_ehdr = vdso_base;
rc = 0;

out_up:
@@ -55,7 +55,7 @@ int __init vsyscall_init(void)
}

/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
unsigned long addr;
@@ -78,6 +78,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;

current->mm->context.vdso = (void *)addr;
*sysinfo_ehdr = addr;

up_fail:
mmap_write_unlock(mm);
@@ -346,8 +346,6 @@ static int __init init_vdso(void)
}
subsys_initcall(init_vdso);

struct linux_binprm;

/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
@@ -359,7 +357,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned int len)
}

static int map_vdso(const struct vdso_image *image,
struct vm_special_mapping *vdso_mapping)
struct vm_special_mapping *vdso_mapping,
unsigned long *sysinfo_ehdr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -421,24 +420,26 @@ static int map_vdso(const struct vdso_image *image,
up_fail:
if (ret)
current->mm->context.vdso = NULL;
else
*sysinfo_ehdr = text_start;

mmap_write_unlock(mm);
return ret;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
{

if (!vdso_enabled)
return 0;

#if defined CONFIG_COMPAT
if (!(is_32bit_task()))
return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
return map_vdso(&vdso_image_64_builtin, &vdso_mapping64, sysinfo_ehdr);
else
return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
return map_vdso(&vdso_image_32_builtin, &vdso_mapping32, sysinfo_ehdr);
#else
return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
return map_vdso(&vdso_image_64_builtin, &vdso_mapping64, sysinfo_ehdr);
#endif

}

0 comments on commit 0458668

Please sign in to comment.