Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:

 - fix HugeTLB leak due to CoW and PTE_RDONLY mismatch

 - avoid accessing unmapped FDT fields when checking validity

 - correctly account for vDSO AUX entry in ARCH_DLINFO

 - fix kallsyms with absolute expressions in linker script

 - kill unnecessary symbol-based relocs in vmlinux

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix copy-on-write referencing in HugeTLB
  arm64: mm: avoid fdt_check_header() before the FDT is fully mapped
  arm64: Define AT_VECTOR_SIZE_ARCH for ARCH_DLINFO
  arm64: relocatable: suppress R_AARCH64_ABS64 relocations in vmlinux
  arm64: vmlinux.lds: make __rela_offset and __dynsym_offset ABSOLUTE
torvalds committed Aug 6, 2016
2 parents a157b3a + 747a70e commit 194d6ad
Showing 7 changed files with 30 additions and 34 deletions.
2 changes: 1 addition & 1 deletion arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS		:=-9
 
 ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux		+= -pie
+LDFLAGS_vmlinux		+= -pie -Bsymbolic
 endif
 
 KBUILD_DEFCONFIG := defconfig
1 change: 1 addition & 0 deletions arch/arm64/include/asm/elf.h
@@ -140,6 +140,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
 
 #define SET_PERSONALITY(ex)	clear_thread_flag(TIF_32BIT);
 
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO			\
 do {					\
 	NEW_AUX_ENT(AT_SYSINFO_EHDR,	\
17 changes: 17 additions & 0 deletions arch/arm64/include/asm/pgtable.h
@@ -224,6 +224,23 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	set_pte(ptep, pte);
 }
 
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	pteval_t lhs, rhs;
+
+	lhs = pte_val(pte_a);
+	rhs = pte_val(pte_b);
+
+	if (pte_present(pte_a))
+		lhs &= ~PTE_RDONLY;
+
+	if (pte_present(pte_b))
+		rhs &= ~PTE_RDONLY;
+
+	return (lhs == rhs);
+}
+
 /*
  * Huge pte definitions.
  */
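Context for the hunk above: as the pull summary notes, the HugeTLB leak comes from a CoW check comparing raw pte values while PTE_RDONLY has been rewritten underneath it, so the comparison spuriously reports a change. The snippet below is a stand-alone user-space illustration of the masked comparison only, not kernel code; the bit positions mirror arm64's layout but are hard-coded here purely for the demo, and pte_present() handling is omitted.

/*
 * Illustration of why masking PTE_RDONLY matters for pte_same().
 * User-space sketch; values and names are simplified assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(1UL << 0)
#define PTE_RDONLY	(1UL << 7)	/* AP[2] in the arm64 descriptor */

static int pte_same_masked(uint64_t a, uint64_t b)
{
	return (a & ~PTE_RDONLY) == (b & ~PTE_RDONLY);
}

int main(void)
{
	uint64_t before = PTE_VALID | PTE_RDONLY | 0x40000000UL; /* clean mapping */
	uint64_t after  = PTE_VALID | 0x40000000UL;              /* RDONLY dropped */

	printf("raw compare:    %s\n", before == after ? "same" : "different");
	printf("masked compare: %s\n",
	       pte_same_masked(before, after) ? "same" : "different");
	return 0;
}
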
2 changes: 2 additions & 0 deletions arch/arm64/include/uapi/asm/auxvec.h
@@ -19,4 +19,6 @@
 /* vDSO location */
 #define AT_SYSINFO_EHDR	33
 
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
 #endif
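The AT_SYSINFO_EHDR entry that ARCH_DLINFO emits, and that AT_VECTOR_SIZE_ARCH now reserves a slot for, is what userspace reads to locate the vDSO. A minimal consumer-side sketch, assuming glibc's getauxval():

#include <stdio.h>
#include <sys/auxv.h>	/* getauxval(), glibc 2.16+ */
#include <elf.h>	/* AT_SYSINFO_EHDR */

int main(void)
{
	/* Value supplied by the kernel's NEW_AUX_ENT(AT_SYSINFO_EHDR, ...):
	 * base address of the vDSO ELF image, or 0 if it is not mapped. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO ELF header at 0x%lx\n", vdso);
	else
		printf("no AT_SYSINFO_EHDR in this process's aux vector\n");
	return 0;
}
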
21 changes: 3 additions & 18 deletions arch/arm64/kernel/head.S
@@ -781,40 +781,25 @@ __primary_switch:
 	 * Iterate over each entry in the relocation table, and apply the
 	 * relocations in place.
 	 */
-	ldr	w8, =__dynsym_offset		// offset to symbol table
 	ldr	w9, =__rela_offset		// offset to reloc table
 	ldr	w10, =__rela_size		// size of reloc table
 
 	mov_q	x11, KIMAGE_VADDR		// default virtual offset
 	add	x11, x11, x23			// actual virtual offset
-	add	x8, x8, x11			// __va(.dynsym)
 	add	x9, x9, x11			// __va(.rela)
 	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)
 
 0:	cmp	x9, x10
-	b.hs	2f
+	b.hs	1f
 	ldp	x11, x12, [x9], #24
 	ldr	x13, [x9, #-8]
 	cmp	w12, #R_AARCH64_RELATIVE
-	b.ne	1f
+	b.ne	0b
 	add	x13, x13, x23			// relocate
 	str	x13, [x11, x23]
 	b	0b
 
-1:	cmp	w12, #R_AARCH64_ABS64
-	b.ne	0b
-	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
-	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
-	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
-	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
-	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
-	add	x14, x15, x23			// relocate
-	csel	x15, x14, x15, ne
-	add	x15, x13, x15
-	str	x15, [x11, x23]
-	b	0b
-
-2:
+1:
 #endif
 	ldr	x8, =__primary_switched
 	br	x8
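With -Bsymbolic and .dynsym discarded, the loop that survives only has to process R_AARCH64_RELATIVE entries. Below is a loose C rendering of what the assembly does, illustrative only: the real code runs in early assembly with x23 holding the KASLR displacement, and the function and parameter names here are invented for the sketch.

#include <elf.h>	/* Elf64_Rela, ELF64_R_TYPE, R_AARCH64_RELATIVE */
#include <stddef.h>
#include <stdint.h>

/* 'delta' plays the role of x23: runtime offset added to every link-time
 * address.  'rela'/'count' correspond to the region described by
 * __rela_offset and __rela_size. */
void apply_relative_relocs(Elf64_Rela *rela, size_t count, uint64_t delta)
{
	for (size_t i = 0; i < count; i++) {
		/* w12 in the assembly: the low 32 bits of r_info hold the type */
		if (ELF64_R_TYPE(rela[i].r_info) != R_AARCH64_RELATIVE)
			continue;	/* everything else is now ignored */

		/* str x13, [x11, x23]: store (addend + delta) at the
		 * relocated location of r_offset */
		*(uint64_t *)(rela[i].r_offset + delta) =
			rela[i].r_addend + delta;
	}
}
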
13 changes: 2 additions & 11 deletions arch/arm64/kernel/vmlinux.lds.S
@@ -103,6 +103,7 @@ SECTIONS
 		*(.discard)
 		*(.discard.*)
 		*(.interp .dynamic)
+		*(.dynsym .dynstr .hash)
 	}
 
 	. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -174,19 +175,9 @@ SECTIONS
 	.rela : ALIGN(8) {
 		*(.rela .rela*)
 	}
-	.dynsym : ALIGN(8) {
-		*(.dynsym)
-	}
-	.dynstr : {
-		*(.dynstr)
-	}
-	.hash : {
-		*(.hash)
-	}
 
-	__rela_offset	= ADDR(.rela) - KIMAGE_VADDR;
+	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
 	__rela_size	= SIZEOF(.rela);
-	__dynsym_offset	= ADDR(.dynsym) - KIMAGE_VADDR;
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__init_end = .;
8 changes: 4 additions & 4 deletions arch/arm64/mm/mmu.c
@@ -686,9 +686,9 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 	/*
 	 * Check whether the physical FDT address is set and meets the minimum
 	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
-	 * at least 8 bytes so that we can always access the size field of the
-	 * FDT header after mapping the first chunk, double check here if that
-	 * is indeed the case.
+	 * at least 8 bytes so that we can always access the magic and size
+	 * fields of the FDT header after mapping the first chunk, double check
+	 * here if that is indeed the case.
 	 */
 	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
 	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
@@ -716,7 +716,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
 			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
 
-	if (fdt_check_header(dt_virt) != 0)
+	if (fdt_magic(dt_virt) != FDT_MAGIC)
 		return NULL;
 
 	*size = fdt_totalsize(dt_virt);
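The point of the change above: fdt_check_header() also reads header fields (version, structure offsets) that may lie beyond the first mapped chunk, while MIN_FDT_ALIGN >= 8 only guarantees that the magic and totalsize words, the first 8 bytes, are accessible. A user-space sketch of that prefix-only check; the struct and helper names are invented for the sketch (the kernel uses libfdt's fdt_magic()/fdt_totalsize() accessors), and a little-endian host is assumed.

#include <stdint.h>
#include <stdio.h>

#define FDT_MAGIC	0xd00dfeedU	/* first big-endian word of every FDT */

/* Only the first two 32-bit big-endian words of the header are touched. */
struct fdt_header_prefix {
	uint32_t magic;		/* offset 0 */
	uint32_t totalsize;	/* offset 4 */
};

static uint32_t be32_to_host(uint32_t x)
{
	return __builtin_bswap32(x);	/* FDT fields are big-endian */
}

/* Returns the total blob size if the magic checks out, 0 otherwise. */
static uint32_t fdt_prefix_size(const void *blob)
{
	const struct fdt_header_prefix *h = blob;

	if (be32_to_host(h->magic) != FDT_MAGIC)
		return 0;
	return be32_to_host(h->totalsize);	/* enough to map the rest */
}

int main(void)
{
	uint32_t fake[2] = { __builtin_bswap32(FDT_MAGIC),
			     __builtin_bswap32(0x100) };

	printf("totalsize: %u\n", fdt_prefix_size(fake));
	return 0;
}
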
