Skip to content

Commit

Permalink
Browse files — browse the repository at this point in the history
linux-user: Do not adjust zero_bss for host page size
Rely on target_mmap to handle guest vs host page size mismatch.

Tested-by: Helge Deller <deller@gmx.de>
Reviewed-by: Helge Deller <deller@gmx.de>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
  • Loading branch information
rth7680 committed Aug 8, 2023
1 parent e3d97d5 commit 2d385be
Showing 1 changed file with 30 additions and 39 deletions.
69 changes: 30 additions & 39 deletions linux-user/elfload.c
Expand Up @@ -2211,47 +2211,37 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
}
}

/*
 * NOTE(review): this span is a unified diff rendered with its '+'/'-'
 * markers stripped (the page header above reports "30 additions & 39
 * deletions"), so the REMOVED old implementation of zero_bss is
 * interleaved with its ADDED replacement.  The text below is therefore
 * not compilable C as-is; the review comments mark which lines appear
 * to belong to which version.  Blank-line attribution is approximate.
 */
/* --- removed (old version): header comment and void signature --- */
/* Map and zero the bss. We need to explicitly zero any fractional pages
after the data section (i.e. bss). */
static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
/* --- added (new version): doc comment and bool-returning signature;
 * returning false lets the caller (load_elf_image) treat a mapping
 * failure as a load error instead of exiting here. --- */
/**
* zero_bss:
*
* Map and zero the bss. We need to explicitly zero any fractional pages
* after the data section (i.e. bss). Return false on mapping failure.
*/
static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss, int prot)
/* --- shared context: function body opens --- */
{
/* --- removed (old version): host-page bookkeeping.  The old code
 * aligned to REAL_HOST_PAGE_ALIGN and issued a raw host mmap(), then
 * memset the fractional head page by hand. --- */
uintptr_t host_start, host_map_start, host_end;

last_bss = TARGET_PAGE_ALIGN(last_bss);

/* ??? There is confusion between qemu_real_host_page_size and
qemu_host_page_size here and elsewhere in target_mmap, which
may lead to the end of the data section mapping from the file
not being mapped. At least there was an explicit test and
comment for that here, suggesting that "the file size must
be known". The comment probably pre-dates the introduction
of the fstat system call in target_mmap which does in fact
find out the size. What isn't clear is if the workaround
here is still actually needed. For now, continue with it,
but merge it with the "normal" mmap that would allocate the bss. */

host_start = (uintptr_t) g2h_untagged(elf_bss);
host_end = (uintptr_t) g2h_untagged(last_bss);
host_map_start = REAL_HOST_PAGE_ALIGN(host_start);

if (host_map_start < host_end) {
void *p = mmap((void *)host_map_start, host_end - host_map_start,
prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED) {
perror("cannot mmap brk");
exit(-1);
}
}
/* --- added (new version): only target-page arithmetic remains;
 * guest vs host page-size mismatch is delegated to target_mmap,
 * per the commit message above. --- */
abi_ulong align_bss;

/* --- removed (old version): explicit page_set_flags fixup --- */
/* Ensure that the bss page(s) are valid */
if ((page_get_flags(last_bss-1) & prot) != prot) {
page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1,
prot | PAGE_VALID);
}
/* --- added (new version): round the bss head up and the end up to
 * target page boundaries --- */
align_bss = TARGET_PAGE_ALIGN(start_bss);
end_bss = TARGET_PAGE_ALIGN(end_bss);

/* --- removed (old version): manual memset of the fractional head --- */
if (host_start < host_map_start) {
memset((void *)host_start, 0, host_map_start - host_start);
/* --- added (new version): handle the fractional head page by
 * inspecting its current guest mapping: if the page is not yet
 * valid, extend the anonymous mapping down one target page to
 * cover it; if it is already writable, just zero the tail of it;
 * a valid-but-unwritable head page has no sane handling, hence
 * the assertion. --- */
if (start_bss < align_bss) {
int flags = page_get_flags(start_bss);

if (!(flags & PAGE_VALID)) {
/* Map the start of the bss. */
align_bss -= TARGET_PAGE_SIZE;
} else if (flags & PAGE_WRITE) {
/* The page is already mapped writable. */
memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
} else {
/* Read-only zeros? */
g_assert_not_reached();
}
}

/* Success if nothing is left to map, or if the single anonymous
 * MAP_FIXED target_mmap covering [align_bss, end_bss) succeeds. */
return align_bss >= end_bss ||
target_mmap(align_bss, end_bss - align_bss, prot,
MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) != -1;
/* --- shared context: function body closes --- */
}

#if defined(TARGET_ARM)
Expand Down Expand Up @@ -3255,8 +3245,9 @@ static void load_elf_image(const char *image_name, int image_fd,
/*
* If the load segment requests extra zeros (e.g. bss), map it.
*/
if (eppnt->p_filesz < eppnt->p_memsz) {
zero_bss(vaddr_ef, vaddr_em, elf_prot);
if (eppnt->p_filesz < eppnt->p_memsz &&
!zero_bss(vaddr_ef, vaddr_em, elf_prot)) {
goto exit_mmap;
}
} else if (eppnt->p_memsz != 0) {
vaddr_len = eppnt->p_memsz + vaddr_po;
Expand Down

0 comments on commit 2d385be

Please sign in to comment.