Merge tag 'pull-tcg-20230715' of https://gitlab.com/rth7680/qemu into staging

tcg: Use HAVE_CMPXCHG128 instead of CONFIG_CMPXCHG128
accel/tcg: Introduce page_check_range_empty
accel/tcg: Introduce page_find_range_empty
accel/tcg: Accept more page flags in page_check_range
accel/tcg: Return bool from page_check_range
accel/tcg: Always lock pages before translation
linux-user: Use abi_* types for target structures in syscall_defs.h
linux-user: Fix abi_llong alignment for microblaze and nios2
linux-user: Fix do_shmat type errors
linux-user: Implement execve without execveat
linux-user: Make sure initial brk is aligned
linux-user: Use a mask with strace flags
linux-user: Implement MAP_FIXED_NOREPLACE
linux-user: Widen target_mmap offset argument to off_t
linux-user: Use page_find_range_empty for mmap_find_vma_reserved
linux-user: Use 'last' instead of 'end' in target_mmap and subroutines
linux-user: Remove can_passthrough_madvise
linux-user: Simplify target_madvise
linux-user: Drop uint and ulong types
linux-user/arm: Do not allocate a commpage at all for M-profile CPUs
bsd-user: Use page_check_range_empty for MAP_EXCL
bsd-user: Use page_find_range_empty for mmap_find_vma_reserved

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmSypEYdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9VzQf/RMRK4SQDJiJEbQ6K
# 5U1i955Rl4MMLT8PrkbT/UDA9soyIlSVjUenW8ThJJg6SLbSvkXZsWn165PFu+yW
# nYkeCYxkJtAjWmmFlZ44J+VLEZZ6LkWrIvPZHvKohelpi6uT/fuQaAZjKuH2prI/
# 7bdP5YdLUMpCztERHYfxmroEX4wJR6knsRpt5rYchADxEfkWk82PanneCw7grQ6V
# VNg1pRGplp0jMkpOOBvMD1ENkmoipklMe9P1gQdCHobg2/kqpozhT1oQp/gHNkP5
# 66Cjzv8o0nnPjJetm74pnP06iNhuMjDesD7f+Vq/DALgMobwjvhDW5GD+Ccto85B
# hqvwHA==
# =vm0t
# -----END PGP SIGNATURE-----
# gpg: Signature made Sat 15 Jul 2023 02:51:02 PM BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20230715' of https://gitlab.com/rth7680/qemu: (47 commits)
  tcg: Use HAVE_CMPXCHG128 instead of CONFIG_CMPXCHG128
  accel/tcg: Always lock pages before translation
  linux-user/arm: Do not allocate a commpage at all for M-profile CPUs
  linux-user: Drop uint and ulong
  linux-user: Simplify target_madvise
  linux-user: Remove can_passthrough_madvise
  accel/tcg: Return bool from page_check_range
  accel/tcg: Accept more page flags in page_check_range
  linux-user: Simplify target_munmap
  linux-user: Rename mmap_reserve to mmap_reserve_or_unmap
  linux-user: Rewrite mmap_reserve
  linux-user: Use 'last' instead of 'end' in target_mmap
  linux-user: Use page_find_range_empty for mmap_find_vma_reserved
  bsd-user: Use page_find_range_empty for mmap_find_vma_reserved
  accel/tcg: Introduce page_find_range_empty
  linux-user: Rewrite mmap_frag
  linux-user: Rewrite target_mprotect
  linux-user: Widen target_mmap offset argument to off_t
  linux-user: Split out target_to_host_prot
  linux-user: Implement MAP_FIXED_NOREPLACE
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
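One of the pulled commits, "linux-user: Implement MAP_FIXED_NOREPLACE", emulates a Linux mmap flag whose semantics are easy to demonstrate on a native host. The standalone sketch below is not QEMU code; it only illustrates the behaviour being emulated (Linux >= 4.17, glibc with _GNU_SOURCE): like MAP_FIXED, but the call fails with EEXIST rather than silently replacing an existing mapping.

/* Illustration of MAP_FIXED_NOREPLACE semantics on a Linux host. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;

    /* Let the kernel pick an address for the first mapping. */
    void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    /* A second fixed mapping at the same address must not clobber it. */
    void *b = mmap(a, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);

    if (b == MAP_FAILED && errno == EEXIST) {
        printf("refused to replace mapping at %p: %s\n", a, strerror(errno));
    }
    munmap(a, len);
    return 0;
}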
rth7680 committed Jul 15, 2023
commit d7be40e (2 parents: 4633c1e + 76f9d6a)
Showing 36 changed files with 2,070 additions and 1,851 deletions.
accel/tcg/atomic_common.c.inc (2 changes: 1 addition & 1 deletion)
@@ -41,7 +41,7 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
 CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
 #endif

-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
 CMPXCHG_HELPER(cmpxchgo_be, Int128)
 CMPXCHG_HELPER(cmpxchgo_le, Int128)
 #endif
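A note on the change above: CONFIG_CMPXCHG128 was a macro that is only sometimes defined, so it could only be tested with #ifdef; HAVE_CMPXCHG128 is, as the #if test in the diff implies, always defined as 0 or 1, which also works with plain #if and as an ordinary C expression. A minimal standalone sketch of that pattern, using a hypothetical FEATURE_WIDGET macro in place of the real one:

/* FEATURE_WIDGET is hypothetical; it stands in for an always-defined
 * 0/1 feature macro like HAVE_CMPXCHG128. */
#include <stdio.h>

#ifndef FEATURE_WIDGET
#define FEATURE_WIDGET 0        /* always defined, as 0 or 1 */
#endif

int main(void)
{
#if FEATURE_WIDGET              /* works with #if, unlike an #ifdef-only macro */
    puts("built with the fast path");
#endif
    if (FEATURE_WIDGET) {       /* also usable in ordinary C conditions */
        puts("fast path");
    } else {
        puts("fallback path");
    }
    return 0;
}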
accel/tcg/cpu-exec.c (63 changes: 39 additions & 24 deletions)
@@ -526,6 +526,43 @@ static void cpu_exec_exit(CPUState *cpu)
     }
 }

+static void cpu_exec_longjmp_cleanup(CPUState *cpu)
+{
+    /* Non-buggy compilers preserve this; assert the correct value. */
+    g_assert(cpu == current_cpu);
+
+#ifdef CONFIG_USER_ONLY
+    clear_helper_retaddr();
+    if (have_mmap_lock()) {
+        mmap_unlock();
+    }
+#else
+    /*
+     * For softmmu, a tlb_fill fault during translation will land here,
+     * and we need to release any page locks held. In system mode we
+     * have one tcg_ctx per thread, so we know it was this cpu doing
+     * the translation.
+     *
+     * Alternative 1: Install a cleanup to be called via an exception
+     * handling safe longjmp. It seems plausible that all our hosts
+     * support such a thing. We'd have to properly register unwind info
+     * for the JIT for EH, rather than just for GDB.
+     *
+     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
+     * capture the cpu_loop_exit longjmp, perform the cleanup, and
+     * jump again to arrive here.
+     */
+    if (tcg_ctx->gen_tb) {
+        tb_unlock_pages(tcg_ctx->gen_tb);
+        tcg_ctx->gen_tb = NULL;
+    }
+#endif
+    if (qemu_mutex_iothread_locked()) {
+        qemu_mutex_unlock_iothread();
+    }
+    assert_no_pages_locked();
+}
+
 void cpu_exec_step_atomic(CPUState *cpu)
 {
     CPUArchState *env = cpu->env_ptr;
@@ -568,16 +605,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu_tb_exec(cpu, tb, &tb_exit);
         cpu_exec_exit(cpu);
     } else {
-#ifdef CONFIG_USER_ONLY
-        clear_helper_retaddr();
-        if (have_mmap_lock()) {
-            mmap_unlock();
-        }
-#endif
-        if (qemu_mutex_iothread_locked()) {
-            qemu_mutex_unlock_iothread();
-        }
-        assert_no_pages_locked();
+        cpu_exec_longjmp_cleanup(cpu);
     }

     /*
@@ -1023,20 +1051,7 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
 {
     /* Prepare setjmp context for exception handling. */
     if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
-        /* Non-buggy compilers preserve this; assert the correct value. */
-        g_assert(cpu == current_cpu);
-
-#ifdef CONFIG_USER_ONLY
-        clear_helper_retaddr();
-        if (have_mmap_lock()) {
-            mmap_unlock();
-        }
-#endif
-        if (qemu_mutex_iothread_locked()) {
-            qemu_mutex_unlock_iothread();
-        }
-
-        assert_no_pages_locked();
+        cpu_exec_longjmp_cleanup(cpu);
     }

     return cpu_exec_loop(cpu, sc);
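The two open-coded cleanup sequences removed above are what the new cpu_exec_longjmp_cleanup() replaces: every siglongjmp back to a sigsetjmp site in cpu-exec.c now funnels through one helper that releases any locks still held. A standalone sketch of the pattern, with toy names in place of QEMU's:

/* Sketch: all abnormal longjmp exits share a single cleanup routine. */
#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf jmp_env;

static void longjmp_cleanup(void)
{
    /* release locks, drop per-thread translation state, ... */
    puts("cleanup after longjmp");
}

static void fault_path(void)
{
    siglongjmp(jmp_env, 1);     /* e.g. a fault raised mid-translation */
}

int main(void)
{
    if (sigsetjmp(jmp_env, 0) != 0) {
        longjmp_cleanup();      /* one exit path for every abnormal return */
        return 0;
    }
    fault_path();
    return 1;                   /* not reached */
}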
accel/tcg/cputlb.c (2 changes: 1 addition & 1 deletion)
@@ -3105,7 +3105,7 @@ void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
accel/tcg/internal.h (30 changes: 28 additions & 2 deletions)
@@ -10,6 +10,7 @@
 #define ACCEL_TCG_INTERNAL_H

 #include "exec/exec-all.h"
+#include "exec/translate-all.h"

 /*
  * Access to the various translations structures need to be serialised
@@ -35,6 +36,32 @@ static inline void page_table_config_init(void) { }
 void page_table_config_init(void);
 #endif

+#ifdef CONFIG_USER_ONLY
+/*
+ * For user-only, page_protect sets the page read-only.
+ * Since most execution is already on read-only pages, and we'd need to
+ * account for other TBs on the same page, defer undoing any page protection
+ * until we receive the write fault.
+ */
+static inline void tb_lock_page0(tb_page_addr_t p0)
+{
+    page_protect(p0);
+}
+
+static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
+{
+    page_protect(p1);
+}
+
+static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
+static inline void tb_unlock_pages(TranslationBlock *tb) { }
+#else
+void tb_lock_page0(tb_page_addr_t);
+void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_pages(TranslationBlock *);
+#endif
+
 #ifdef CONFIG_SOFTMMU
 void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                    unsigned size,
@@ -48,8 +75,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
 void page_init(void);
 void tb_htable_init(void);
 void tb_reset_jump(TranslationBlock *tb, int n);
-TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
-                               tb_page_addr_t phys_page2);
+TranslationBlock *tb_link_page(TranslationBlock *tb);
 bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
 void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                uintptr_t host_pc);
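These declarations carry the "accel/tcg: Always lock pages before translation" change: the pages holding a TB's guest code are locked before translation starts and released when generation completes, or from cpu_exec_longjmp_cleanup() if it is abandoned. The bracket below is a standalone sketch only; the stub types and bodies are stand-ins, and just the call shape follows the header above.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t tb_page_addr_t;
typedef struct TranslationBlock {
    tb_page_addr_t page0, page1;   /* page1 == -1 when the TB fits one page */
} TranslationBlock;

static void tb_lock_page0(tb_page_addr_t p0)
{
    printf("lock page %#lx\n", (unsigned long)p0);
}

static void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    (void)p0;
    printf("lock second page %#lx\n", (unsigned long)p1);
}

static void tb_unlock_pages(TranslationBlock *tb)
{
    (void)tb;
    puts("unlock all pages of this TB");
}

static void gen_code(TranslationBlock *tb)
{
    tb_lock_page0(tb->page0);
    if (tb->page1 != (tb_page_addr_t)-1) {
        tb_lock_page1(tb->page0, tb->page1);  /* TB spans a page boundary */
    }
    /* ... translate; a fault here longjmps out and unlocks in cleanup ... */
    tb_unlock_pages(tb);
}

int main(void)
{
    TranslationBlock tb = { 0x1000, (tb_page_addr_t)-1 };
    gen_code(&tb);
    return 0;
}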
accel/tcg/ldst_atomicity.c.inc (4 changes: 2 additions & 2 deletions)
@@ -159,7 +159,7 @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
      * another process, because the fallback start_exclusive solution
      * provides no protection across processes.
      */
-    if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
+    if (page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
         uint64_t *p = __builtin_assume_aligned(pv, 8);
         return *p;
     }
@@ -194,7 +194,7 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
      * another process, because the fallback start_exclusive solution
      * provides no protection across processes.
      */
-    if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
+    if (page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
         return *p;
     }
 #endif
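Both flipped conditions in this file follow from "accel/tcg: Return bool from page_check_range": the function used to return an int with 0 meaning success, and now returns a bool with true meaning success, so callers drop the negation. A standalone sketch of the two conventions (the toy range check is a stand-in, not QEMU's implementation):

#include <stdbool.h>
#include <stdio.h>

/* old convention: int, 0 on success */
static int check_range_old(unsigned long start, unsigned long len)
{
    return (start + len <= 0x10000) ? 0 : -1;
}

/* new convention: bool, true on success */
static bool check_range_new(unsigned long start, unsigned long len)
{
    return start + len <= 0x10000;
}

int main(void)
{
    if (!check_range_old(0x1000, 16)) {     /* note the negation */
        puts("old convention: range ok");
    }
    if (check_range_new(0x1000, 16)) {      /* negation dropped */
        puts("new convention: range ok");
    }
    return 0;
}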
