Showing with 1,030 additions and 2,061 deletions.
  1. +3 −0 MAINTAINERS
  2. +4 −38 accel/tcg/atomic_common.c.inc
  3. +16 −77 accel/tcg/atomic_template.h
  4. +0 −2 accel/tcg/cpu-exec.c
  5. +55 −154 accel/tcg/cputlb.c
  6. +23 −112 accel/tcg/ldst_atomicity.c.inc
  7. +12 −12 accel/tcg/ldst_common.c.inc
  8. +1 −3 accel/tcg/tcg-runtime.h
  9. +0 −2 accel/tcg/translate-all.c
  10. +0 −2 accel/tcg/translator.c
  11. +78 −234 accel/tcg/user-exec.c
  12. +45 −0 host/include/aarch64/host/atomic128-cas.h
  13. +79 −0 host/include/aarch64/host/atomic128-ldst.h
  14. +22 −0 host/include/aarch64/host/cpuinfo.h
  15. +47 −0 host/include/generic/host/atomic128-cas.h
  16. +81 −0 host/include/generic/host/atomic128-ldst.h
  17. +4 −0 host/include/generic/host/cpuinfo.h
  18. +39 −0 host/include/i386/host/cpuinfo.h
  19. +1 −0 host/include/x86_64/host/cpuinfo.h
  20. +12 −55 include/exec/cpu_ldst.h
  21. +0 −3 include/exec/exec-all.h
  22. +19 −127 include/qemu/atomic128.h
  23. +17 −0 include/tcg/debug-assert.h
  24. +1 −8 include/tcg/tcg.h
  25. +11 −1 meson.build
  26. +0 −1 migration/meson.build
  27. +3 −31 migration/ram.c
  28. +146 −122 migration/xbzrle.c
  29. +1 −4 migration/xbzrle.h
  30. +2 −2 target/arm/tcg/m_helper.c
  31. +0 −1 target/ppc/cpu.h
  32. +0 −9 target/ppc/helper.h
  33. +0 −48 target/ppc/mem_helper.c
  34. +4 −30 target/ppc/translate.c
  35. +7 −44 target/ppc/translate/fixedpoint-impl.c.inc
  36. +0 −3 target/s390x/cpu.h
  37. +0 −4 target/s390x/helper.h
  38. +1 −1 target/s390x/tcg/insn-data.h.inc
  39. +29 −108 target/s390x/tcg/mem_helper.c
  40. +8 −22 target/s390x/tcg/translate.c
  41. +0 −2 target/sh4/translate.c
  42. +3 −15 target/sparc/ldst_helper.c
  43. +0 −2 target/sparc/translate.c
  44. +0 −40 tcg/aarch64/tcg-target.c.inc
  45. +4 −2 tcg/aarch64/tcg-target.h
  46. +4 −119 tcg/i386/tcg-target.c.inc
  47. +18 −10 tcg/i386/tcg-target.h
  48. +2 −4 tcg/tcg-op-ldst.c
  49. +1 −13 tcg/tcg.c
  50. +0 −6 tests/bench/meson.build
  51. +0 −469 tests/bench/xbzrle-bench.c
  52. +10 −39 tests/unit/test-xbzrle.c
  53. +45 −80 util/bufferiszero.c
  54. +67 −0 util/cpuinfo-aarch64.c
  55. +99 −0 util/cpuinfo-i386.c
  56. +6 −0 util/meson.build
3 changes: 3 additions & 0 deletions MAINTAINERS
@@ -157,6 +157,9 @@ F: include/exec/helper*.h
F: include/sysemu/cpus.h
F: include/sysemu/tcg.h
F: include/hw/core/tcg-cpu-ops.h
F: host/include/*/host/cpuinfo.h
F: util/cpuinfo-*.c
F: include/tcg/

FPU emulation
M: Aurelien Jarno <aurelien@aurel32.net>
42 changes: 4 additions & 38 deletions accel/tcg/atomic_common.c.inc
@@ -19,20 +19,6 @@ static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}

#if HAVE_ATOMIC128
static void atomic_trace_ld_post(CPUArchState *env, uint64_t addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}

static void atomic_trace_st_post(CPUArchState *env, uint64_t addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
#endif

/*
* Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_*
@@ -62,36 +48,16 @@ CMPXCHG_HELPER(cmpxchgo_le, Int128)

#undef CMPXCHG_HELPER

Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, uint64_t addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC();
Int128 oldv;

oldv = cpu_ld16_be_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) {
cpu_st16_be_mmu(env, addr, newv, oi, ra);
} else {
/* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra);
}
return oldv;
#else
g_assert_not_reached();
#endif
}

Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, uint64_t addr,
Int128 cmpv, Int128 newv, uint32_t oi)
Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC();
Int128 oldv;

oldv = cpu_ld16_le_mmu(env, addr, oi, ra);
oldv = cpu_ld16_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) {
cpu_st16_le_mmu(env, addr, newv, oi, ra);
cpu_st16_mmu(env, addr, newv, oi, ra);
} else {
/* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra);
93 changes: 16 additions & 77 deletions accel/tcg/atomic_template.h
@@ -73,8 +73,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;

#if DATA_SIZE == 16
@@ -87,38 +86,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return ret;
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;

val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return val;
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);

atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;

ret = qatomic_xchg__nocheck(haddr, val);
@@ -131,9 +103,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
DATA_TYPE *haddr, ret; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -163,9 +134,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@@ -188,7 +158,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA SIZE >= 16 */
#endif /* DATA SIZE < 16 */

#undef END

@@ -206,8 +176,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;

#if DATA_SIZE == 16
@@ -220,39 +189,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return BSWAP(ret);
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
DATA_TYPE val;

val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
atomic_trace_ld_post(env, addr, oi);
return BSWAP(val);
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);

val = BSWAP(val);
atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_st_post(env, addr, oi);
}
#endif
#else
#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
ABI_TYPE ret;

ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -265,9 +206,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
DATA_TYPE *haddr, ret; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -294,9 +234,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
@@ -326,7 +265,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */
#endif /* DATA_SIZE < 16 */

#undef END
#endif /* DATA_SIZE > 1 */
2 changes: 0 additions & 2 deletions accel/tcg/cpu-exec.c
@@ -307,7 +307,6 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc));

#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
@@ -323,7 +322,6 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
qemu_log_unlock(logfile);
}
}
#endif /* DEBUG_DISAS */
}
}
