Skip to content

Commit

Permalink
Browse files — browse the repository at this point in the history
accel/tcg: Honor atomicity of stores
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
  • Loading branch information
rth7680 committed May 15, 2023
1 parent 2a37ccd commit 3564432
Show file tree
Hide file tree
Showing 3 changed files with 545 additions and 66 deletions.
108 changes: 48 additions & 60 deletions accel/tcg/cputlb.c
Expand Up @@ -2599,36 +2599,6 @@ Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
* Store Helpers
*/

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    /*
     * Perform a host store of @val to @haddr, with the size and
     * endianness selected by @op.  Only the MemOp values listed
     * below are legal here; any other value is rejected at build
     * time via qemu_build_not_reached().
     */
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    default:
        /* Compile-time unreachable: op must be a constant from the set above. */
        qemu_build_not_reached();
    }
}

/**
* do_st_mmio_leN:
* @env: cpu context
Expand All @@ -2655,38 +2625,56 @@ static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p,
return val_le;
}

/**
 * do_st_bytes_leN:
 * @p: translation parameters
 * @val_le: data to store
 *
 * Store @p->size bytes at @p->haddr, which is RAM.
 * The bytes to store are extracted in little-endian order from @val_le;
 * return the bytes of @val_le beyond @p->size that have not been stored.
 */
static uint64_t do_st_bytes_leN(MMULookupPageData *p, uint64_t val_le)
{
    uint8_t *dst = p->haddr;
    uint8_t *end = dst + p->size;

    /* Emit the least significant byte first, shifting the rest down. */
    while (dst < end) {
        *dst++ = val_le;
        val_le >>= 8;
    }
    return val_le;
}

/*
 * Wrapper for the above.
 */
/*
 * NOTE(review): this span appears to be a garbled diff extraction.  It
 * carries BOTH signature continuation lines (the pre-patch one ending
 * "uintptr_t ra)" and the post-patch one adding "MemOp mop"), and both
 * bodies: the old if/else chain — every branch of which returns — followed
 * by the new MO_ATOM_* switch, which is unreachable as written.  Reconcile
 * against the upstream commit before trusting either half.
 */
static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
uint64_t val_le, int mmu_idx, uintptr_t ra)
uint64_t val_le, int mmu_idx,
MemOp mop, uintptr_t ra)
{
MemOp atom;
unsigned tmp, half_size;

if (unlikely(p->flags & TLB_MMIO)) {
/* MMIO: route the store through the device access path. */
return do_st_mmio_leN(env, p, val_le, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* Write is discarded; return the bytes of val_le not consumed. */
return val_le >> (p->size * 8);
} else {
/* Plain RAM: byte-at-a-time little-endian store. */
return do_st_bytes_leN(p, val_le);
}

/*
 * It is a given that we cross a page and therefore there is no atomicity
 * for the store as a whole, but subobjects may need attention.
 */
atom = mop & MO_ATOM_MASK;
switch (atom) {
case MO_ATOM_SUBALIGN:
return store_parts_leN(p->haddr, p->size, val_le);

case MO_ATOM_IFALIGN_PAIR:
case MO_ATOM_WITHIN16_PAIR:
/* half_size = half of the full operation size (1 << (MO_SIZE - 1)). */
tmp = mop & MO_SIZE;
tmp = tmp ? tmp - 1 : 0;
half_size = 1 << tmp;
if (atom == MO_ATOM_IFALIGN_PAIR
? p->size == half_size
: p->size >= half_size) {
if (!HAVE_al8_fast && p->size <= 4) {
return store_whole_le4(p->haddr, p->size, val_le);
} else if (HAVE_al8) {
return store_whole_le8(p->haddr, p->size, val_le);
} else {
/* No suitable atomic primitive; restart the TB atomically. */
cpu_loop_exit_atomic(env_cpu(env), ra);
}
}
/* fall through */

case MO_ATOM_IFALIGN:
case MO_ATOM_WITHIN16:
case MO_ATOM_NONE:
return store_bytes_leN(p->haddr, p->size, val_le);

default:
g_assert_not_reached();
}
}

Expand Down Expand Up @@ -2714,7 +2702,7 @@ static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
if (memop & MO_BSWAP) {
val = bswap16(val);
}
store_memop(p->haddr, val, MO_UW);
store_atom_2(env, ra, p->haddr, memop, val);
}
}

Expand All @@ -2730,7 +2718,7 @@ static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
if (memop & MO_BSWAP) {
val = bswap32(val);
}
store_memop(p->haddr, val, MO_UL);
store_atom_4(env, ra, p->haddr, memop, val);
}
}

Expand All @@ -2746,7 +2734,7 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
if (memop & MO_BSWAP) {
val = bswap64(val);
}
store_memop(p->haddr, val, MO_UQ);
store_atom_8(env, ra, p->haddr, memop, val);
}
}

Expand Down Expand Up @@ -2815,8 +2803,8 @@ static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
val = do_st_leN(env, &l.page[0], val, l.mmu_idx, ra);
(void) do_st_leN(env, &l.page[1], val, l.mmu_idx, ra);
val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
(void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
Expand Down Expand Up @@ -2849,8 +2837,8 @@ static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
val = do_st_leN(env, &l.page[0], val, l.mmu_idx, ra);
(void) do_st_leN(env, &l.page[1], val, l.mmu_idx, ra);
val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
(void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
Expand Down

0 comments on commit 3564432

Please sign in to comment.