tcg/mips: Use full load/store helpers in user-only mode
Instead of using helper_unaligned_{ld,st}, use the full load/store helpers.
This will allow the fast path to increase alignment to implement atomicity
while not immediately raising an alignment exception.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
rth7680 committed May 16, 2023
1 parent ab85b80 commit c7efb55
1 changed file with 2 additions and 55 deletions: tcg/mips/tcg-target.c.inc
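For context, a minimal self-contained C sketch of the idea in the commit message: once the fast path demands more alignment than the guest access itself requires (for instance, to perform the access with a single atomic load), entering the slow path no longer implies a guest alignment fault, so the slow path must be able to complete the access rather than unconditionally report a bus error. This is an illustration, not QEMU code; qemu_ld32 and full_ld32 are hypothetical names.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* "Full" helper: completes the load regardless of alignment. */
    static uint32_t full_ld32(const uint8_t *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));       /* byte-wise, alignment-agnostic */
        return v;
    }

    static uint32_t qemu_ld32(const uint8_t *p)
    {
        if (((uintptr_t)p & 3) == 0) {
            /* Fast path: alignment raised to 4 so one load suffices. */
            uint32_t v;
            memcpy(&v, p, sizeof(v));   /* stands in for an aligned load insn */
            return v;
        }
        /*
         * Slow path.  The old user-only backend tail-called
         * helper_unaligned_ld here, i.e. it could only raise SIGBUS;
         * calling the full helper instead lets an underaligned but
         * architecturally legal access simply complete.
         */
        return full_ld32(p);
    }

    int main(void)
    {
        uint8_t buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        printf("aligned:   %08" PRIx32 "\n", qemu_ld32(buf));
        printf("unaligned: %08" PRIx32 "\n", qemu_ld32(buf + 1));
        return 0;
    }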
@@ -1075,7 +1075,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
     tcg_out_nop(s);
 }
 
-#if defined(CONFIG_SOFTMMU)
 /* We have four temps, we might as well expose three of them. */
 static const TCGLdstHelperParam ldst_helper_param = {
     .ntmp = 3, .tmp = { TCG_TMP0, TCG_TMP1, TCG_TMP2 }
@@ -1088,8 +1087,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     /* resolve label address */
     if (!reloc_pc16(l->label_ptr[0], tgt_rx)
-        || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
-            && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
+        || (l->label_ptr[1] && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
         return false;
     }
 
@@ -1118,8 +1116,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     /* resolve label address */
     if (!reloc_pc16(l->label_ptr[0], tgt_rx)
-        || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
-            && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
+        || (l->label_ptr[1] && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
         return false;
     }
 
@@ -1139,56 +1136,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     return true;
 }
 
-#else
-static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    void *target;
-
-    if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
-        return false;
-    }
-
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */
-        TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg;
-        TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg;
-
-        if (a3 != TCG_REG_A2) {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
-        } else if (a2 != TCG_REG_A3) {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0);
-        }
-    } else {
-        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
-    }
-    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
-
-    /*
-     * Tail call to the helper, with the return address back inline.
-     * We have arrived here via BNEL, so $31 is already set.
-     */
-    target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st);
-    tcg_out_call_int(s, target, true);
-    return true;
-}
-
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    return tcg_out_fail_alignment(s, l);
-}
-
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
-    return tcg_out_fail_alignment(s, l);
-}
-#endif /* SOFTMMU */
-
 typedef struct {
     TCGReg base;
     MemOp align;
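A side note on the relocation change in both slow paths above: the compile-time test TCG_TARGET_REG_BITS < TARGET_LONG_BITS is replaced by a runtime check on l->label_ptr[1], i.e. on whether a second branch was actually recorded for this access. A minimal sketch of the idiom, under the assumption that an unused slot is left NULL; Ldst, patch, and resolve are hypothetical names, not QEMU's.

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for TCGLabelQemuLdst / reloc_pc16. */
    typedef struct {
        void *label_ptr[2];             /* [1] stays NULL when unused */
    } Ldst;

    static bool patch(void *site)       /* pretend relocation */
    {
        return site != NULL;
    }

    static bool resolve(const Ldst *l)
    {
        /* Test the recorded slot itself instead of a config macro,
         * so one code path serves every build configuration. */
        if (!patch(l->label_ptr[0])
            || (l->label_ptr[1] && !patch(l->label_ptr[1]))) {
            return false;
        }
        return true;
    }

    int main(void)
    {
        Ldst one = { { (void *)&one, NULL } };          /* single branch */
        Ldst two = { { (void *)&two, (void *)&two } };  /* both recorded */
        return resolve(&one) && resolve(&two) ? 0 : 1;
    }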
