Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
tcg/riscv: Use full load/store helpers in user-only mode
Instead of using helper_unaligned_{ld,st}, use the full load/store helpers.
This will allow the fast path to increase alignment to implement atomicity
while not immediately raising an alignment exception.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
  • Loading branch information
rth7680 committed May 16, 2023
1 parent c602fa4 commit c562d26
Showing 1 changed file with 0 additions and 29 deletions.
29 changes: 0 additions & 29 deletions tcg/riscv/tcg-target.c.inc
Expand Up @@ -846,7 +846,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
* Load/store and TLB
*/

#if defined(CONFIG_SOFTMMU)
static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
Expand Down Expand Up @@ -893,34 +892,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_goto(s, l->raddr);
return true;
}
#else
/*
 * User-only slow path for a guest memory access that failed its
 * alignment check: branch here from the fast path, then tail-call
 * helper_unaligned_ld or helper_unaligned_st.
 * Returns false if the label's sb-type branch immediate could not be
 * patched (target out of 12-bit range); NOTE(review): presumably the
 * caller then retries code generation with a larger buffer — confirm
 * against the tcg_out_qemu_ld/st callers.
 */
static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
/* resolve label address */
if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
return false;
}

/*
 * Marshal the helper arguments: guest address in a1, CPU env pointer
 * in a0 (the first two integer argument registers in the RISC-V ABI).
 * env is moved last so that it cannot clobber addrlo_reg if that
 * happened to live in a0.
 */
tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);

/* tail call, with the return address back inline. */
/*
 * ra is preloaded with l->raddr so the helper "returns" straight to
 * the instruction after the original memory op; the final 'true'
 * argument to tcg_out_call_int requests the tail-call form.
 * NOTE(review): 'true' == tail is inferred from this usage — verify
 * against tcg_out_call_int's definition.
 */
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
: helper_unaligned_st), true);
return true;
}

/*
 * User-only qemu_ld slow path: the only reason to land here is a
 * failed alignment check, so delegate to the shared unaligned-access
 * handler.  Returns false if the slow-path branch could not be
 * relocated.
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
return tcg_out_fail_alignment(s, l);
}

/*
 * User-only qemu_st slow path: identical to the load case — any
 * slow-path entry means the access was misaligned, and l->is_ld
 * inside the shared handler selects the store helper.  Returns false
 * if the slow-path branch could not be relocated.
 */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
return tcg_out_fail_alignment(s, l);
}
#endif /* CONFIG_SOFTMMU */

/*
* For softmmu, perform the TLB load and compare.
Expand Down

0 comments on commit c562d26

Please sign in to comment.