Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
tcg/ppc: Convert tcg_out_qemu_{ld,st}_slow_path
Use tcg_out_ld_helper_args, tcg_out_ld_helper_ret,
and tcg_out_st_helper_args.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
  • Loading branch information
rth7680 committed May 11, 2023
1 parent f07aaf4 commit ec38941
Showing 1 changed file with 26 additions and 62 deletions.
88 changes: 26 additions & 62 deletions tcg/ppc/tcg-target.c.inc
Expand Up @@ -2003,88 +2003,52 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_BEUQ] = helper_be_stq_mmu,
};

/*
 * Produce the slow-path return address for a qemu_ld/st helper call.
 * If the generic code did not reserve a register (arg < 0), fall back
 * to TCG_REG_TMP1; either way, read the link register into it.
 */
static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    TCGReg ra = (arg < 0 ? TCG_REG_TMP1 : (TCGReg)arg);

    /* mfspr ra, LR -- copy the link register into RA. */
    tcg_out32(s, MFSPR | RT(ra) | LR);
    return ra;
}

/*
 * For the purposes of ppc32 sorting 4 input registers into 4 argument
 * registers, there is an outside chance we would require 3 temps.
 * Because of constraints, no inputs are in r3, and env will not be
 * placed into r3 until after the sorting is done, and is thus free.
 */
static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,  /* callback that materializes the return address */
    .ntmp = 3,              /* number of scratch registers listed in .tmp */
    .tmp = { TCG_REG_TMP1, TCG_REG_R0, TCG_REG_R3 }
};

/*
 * Emit the slow path for a qemu_ld: patch the conditional branch from the
 * fast path, marshal the helper arguments, call the load helper selected
 * by the memop, move the result into the destination register(s), and
 * branch back to the fast-path continuation.
 *
 * Returns false if the branch displacement cannot be patched (out of
 * range for a 14-bit conditional branch), true on success.
 *
 * NOTE(review): the original span interleaved pre- and post-patch lines
 * from a rendered diff (duplicate 'opc' declaration, dead register
 * shuffling into r3/r4); this is the coherent post-patch form, with the
 * argument/result marshalling delegated to the shared helpers.
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    /* Patch the fast-path branch to land here; fail if out of range. */
    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* Load env, address, and oi into the argument registers. */
    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    /* Move the helper's return value into the destination register(s). */
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    /* Return to the instruction following the fast-path access. */
    tcg_out_b(s, 0, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
MemOpIdx oi = lb->oi;
MemOp opc = get_memop(oi);
MemOp s_bits = opc & MO_SIZE;
TCGReg hi, lo, arg = TCG_REG_R3;
MemOp opc = get_memop(lb->oi);

if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
return false;
}

tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

lo = lb->addrlo_reg;
hi = lb->addrhi_reg;
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
} else {
/* If the address needed to be zero-extended, we'll have already
placed it in R4. The only remaining case is 64-bit guest. */
tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
}

lo = lb->datalo_reg;
hi = lb->datahi_reg;
if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
} else {
tcg_out_movext(s, s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32,
arg++, lb->type, s_bits, lo);
}

tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
tcg_out32(s, MFSPR | RT(arg) | LR);

tcg_out_st_helper_args(s, lb, &ldst_helper_param);
tcg_out_call_int(s, LK, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

tcg_out_b(s, 0, lb->raddr);
Expand Down

0 comments on commit ec38941

Please sign in to comment.