tcg/riscv: Use tcg_use_softmmu
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
rth7680 committed Oct 18, 2023
1 parent 451ae08 commit eb63c85
Showing 1 changed file with 90 additions and 87 deletions: tcg/riscv/tcg-target.c.inc
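In substance the patch is mechanical: the CONFIG_SOFTMMU preprocessor split in prepare_host_addr() becomes a runtime test of the tcg_use_softmmu flag, so both the softmmu TLB path and the user-only guest_base path are always compiled. A minimal standalone sketch of the pattern — the flag initialization, helper, and strings here are illustrative stand-ins, not the QEMU code:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in: in QEMU this is set once at startup, and is
 * effectively constant (so the dead branch folds away) in binaries
 * that can only ever run in one of the two modes. */
static bool tcg_use_softmmu = true;

static void prepare_host_addr_sketch(void)
{
    if (tcg_use_softmmu) {
        /* was: #ifdef CONFIG_SOFTMMU ... */
        puts("emit TLB lookup and compare");
    } else {
        /* was: #else ... #endif */
        puts("emit guest_base-relative access");
    }
}

int main(void)
{
    prepare_host_addr_sketch();
    return 0;
}

Keeping both paths compiled means the compiler type-checks whichever path the build does not exercise, instead of letting it bit-rot behind an #ifdef.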
@@ -1245,105 +1245,110 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
     aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
     a_mask = (1u << aa.align) - 1;
 
-#ifdef CONFIG_SOFTMMU
-    unsigned s_bits = opc & MO_SIZE;
-    unsigned s_mask = (1u << s_bits) - 1;
-    int mem_index = get_mmuidx(oi);
-    int fast_ofs = tlb_mask_table_ofs(s, mem_index);
-    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
-    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
-    int compare_mask;
-    TCGReg addr_adj;
-
-    ldst = new_ldst_label(s);
-    ldst->is_ld = is_ld;
-    ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
-
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
-
-    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
-                    s->page_bits - CPU_TLB_ENTRY_BITS);
-    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
-    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
-
-    /*
-     * For aligned accesses, we check the first byte and include the alignment
-     * bits within the address. For unaligned access, we check that we don't
-     * cross pages using the address of the last byte of the access.
-     */
-    addr_adj = addr_reg;
-    if (a_mask < s_mask) {
-        addr_adj = TCG_REG_TMP0;
-        tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
-                        addr_adj, addr_reg, s_mask - a_mask);
-    }
-    compare_mask = s->page_mask | a_mask;
-    if (compare_mask == sextreg(compare_mask, 0, 12)) {
-        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
-    } else {
-        tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
-        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
-    }
-
-    /* Load the tlb comparator and the addend. */
-    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
-    tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
-               is_ld ? offsetof(CPUTLBEntry, addr_read)
-                     : offsetof(CPUTLBEntry, addr_write));
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
-               offsetof(CPUTLBEntry, addend));
-
-    /* Compare masked address with the TLB entry. */
-    ldst->label_ptr[0] = s->code_ptr;
-    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
-
-    /* TLB Hit - translate address using addend. */
-    if (addr_type != TCG_TYPE_I32) {
-        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
-    } else if (have_zba) {
-        tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
-    } else {
-        tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
-        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, TCG_REG_TMP2);
-    }
-    *pbase = TCG_REG_TMP0;
-#else
-    TCGReg base;
-
-    if (a_mask) {
-        ldst = new_ldst_label(s);
-        ldst->is_ld = is_ld;
-        ldst->oi = oi;
-        ldst->addrlo_reg = addr_reg;
-
-        /* We are expecting alignment max 7, so we can always use andi. */
-        tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
-        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
-
-        ldst->label_ptr[0] = s->code_ptr;
-        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
-    }
-
-    if (guest_base != 0) {
-        base = TCG_REG_TMP0;
-        if (addr_type != TCG_TYPE_I32) {
-            tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG);
-        } else if (have_zba) {
-            tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG);
-        } else {
-            tcg_out_ext32u(s, base, addr_reg);
-            tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
-        }
-    } else if (addr_type != TCG_TYPE_I32) {
-        base = addr_reg;
-    } else {
-        base = TCG_REG_TMP0;
-        tcg_out_ext32u(s, base, addr_reg);
-    }
-    *pbase = base;
-#endif
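The whole block above is deleted and re-emitted below, re-indented inside the runtime if/else. The softmmu half opens with the TLB fast path: the emitted SRLI/AND/ADD sequence computes the address of the CPUTLBEntry to check against. A C rendering of what that three-instruction sequence computes at run time — a sketch, with an illustrative CPU_TLB_ENTRY_BITS value:

#include <stdint.h>

#define CPU_TLB_ENTRY_BITS 5   /* log2(sizeof(CPUTLBEntry)); 5 is illustrative */

/* mask (CPUTLBDescFast.mask) is stored pre-scaled by the entry size,
 * so ANDing the shifted page number yields a byte offset directly:
 * no multiply and no bounds check on the fast path. */
static uintptr_t tlb_entry_addr(uint64_t addr, int page_bits,
                                uintptr_t mask, uintptr_t table)
{
    uintptr_t ofs = (addr >> (page_bits - CPU_TLB_ENTRY_BITS)) & mask;
    return table + ofs;   /* address of the CPUTLBEntry to compare */
}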
+    if (tcg_use_softmmu) {
+        unsigned s_bits = opc & MO_SIZE;
+        unsigned s_mask = (1u << s_bits) - 1;
+        int mem_index = get_mmuidx(oi);
+        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
+        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
+        int compare_mask;
+        TCGReg addr_adj;
+
+        ldst = new_ldst_label(s);
+        ldst->is_ld = is_ld;
+        ldst->oi = oi;
+        ldst->addrlo_reg = addr_reg;
+
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
+
+        tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
+                        s->page_bits - CPU_TLB_ENTRY_BITS);
+        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
+        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+
+        /*
+         * For aligned accesses, we check the first byte and include the
+         * alignment bits within the address. For unaligned access, we
+         * check that we don't cross pages using the address of the last
+         * byte of the access.
+         */
+        addr_adj = addr_reg;
+        if (a_mask < s_mask) {
+            addr_adj = TCG_REG_TMP0;
+            tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
+                            addr_adj, addr_reg, s_mask - a_mask);
+        }
+        compare_mask = s->page_mask | a_mask;
+        if (compare_mask == sextreg(compare_mask, 0, 12)) {
+            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
+        } else {
+            tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
+            tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
+        }
+
+        /* Load the tlb comparator and the addend. */
+        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
+        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
+                   is_ld ? offsetof(CPUTLBEntry, addr_read)
+                         : offsetof(CPUTLBEntry, addr_write));
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+                   offsetof(CPUTLBEntry, addend));
+
+        /* Compare masked address with the TLB entry. */
+        ldst->label_ptr[0] = s->code_ptr;
+        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
+
+        /* TLB Hit - translate address using addend. */
+        if (addr_type != TCG_TYPE_I32) {
+            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
+        } else if (have_zba) {
+            tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
+                            addr_reg, TCG_REG_TMP2);
+        } else {
+            tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
+            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
+                            TCG_REG_TMP0, TCG_REG_TMP2);
+        }
+        *pbase = TCG_REG_TMP0;
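The comment block in that branch compresses a neat trick: by biasing an under-aligned access to its last byte (adding s_mask - a_mask) and masking with page_mask | a_mask, one comparison against the TLB tag rejects both misaligned accesses and accesses that straddle a page. A hedged C model of the check the generated code performs — a standalone function with illustrative names, not QEMU code:

#include <stdbool.h>
#include <stdint.h>

/* a_mask = required alignment - 1, s_mask = access size - 1.
 * page_mask selects the page-number bits (e.g. ~0xfffull for 4 KiB pages).
 * tlb_tag is CPUTLBEntry.addr_read/addr_write: the page address, with
 * low bits set when the page must take the slow path anyway. */
static bool tlb_hit_fast(uint64_t addr, uint64_t tlb_tag,
                         uint64_t page_mask, uint64_t a_mask, uint64_t s_mask)
{
    /* Bias to the last byte, so a page-straddling access changes the
     * page number and the compare fails. */
    uint64_t addr_adj = a_mask < s_mask ? addr + (s_mask - a_mask) : addr;
    /* Keep the alignment bits in the comparison: any set bit below the
     * page mask forces a mismatch and hence the slow path. */
    return (addr_adj & (page_mask | a_mask)) == tlb_tag;
}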
+    } else {
+        TCGReg base;
+
+        if (a_mask) {
+            ldst = new_ldst_label(s);
+            ldst->is_ld = is_ld;
+            ldst->oi = oi;
+            ldst->addrlo_reg = addr_reg;
+
+            /* We are expecting alignment max 7, so we can always use andi. */
+            tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
+            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
+
+            ldst->label_ptr[0] = s->code_ptr;
+            tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
+        }
+
+        if (guest_base != 0) {
+            base = TCG_REG_TMP0;
+            if (addr_type != TCG_TYPE_I32) {
+                tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
+                                TCG_GUEST_BASE_REG);
+            } else if (have_zba) {
+                tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
+                                TCG_GUEST_BASE_REG);
+            } else {
+                tcg_out_ext32u(s, base, addr_reg);
+                tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
+            }
+        } else if (addr_type != TCG_TYPE_I32) {
+            base = addr_reg;
+        } else {
+            base = TCG_REG_TMP0;
+            tcg_out_ext32u(s, base, addr_reg);
+        }
+        *pbase = base;
+    }
 
     return ldst;
 }
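On both hit paths — TLB addend in the softmmu branch, TCG_GUEST_BASE_REG in the user-only branch — a 32-bit guest address must be zero-extended before the add, and the Zba ADD.UW instruction folds that extension into the add, saving one instruction over the ext32u-plus-ADD fallback. A C model of the final address arithmetic, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* 'base' stands for the cached TLB addend (softmmu) or guest_base
 * (user-only). With Zba, ADD.UW performs the (uint32_t) truncation and
 * the add in one instruction; otherwise ext32u + ADD is emitted. */
static uintptr_t host_address(uint64_t guest_addr, uintptr_t base,
                              bool addr_is_32bit)
{
    if (addr_is_32bit) {
        guest_addr = (uint32_t)guest_addr;
    }
    return base + (uintptr_t)guest_addr;
}

In either branch, a failed check branches through ldst->label_ptr[0] to the slow path recorded in the TCGLabelQemuLdst, which is patched when the helper-call stub is emitted later.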
@@ -2075,12 +2080,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                    TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
     }
 
-#if !defined(CONFIG_SOFTMMU)
-    if (guest_base) {
+    if (!tcg_use_softmmu && guest_base) {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
-#endif
 
     /* Call generated code */
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
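The prologue hunk is the runtime counterpart: guest_base is materialized in TCG_GUEST_BASE_REG, and that register is withdrawn from the allocator, only when the user-only path can actually run. Reserving a register simply removes it from the allocatable set; QEMU's TCGRegSet is a bitmask of this sort, though the type and helpers below are simplified, illustrative stand-ins:

#include <stdint.h>

typedef uint32_t RegSet;   /* one bit per register, regs 0..31 */

/* Mark a register reserved: the allocator never hands it out, so the
 * value parked there (guest_base) survives across all generated code. */
static inline void regset_set_reg(RegSet *reserved, unsigned reg)
{
    *reserved |= UINT32_C(1) << reg;
}

static inline int reg_is_allocatable(RegSet reserved, unsigned reg)
{
    return !(reserved & (UINT32_C(1) << reg));
}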
