tcg: TCGMemOp is now accelerator independent MemOp
Preparation for collapsing the two byte swaps, adjust_endianness and
handle_bswap, along the I/O path.

Target-dependent attributes are conditionalized upon NEED_CPU_H.

Signed-off-by: Tony Nguyen <tony.nguyen@bt.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Cornelia Huck <cohuck@redhat.com>
Message-Id: <81d9cd7d7f5aaadfa772d6c48ecee834e9cf7882.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
tnguyen-bt authored and rth7680 committed Sep 3, 2019
1 parent fec105c commit 14776ab
Showing 39 changed files with 421 additions and 399 deletions.
1 change: 1 addition & 0 deletions MAINTAINERS
@@ -1890,6 +1890,7 @@ M: Paolo Bonzini <pbonzini@redhat.com>
 S: Supported
 F: include/exec/ioport.h
 F: ioport.c
+F: include/exec/memop.h
 F: include/exec/memory.h
 F: include/exec/ram_addr.h
 F: memory.c
2 changes: 1 addition & 1 deletion accel/tcg/cputlb.c
@@ -1133,7 +1133,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
     target_ulong tlb_addr = tlb_addr_write(tlbe);
-    TCGMemOp mop = get_memop(oi);
+    MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     int s_bits = mop & MO_SIZE;
     void *hostaddr;
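A note for readers following the alignment handling above: get_alignment_bits(), called in this hunk, folds the MO_AMASK field of a MemOp down to a log2 alignment requirement. A minimal sketch of that decode, with the real helper's debug assertions elided (at this point in the series the helper lives in tcg/tcg.h):

static inline unsigned get_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        a = 0;                   /* no alignment required */
    } else if (a == MO_ALIGN) {
        a = memop & MO_SIZE;     /* natural alignment: log2 of the access size */
    } else {
        a = a >> MO_ASHIFT;      /* explicit MO_ALIGN_x requirement */
    }
    return a;
}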
110 changes: 110 additions & 0 deletions include/exec/memop.h
@@ -0,0 +1,110 @@
/*
 * Constants for memory operations
 *
 * Authors:
 *  Richard Henderson <rth@twiddle.net>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef MEMOP_H
#define MEMOP_H

typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above. */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended. */

    MO_BSWAP = 8,   /* Host reverse endian. */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef NEED_CPU_H
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif
    /*
     * MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines
     * TARGET_ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need the address to be aligned
     * to a size larger than the access itself; others (e.g. SPARCv9)
     * need an address that is aligned, but less strictly than the
     * natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of the access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - alignment to the size of the access (MO_ALIGN);
     * - alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x, where 'x' is a size in bytes).
     */
    MO_ASHIFT = 4,
    MO_AMASK  = 7 << MO_ASHIFT,
#ifdef NEED_CPU_H
#ifdef TARGET_ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,

    /* Combinations of the above, for ease of use. */
    MO_UB = MO_8,
    MO_UW = MO_16,
    MO_UL = MO_32,
    MO_SB = MO_SIGN | MO_8,
    MO_SW = MO_SIGN | MO_16,
    MO_SL = MO_SIGN | MO_32,
    MO_Q  = MO_64,

    MO_LEUW = MO_LE | MO_UW,
    MO_LEUL = MO_LE | MO_UL,
    MO_LESW = MO_LE | MO_SW,
    MO_LESL = MO_LE | MO_SL,
    MO_LEQ  = MO_LE | MO_Q,

    MO_BEUW = MO_BE | MO_UW,
    MO_BEUL = MO_BE | MO_UL,
    MO_BESW = MO_BE | MO_SW,
    MO_BESL = MO_BE | MO_SL,
    MO_BEQ  = MO_BE | MO_Q,

#ifdef NEED_CPU_H
    MO_TEUW = MO_TE | MO_UW,
    MO_TEUL = MO_TE | MO_UL,
    MO_TESW = MO_TE | MO_SW,
    MO_TESL = MO_TE | MO_SL,
    MO_TEQ  = MO_TE | MO_Q,
#endif

    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;

#endif
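As a quick orientation, not part of the patch itself: a MemOp is composed by OR-ing together a size, an optional MO_SIGN, an endianness flag, and an alignment constraint, and the masks above take it apart again. A minimal sketch using only constants defined in this header:

#include <stdbool.h>
#include "exec/memop.h"

/* Compose a little-endian, sign-extended, 16-bit access that must be
 * 2-byte aligned, then decode the individual fields again. */
static void memop_fields_demo(void)
{
    MemOp op = MO_LESW | MO_ALIGN_2;

    unsigned size_bytes  = 1u << (op & MO_SIZE);          /* 2 */
    bool     sign_extend = (op & MO_SIGN) != 0;           /* true */
    bool     byte_swap   = (op & MO_BSWAP) != 0;          /* true only on a big-endian host */
    /* The direct shift below decodes the explicit MO_ALIGN_x values;
     * MO_ALIGN and MO_UNALN need the special cases handled by
     * get_alignment_bits(). */
    unsigned align_bits  = (op & MO_AMASK) >> MO_ASHIFT;  /* 1, i.e. 2-byte alignment */

    (void)size_bytes; (void)sign_extend; (void)byte_swap; (void)align_bits;
}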
2 changes: 1 addition & 1 deletion target/alpha/translate.c
@@ -403,7 +403,7 @@ static inline void gen_store_mem(DisasContext *ctx,
 
 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                            int32_t disp16, int mem_idx,
-                                           TCGMemOp op)
+                                           MemOp op)
 {
     TCGLabel *lab_fail, *lab_done;
     TCGv addr, val;
48 changes: 24 additions & 24 deletions target/arm/translate-a64.c
@@ -85,7 +85,7 @@ typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
 typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
 typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
-typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
+typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
 
 /* initialize TCG globals. */
 void a64_translate_init(void)
@@ -433,7 +433,7 @@ TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
  * Dn, Sn, Hn or Bn).
  * (Note that this is not the same mapping as for A32; see cpu.h)
  */
-static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
+static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
 {
     return vec_reg_offset(s, regno, 0, size);
 }
@@ -849,7 +849,7 @@ static void do_gpr_ld_memidx(DisasContext *s,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
 {
-    TCGMemOp memop = s->be_data + size;
+    MemOp memop = s->be_data + size;
 
     g_assert(size <= 3);
 
@@ -926,7 +926,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
     TCGv_i64 tmphi;
 
     if (size < 4) {
-        TCGMemOp memop = s->be_data + size;
+        MemOp memop = s->be_data + size;
         tmphi = tcg_const_i64(0);
         tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
     } else {
@@ -967,7 +967,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
 
 /* Get value of an element within a vector register */
 static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
-                             int element, TCGMemOp memop)
+                             int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
     switch (memop) {
@@ -999,7 +999,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
 }
 
 static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
-                                 int element, TCGMemOp memop)
+                                 int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
     switch (memop) {
@@ -1026,7 +1026,7 @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
 
 /* Set value of an element within a vector register */
 static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
-                              int element, TCGMemOp memop)
+                              int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
     switch (memop) {
@@ -1048,7 +1048,7 @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
 }
 
 static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
-                                  int destidx, int element, TCGMemOp memop)
+                                  int destidx, int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
     switch (memop) {
@@ -1068,7 +1068,7 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
 
 /* Store from vector register to memory */
 static void do_vec_st(DisasContext *s, int srcidx, int element,
-                      TCGv_i64 tcg_addr, int size, TCGMemOp endian)
+                      TCGv_i64 tcg_addr, int size, MemOp endian)
 {
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
@@ -1080,7 +1080,7 @@ static void do_vec_st(DisasContext *s, int srcidx, int element,
 
 /* Load from memory to vector register */
 static void do_vec_ld(DisasContext *s, int destidx, int element,
-                      TCGv_i64 tcg_addr, int size, TCGMemOp endian)
+                      TCGv_i64 tcg_addr, int size, MemOp endian)
 {
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
@@ -2176,7 +2176,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
 {
     int idx = get_mem_index(s);
-    TCGMemOp memop = s->be_data;
+    MemOp memop = s->be_data;
 
     g_assert(size <= 3);
     if (is_pair) {
@@ -3262,7 +3262,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     bool is_postidx = extract32(insn, 23, 1);
     bool is_q = extract32(insn, 30, 1);
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
-    TCGMemOp endian = s->be_data;
+    MemOp endian = s->be_data;
 
     int ebytes;   /* bytes per element */
     int elements; /* elements per vector */
@@ -5431,7 +5431,7 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
     unsigned int mos, type, rm, cond, rn, rd;
     TCGv_i64 t_true, t_false, t_zero;
     DisasCompare64 c;
-    TCGMemOp sz;
+    MemOp sz;
 
     mos = extract32(insn, 29, 3);
     type = extract32(insn, 22, 2);
@@ -6243,7 +6243,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn)
     int mos = extract32(insn, 29, 3);
     uint64_t imm;
     TCGv_i64 tcg_res;
-    TCGMemOp sz;
+    MemOp sz;
 
     if (mos || imm5) {
         unallocated_encoding(s);
@@ -7006,7 +7006,7 @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
 {
     if (esize == size) {
         int element;
-        TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
+        MemOp msize = esize == 16 ? MO_16 : MO_32;
         TCGv_i32 tcg_elem;
 
         /* We should have one register left here */
@@ -7998,7 +7998,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
     int shift = (2 * esize) - immhb;
     int elements = is_scalar ? 1 : (64 / esize);
     bool round = extract32(opcode, 0, 1);
-    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
+    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
     TCGv_i64 tcg_rn, tcg_rd, tcg_round;
     TCGv_i32 tcg_rd_narrowed;
     TCGv_i64 tcg_final;
@@ -8157,7 +8157,7 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
         }
     };
     NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
-    TCGMemOp memop = scalar ? size : MO_32;
+    MemOp memop = scalar ? size : MO_32;
     int maxpass = scalar ? 1 : is_q ? 4 : 2;
 
     for (pass = 0; pass < maxpass; pass++) {
@@ -8201,7 +8201,7 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
     TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
     TCGv_i32 tcg_shift = NULL;
 
-    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
+    MemOp mop = size | (is_signed ? MO_SIGN : 0);
     int pass;
 
     if (fracbits || size == MO_64) {
@@ -9980,7 +9980,7 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
     int dsize = is_q ? 128 : 64;
     int esize = 8 << size;
     int elements = dsize/esize;
-    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
+    MemOp memop = size | (is_u ? 0 : MO_SIGN);
     TCGv_i64 tcg_rn = new_tmp_a64(s);
     TCGv_i64 tcg_rd = new_tmp_a64(s);
     TCGv_i64 tcg_round;
@@ -10323,7 +10323,7 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
         TCGv_i64 tcg_passres;
-        TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
+        MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
 
         int elt = pass + is_q * 2;
 
@@ -11803,7 +11803,7 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
 
     if (size == 2) {
         /* 32 + 32 -> 64 op */
-        TCGMemOp memop = size + (u ? 0 : MO_SIGN);
+        MemOp memop = size + (u ? 0 : MO_SIGN);
 
         for (pass = 0; pass < maxpass; pass++) {
             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
@@ -12825,7 +12825,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
 
     switch (is_fp) {
     case 1: /* normal fp */
-        /* convert insn encoded size to TCGMemOp size */
+        /* convert insn encoded size to MemOp size */
         switch (size) {
         case 0: /* half-precision */
             size = MO_16;
@@ -12873,7 +12873,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
         return;
     }
 
-    /* Given TCGMemOp size, adjust register and indexing. */
+    /* Given MemOp size, adjust register and indexing. */
     switch (size) {
     case MO_16:
         index = h << 2 | l << 1 | m;
@@ -13170,7 +13170,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
         TCGv_i64 tcg_res[2];
         int pass;
         bool satop = extract32(opcode, 0, 1);
-        TCGMemOp memop = MO_32;
+        MemOp memop = MO_32;
 
         if (satop || !u) {
             memop |= MO_SIGN;
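A recurring pattern in the translate-a64.c hunks above: s->be_data holds exactly MO_BE or MO_LE, and the size field (MO_8..MO_64) occupies low bits that the endianness flag never touches, so s->be_data + size and s->be_data | size are interchangeable. A one-line illustration using the header's constants:

MemOp memop = MO_BE + MO_32;    /* equivalent to MO_BE | MO_32, i.e. MO_BEUL */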
2 changes: 1 addition & 1 deletion target/arm/translate-a64.h
@@ -62,7 +62,7 @@ static inline void assert_fp_access_checked(DisasContext *s)
  * the FP/vector register Qn.
  */
 static inline int vec_reg_offset(DisasContext *s, int regno,
                                 int element, TCGMemOp size)
-                                 int element, TCGMemOp size)
+                                 int element, MemOp size)
 {
     int element_size = 1 << size;
     int offs = element * element_size;
2 changes: 1 addition & 1 deletion target/arm/translate-sve.c
@@ -4567,7 +4567,7 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
  */
 
 /* The memory mode of the dtype. */
-static const TCGMemOp dtype_mop[16] = {
+static const MemOp dtype_mop[16] = {
     MO_UB, MO_UB, MO_UB, MO_UB,
     MO_SL, MO_UW, MO_UW, MO_UW,
     MO_SW, MO_SW, MO_UL, MO_UL,
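dtype_mop is indexed by the 4-bit dtype field of an SVE load encoding, and each entry is now a plain MemOp. A hedged usage sketch (the variable names here are illustrative, not taken from the patch):

MemOp memop = dtype_mop[dtype & 15];    /* e.g. dtype 4 -> MO_SL: 32-bit load, sign-extended */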
