Merge tag 'pull-tcg-20230505' of https://gitlab.com/rth7680/qemu into staging

softfloat: Fix the incorrect computation in float32_exp2
tcg: Remove compatibility helpers for qemu ld/st
target/alpha: Remove TARGET_ALIGNED_ONLY
target/hppa: Remove TARGET_ALIGNED_ONLY
target/sparc: Remove TARGET_ALIGNED_ONLY
tcg: Cleanups preparing to unify calls to qemu_ld/st helpers

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmRVc9UdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9OiAgAgwc6wFOzFtSnYrvH
# b9YgcJLPX8urgx9g1Exv553hbVtt2J0lsLAhlgwKpms3Os4p6znKhUWcGosHFixO
# eBQFqcS22Cu/ZM2s6299GOGDpxCpjx0/bX7JJTjW805SdSgDAuEUIbKe0ZqQT5tx
# ++F9is2+plp95/BeQz2+hbkbbpdktUkkk288Adoz3KRHqt/zd8cer0WrqR2uVAuX
# swpEluwtCfaewc0iPcNjlp9rLzO882wCFm0RG1EC2j9NHtq8O8xyamM9PPEaRXLv
# MiMA2nB6hsGMz33Wuec8cZTMaCLB+Oqhbq7eYPbCA4SmJBE3V9Rgc7GL4B7yCsyI
# OXSK+Q==
# =GIXd
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 05 May 2023 10:23:33 PM BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20230505' of https://gitlab.com/rth7680/qemu: (42 commits)
  tcg: Widen helper_*_st[bw]_mmu val arguments
  tcg: Introduce arg_slot_stk_ofs
  tcg: Replace REG_P with arg_loc_reg_p
  tcg: Move TCGLabelQemuLdst to tcg.c
  tcg/sparc64: Pass TCGType to tcg_out_qemu_{ld,st}
  tcg/sparc64: Drop is_64 test from tcg_out_qemu_ld data return
  tcg/s390x: Introduce HostAddress
  tcg/s390x: Pass TCGType to tcg_out_qemu_{ld,st}
  tcg/riscv: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/riscv: Require TCG_TARGET_REG_BITS == 64
  tcg/ppc: Introduce HostAddress
  tcg/ppc: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/mips: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/loongarch64: Introduce HostAddress
  tcg/loongarch64: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/arm: Introduce HostAddress
  tcg/arm: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/aarch64: Introduce HostAddress
  tcg/aarch64: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/i386: Introduce tcg_out_testi
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
rth7680 committed May 5, 2023
2 parents 2149a21 + 35a0bd6 commit 47d3878
Showing 42 changed files with 1,117 additions and 1,288 deletions.
6 changes: 3 additions & 3 deletions accel/tcg/cputlb.c
@@ -2508,7 +2508,7 @@ full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_UB);
}

-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_stb_mmu(env, addr, val, oi, retaddr);
@@ -2521,7 +2521,7 @@ static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_le_stw_mmu(env, addr, val, oi, retaddr);
@@ -2534,7 +2534,7 @@ static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_be_stw_mmu(env, addr, val, oi, retaddr);
1 change: 0 additions & 1 deletion configs/targets/alpha-linux-user.mak
@@ -1,4 +1,3 @@
TARGET_ARCH=alpha
TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
1 change: 0 additions & 1 deletion configs/targets/alpha-softmmu.mak
@@ -1,3 +1,2 @@
TARGET_ARCH=alpha
-TARGET_ALIGNED_ONLY=y
TARGET_SUPPORTS_MTTCG=y
1 change: 0 additions & 1 deletion configs/targets/hppa-linux-user.mak
@@ -1,5 +1,4 @@
TARGET_ARCH=hppa
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
1 change: 0 additions & 1 deletion configs/targets/hppa-softmmu.mak
@@ -1,4 +1,3 @@
TARGET_ARCH=hppa
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
TARGET_SUPPORTS_MTTCG=y
1 change: 0 additions & 1 deletion configs/targets/sparc-linux-user.mak
@@ -1,5 +1,4 @@
TARGET_ARCH=sparc
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
1 change: 0 additions & 1 deletion configs/targets/sparc-softmmu.mak
@@ -1,3 +1,2 @@
TARGET_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
1 change: 0 additions & 1 deletion configs/targets/sparc32plus-linux-user.mak
@@ -4,5 +4,4 @@ TARGET_BASE_ARCH=sparc
TARGET_ABI_DIR=sparc
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
1 change: 0 additions & 1 deletion configs/targets/sparc64-linux-user.mak
@@ -3,5 +3,4 @@ TARGET_BASE_ARCH=sparc
TARGET_ABI_DIR=sparc
TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
1 change: 0 additions & 1 deletion configs/targets/sparc64-softmmu.mak
@@ -1,4 +1,3 @@
TARGET_ARCH=sparc64
TARGET_BASE_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
2 changes: 1 addition & 1 deletion fpu/softfloat.c
@@ -5135,7 +5135,7 @@ float32 float32_exp2(float32 a, float_status *status)
float64_unpack_canonical(&rp, float64_one, status);
for (i = 0 ; i < 15 ; i++) {
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
-rp = *parts_muladd(&tp, &xp, &rp, 0, status);
+rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
xnp = *parts_mul(&xnp, &xp, status);
}

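For reference, the fix above restores the intended power-series accumulation: each coefficient must be multiplied by the running power of x held in xnp, whereas the old code multiplied every term by xp (x to the first power). A minimal sketch of the pattern with plain doubles, not the FloatParts64/parts_muladd() machinery (coeff and nterms are placeholder names for this illustration):

double exp2_poly(double x, const double *coeff, int nterms)
{
    double r  = 1.0;   /* running sum, seeded like float64_one above */
    double xn = x;     /* x^(i+1), the role xnp plays in softfloat.c */

    for (int i = 0; i < nterms; i++) {
        r = coeff[i] * xn + r;   /* fixed form: multiply by xn, not x */
        xn *= x;                 /* advance to the next power of x */
    }
    return r;
}
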
10 changes: 7 additions & 3 deletions include/tcg/tcg-ldst.h
@@ -55,15 +55,19 @@ tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);

-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+/*
+ * Value extended to at least uint32_t, so that some ABIs do not require
+ * zero-extension from uint8_t or uint16_t.
+ */
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr);
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
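
The comment added above is the rationale for the prototype changes in this file and in cputlb.c: with a widened uint32_t parameter, callers need not zero-extend a byte or halfword value before the call, because the helper itself only consumes the low bits. A rough, non-QEMU illustration of that contract:

#include <stdint.h>

/* Sketch only: the callee truncates, so any stale upper bits in the
 * widened argument are harmless at the call site. */
static void store_byte(uint8_t *dst, uint32_t val)
{
    *dst = (uint8_t)val;   /* only the low 8 bits reach memory */
}
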
55 changes: 0 additions & 55 deletions include/tcg/tcg-op.h
@@ -841,61 +841,6 @@ void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_ld_i128(TCGv_i128, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_st_i128(TCGv_i128, TCGv, TCGArg, MemOp);

-static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB);
-}
-
-static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW);
-}
-
-static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL);
-}
-
-static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ);
-}
-
-static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
-{
-tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ);
-}
-
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
TCGArg, MemOp);
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
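
With the wrappers above gone, call sites use the generic entry points and pass the MemOp explicitly, as the target/avr and target/cris hunks further down do. The mapping mirrors the deleted inline functions; each pair below shows a removed wrapper followed by its replacement (illustrative only, the old form no longer compiles after this commit):

tcg_gen_qemu_ld8u(ret, addr, mem_index);            /* removed wrapper */
tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);    /* replacement */

tcg_gen_qemu_st32(val, addr, mem_index);            /* removed wrapper */
tcg_gen_qemu_st_tl(val, addr, mem_index, MO_TEUL);  /* replacement */
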
38 changes: 21 additions & 17 deletions target/alpha/translate.c
@@ -72,7 +72,7 @@ struct DisasContext {
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
-#define UNALIGN(C) 0
+#define UNALIGN(C) MO_ALIGN
#endif

/* Target-specific return values from translate_one, indicating the
@@ -2399,21 +2399,21 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
-tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
-tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
-tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
-tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
@@ -2438,11 +2438,13 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
-tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+MO_LESL | MO_ALIGN);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
-tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
+tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+MO_LEUQ | MO_ALIGN);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2453,12 +2455,14 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0xE:
/* Longword virtual access with alternate access mode and
protection checks (hw_ldl/wa) */
-tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+MO_LESL | MO_ALIGN);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
-tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
+tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+MO_LEUQ | MO_ALIGN);
break;
}
break;
@@ -2659,25 +2663,25 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
-tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access */
va = load_gpr(ctx, ra);
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
-tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
+tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
-MMU_PHYS_IDX, MO_LESL);
+MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x3:
/* Quadword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
-MMU_PHYS_IDX, MO_LEUQ);
+MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x4:
/* Longword virtual access */
@@ -2771,11 +2775,11 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2A:
/* LDL_L */
-gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
break;
case 0x2B:
/* LDQ_L */
-gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
+gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
break;
case 0x2C:
/* STL */
@@ -2788,12 +2792,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x2E:
/* STL_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
-ctx->mem_idx, MO_LESL);
+ctx->mem_idx, MO_LESL | MO_ALIGN);
break;
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
-ctx->mem_idx, MO_LEUQ);
+ctx->mem_idx, MO_LEUQ | MO_ALIGN);
break;
case 0x30:
/* BR */
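
The pattern throughout this file follows from dropping TARGET_ALIGNED_ONLY: alignment is now requested per access through MemOp flags rather than implied by a target-wide build flag. In system mode UNALIGN(C) expands to MO_ALIGN, and accesses that must always trap on misalignment (the locked and physical-mode cases above) OR in MO_ALIGN explicitly. A condensed sketch of the two pieces as used in translate.c (the final call line is illustrative):

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign   /* honour the per-process unalign setting */
#else
#define UNALIGN(C) MO_ALIGN       /* system mode: always enforce alignment */
#endif

tcg_gen_qemu_ld_i64(va, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
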
16 changes: 8 additions & 8 deletions target/avr/translate.c
@@ -1492,7 +1492,7 @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
gen_helper_fullwr(cpu_env, data, addr);
} else {
-tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
+tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB);
}
}

@@ -1501,7 +1501,7 @@ static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
gen_helper_fullrd(data, cpu_env, addr);
} else {
-tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
+tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
}
}

@@ -1979,7 +1979,7 @@ static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a)

tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
tcg_gen_or_tl(addr, addr, L);
-tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
return true;
}

@@ -1996,7 +1996,7 @@ static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a)

tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
tcg_gen_or_tl(addr, addr, L);
-tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
return true;
}

@@ -2013,7 +2013,7 @@ static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a)

tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
tcg_gen_or_tl(addr, addr, L);
-tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
tcg_gen_andi_tl(L, addr, 0xff);
tcg_gen_shri_tl(addr, addr, 8);
@@ -2045,7 +2045,7 @@ static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a)
TCGv Rd = cpu_r[0];
TCGv addr = gen_get_zaddr();

-tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
return true;
}

@@ -2058,7 +2058,7 @@ static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a)
TCGv Rd = cpu_r[a->rd];
TCGv addr = gen_get_zaddr();

-tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
return true;
}

@@ -2071,7 +2071,7 @@ static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a)
TCGv Rd = cpu_r[a->rd];
TCGv addr = gen_get_zaddr();

-tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
gen_set_zaddr(addr);
return true;
18 changes: 4 additions & 14 deletions target/cris/translate_v10.c.inc
@@ -80,13 +80,9 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
/* Store only if F flag isn't set */
tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10);
tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
-if (size == 1) {
-tcg_gen_qemu_st8(tval, taddr, mem_index);
-} else if (size == 2) {
-tcg_gen_qemu_st16(tval, taddr, mem_index);
-} else {
-tcg_gen_qemu_st32(tval, taddr, mem_index);
-}
+
+tcg_gen_qemu_st_tl(tval, taddr, mem_index, ctz32(size) | MO_TE);
+
gen_set_label(l1);
tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */
tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/
@@ -109,13 +105,7 @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
return;
}

-if (size == 1) {
-tcg_gen_qemu_st8(val, addr, mem_index);
-} else if (size == 2) {
-tcg_gen_qemu_st16(val, addr, mem_index);
-} else {
-tcg_gen_qemu_st32(val, addr, mem_index);
-}
+tcg_gen_qemu_st_tl(val, addr, mem_index, ctz32(size) | MO_TE);
}


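The single call works because ctz32() of the access size yields the MemOp size field directly: ctz32(1) == 0 (MO_8), ctz32(2) == 1 (MO_16), ctz32(4) == 2 (MO_32), and OR-ing MO_TE selects the target's endianness. One line therefore replaces the old size-based if/else chain:

/* size is 1, 2 or 4; ctz32(size) picks MO_8, MO_16 or MO_32 */
tcg_gen_qemu_st_tl(val, addr, mem_index, ctz32(size) | MO_TE);
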
