From 04dec36bb3ce6634dc4031dd98e09154c17251a0 Mon Sep 17 00:00:00 2001
From: Leon Alrae
Date: Mon, 4 Aug 2014 12:42:00 +0100
Subject: [PATCH 1/5] target-mips: ll generates AdEL for unaligned addr

Signed-off-by: Leon Alrae
Signed-off-by: Yongbok Kim
---
 target-mips/op_helper.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 075fef252eac..61e4c3d59ff7 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -304,16 +304,20 @@ static inline hwaddr do_translate_address(CPUMIPSState *env,
     }
 }
 
-#define HELPER_LD_ATOMIC(name, insn)                                          \
+#define HELPER_LD_ATOMIC(name, insn, almask)                                  \
 target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
 {                                                                             \
+    if (arg & almask) {                                                       \
+        env->CP0_BadVAddr = arg;                                              \
+        helper_raise_exception(env, EXCP_AdEL);                               \
+    }                                                                         \
     env->lladdr = do_translate_address(env, arg, 0);                          \
     env->llval = do_##insn(env, arg, mem_idx);                                \
     return env->llval;                                                        \
 }
-HELPER_LD_ATOMIC(ll, lw)
+HELPER_LD_ATOMIC(ll, lw, 0x3)
 #ifdef TARGET_MIPS64
-HELPER_LD_ATOMIC(lld, ld)
+HELPER_LD_ATOMIC(lld, ld, 0x7)
 #endif
 #undef HELPER_LD_ATOMIC

From f2cf59e8fbd9d6d093408465a65c001f4b4bd42d Mon Sep 17 00:00:00 2001
From: Yongbok Kim
Date: Thu, 11 Dec 2014 10:57:18 +0000
Subject: [PATCH 2/5] softmmu: Add size argument to do_unaligned_access()

Pass the data size argument to do_unaligned_access(). Without the data
size, the callback cannot determine whether an access spans two pages.

Signed-off-by: Yongbok Kim
---
 include/qom/cpu.h          |  8 +++++---
 softmmu_template.h         | 24 ++++++++++++------------
 target-alpha/cpu-qom.h     |  3 ++-
 target-alpha/mem_helper.c  |  3 ++-
 target-mips/cpu-qom.h      |  3 ++-
 target-mips/op_helper.c    |  2 +-
 target-sparc/cpu-qom.h     |  3 ++-
 target-sparc/ldst_helper.c |  3 ++-
 target-xtensa/cpu-qom.h    |  3 ++-
 target-xtensa/op_helper.c  |  2 +-
 10 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 2098f1cb5057..3d6fb50137ee 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -121,7 +121,8 @@ typedef struct CPUClass {
     void (*do_interrupt)(CPUState *cpu);
     CPUUnassignedAccess do_unassigned_access;
     void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
-                                int is_write, int is_user, uintptr_t retaddr);
+                                int is_write, int is_user, uintptr_t retaddr,
+                                unsigned size);
     bool (*virtio_is_big_endian)(CPUState *cpu);
     int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                            uint8_t *buf, int len, bool is_write);
@@ -576,11 +577,12 @@ static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
 
 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                         int is_write, int is_user,
-                                        uintptr_t retaddr)
+                                        uintptr_t retaddr, unsigned size)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
-    return cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
+    return cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr,
+                                   size);
 }
 #endif
 
diff --git a/softmmu_template.h b/softmmu_template.h
index 6b4e615dbf28..18548afdd8fe 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -183,7 +183,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
     if (!VICTIM_TLB_HIT(ADDR_READ)) {
@@ -218,7 +218,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
 #endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
@@ -237,7 +237,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
 
@@ -271,7 +271,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
     if (!VICTIM_TLB_HIT(ADDR_READ)) {
@@ -306,7 +306,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
 #endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
@@ -325,7 +325,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
 
@@ -397,7 +397,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
     if (!VICTIM_TLB_HIT(addr_write)) {
@@ -429,7 +429,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -449,7 +449,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
 
@@ -478,7 +478,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
     if (!VICTIM_TLB_HIT(addr_write)) {
@@ -510,7 +510,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -530,7 +530,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
 
diff --git a/target-alpha/cpu-qom.h b/target-alpha/cpu-qom.h
index b01c6c82eba9..273a8ed8e6f6 100644
--- a/target-alpha/cpu-qom.h
+++ b/target-alpha/cpu-qom.h
@@ -86,6 +86,7 @@ hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int alpha_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
-                                   int is_write, int is_user, uintptr_t retaddr);
+                                   int is_write, int is_user, uintptr_t retaddr,
+                                   unsigned size);
 
 #endif
diff --git a/target-alpha/mem_helper.c b/target-alpha/mem_helper.c
index fc4f57a64478..1d4666ae6d91 100644
--- a/target-alpha/mem_helper.c
+++ b/target-alpha/mem_helper.c
@@ -97,7 +97,8 @@ uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
 }
 
 void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
-                                   int is_write, int is_user, uintptr_t retaddr)
+                                   int is_write, int is_user, uintptr_t retaddr,
+                                   unsigned size)
 {
     AlphaCPU *cpu = ALPHA_CPU(cs);
     CPUAlphaState *env = &cpu->env;
diff --git a/target-mips/cpu-qom.h b/target-mips/cpu-qom.h
index 2ffc1bf3f217..cbd626b7cddf 100644
--- a/target-mips/cpu-qom.h
+++ b/target-mips/cpu-qom.h
@@ -82,6 +82,7 @@ hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
-                                  int is_write, int is_user, uintptr_t retaddr);
+                                  int is_write, int is_user, uintptr_t retaddr,
+                                  unsigned size);
 
 #endif
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 61e4c3d59ff7..ac5d8a7c49fc 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -2455,7 +2455,7 @@ void helper_wait(CPUMIPSState *env)
 
 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   int access_type, int is_user,
-                                  uintptr_t retaddr)
+                                  uintptr_t retaddr, unsigned size)
 {
     MIPSCPU *cpu = MIPS_CPU(cs);
     CPUMIPSState *env = &cpu->env;
diff --git a/target-sparc/cpu-qom.h b/target-sparc/cpu-qom.h
index 477c4d513693..4bfbb846aaa6 100644
--- a/target-sparc/cpu-qom.h
+++ b/target-sparc/cpu-qom.h
@@ -83,6 +83,7 @@ int sparc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu,
                                                  vaddr addr, int is_write,
-                                                 int is_user, uintptr_t retaddr);
+                                                 int is_user, uintptr_t retaddr,
+                                                 unsigned size);
 
 #endif
diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c
index 1a62e193bdb0..b0326085ead7 100644
--- a/target-sparc/ldst_helper.c
+++ b/target-sparc/ldst_helper.c
@@ -2420,7 +2420,8 @@ void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
 #if !defined(CONFIG_USER_ONLY)
 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                                  int is_write,
-                                                 int is_user, uintptr_t retaddr)
+                                                 int is_user, uintptr_t retaddr,
+                                                 unsigned size)
 {
     SPARCCPU *cpu = SPARC_CPU(cs);
     CPUSPARCState *env = &cpu->env;
diff --git a/target-xtensa/cpu-qom.h b/target-xtensa/cpu-qom.h
index 9de5c6eb9f30..e232bbfae014 100644
--- a/target-xtensa/cpu-qom.h
+++ b/target-xtensa/cpu-qom.h
@@ -91,6 +91,7 @@ hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int xtensa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
-                                    int is_write, int is_user, uintptr_t retaddr);
+                                    int is_write, int is_user,
+                                    uintptr_t retaddr, unsigned size);
 
 #endif
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index 872e5a823b88..3d431dedd0c4 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -33,7 +33,7 @@
 #include "qemu/timer.h"
 
 void xtensa_cpu_do_unaligned_access(CPUState *cs,
-        vaddr addr, int is_write, int is_user, uintptr_t retaddr)
+        vaddr addr, int is_write, int is_user, uintptr_t retaddr, unsigned size)
 {
     XtensaCPU *cpu = XTENSA_CPU(cs);
     CPUXtensaState *env = &cpu->env;

From bc5a035cbedafb0923d9509a051e39cc4ee6b427 Mon Sep 17 00:00:00 2001
From: Yongbok Kim
Date: Thu, 11 Dec 2014 11:38:51 +0000
Subject: [PATCH 3/5] target-mips: Misaligned Memory Accesses for R6

Release 6 requires misaligned memory access support for all ordinary
memory access instructions (for example, LW/SW, LWC1/SWC1). However,
misaligned support is not provided for certain special memory accesses
such as atomics (for example, LL/SC).

MIPS Architecture For Programmers Volume I-A: Introduction to the
MIPS64 Architecture, Appendix B Misaligned Memory Accesses.
Available at http://www.imgtec.com/mips/architectures/mips64.asp

Signed-off-by: Yongbok Kim
---
 target-mips/cpu.h            |  2 ++
 target-mips/helper.c         | 33 +++++++++++++++++++++++++++++++++
 target-mips/op_helper.c      | 13 +++++++++++++
 target-mips/translate_init.c |  2 +-
 4 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index bcee18d4535d..66eac9cf88a5 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -803,6 +803,8 @@ int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
 void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
 hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
                                    int rw);
+bool cpu_mips_validate_access(CPUMIPSState *env, target_ulong address,
+                              target_ulong badvaddr, unsigned data_size, int rw);
 #endif
 target_ulong exception_resume_pc (CPUMIPSState *env);
diff --git a/target-mips/helper.c b/target-mips/helper.c
index e24e36b0bd01..0734556976b6 100644
--- a/target-mips/helper.c
+++ b/target-mips/helper.c
@@ -744,6 +744,39 @@ hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
         return physical;
     }
 }
+
+bool cpu_mips_validate_access(CPUMIPSState *env, target_ulong address,
+                              target_ulong badvaddr, unsigned data_size, int rw)
+{
+    hwaddr physical;
+    int prot;
+    int access_type = ACCESS_INT;
+    int ret;
+    target_ulong addr;
+
+    addr = address & ~(data_size - 1);
+    ret = get_physical_address(env, &physical, &prot,
+                               addr, rw, access_type);
+    if (ret != TLBRET_MATCH) {
+        raise_mmu_exception(env, badvaddr, rw, ret);
+        return false;
+    }
+    if (data_size > 1
+        && unlikely((address & ~TARGET_PAGE_MASK) + data_size - 1
+                    >= TARGET_PAGE_SIZE)) {
+        addr += data_size;
+        ret = get_physical_address(env, &physical, &prot,
+                                   addr, rw, access_type);
+        if (ret != TLBRET_MATCH) {
+            if (ret != TLBRET_BADADDR) {
+                badvaddr = addr;
+            }
+            raise_mmu_exception(env, badvaddr, rw, ret);
+            return false;
+        }
+    }
+    return true;
+}
 #endif
 
 static const char * const excp_names[EXCP_LAST + 1] = {
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index ac5d8a7c49fc..343309df632e 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -2462,6 +2462,19 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     int error_code = 0;
     int excp;
 
+    if (env->insn_flags & ISA_MIPS32R6) {
+        /* Release 6 provides support for misaligned memory access for
+         * all ordinary memory reference instructions.
+         */
+        if (!cpu_mips_validate_access(env, addr, addr, size, access_type)) {
+            CPUState *cs = CPU(mips_env_get_cpu(env));
+            do_raise_exception_err(env, cs->exception_index,
+                                   env->error_code, retaddr);
+            return;
+        }
+        return;
+    }
+
     env->CP0_BadVAddr = addr;
 
     if (access_type == MMU_DATA_STORE) {
diff --git a/target-mips/translate_init.c b/target-mips/translate_init.c
index 2765dc4378af..581eb7f6e4dc 100644
--- a/target-mips/translate_init.c
+++ b/target-mips/translate_init.c
@@ -524,7 +524,7 @@ static const mips_def_t mips_defs[] =
     },
     {
         /* A generic CPU supporting MIPS64 Release 6 ISA.
-           FIXME: Support IEEE 754-2008 FP and misaligned memory accesses.
+           FIXME: Support IEEE 754-2008 FP.
                   Eventually this should be replaced by a real CPU model. */
         .name = "MIPS64R6-generic",
         .CP0_PRid = 0x00010000,

From fe96cf5cbfc6f93e59bf817477fa8640e573062d Mon Sep 17 00:00:00 2001
From: Yongbok Kim
Date: Thu, 11 Dec 2014 12:10:14 +0000
Subject: [PATCH 4/5] target-mips: Misaligned Memory Accesses for MSA

MIPS SIMD Architecture vector loads and stores require misalignment
support. An MSA memory access must behave as an atomic operation, so
every address touched by the access has to be validated before the
operation is performed.

Signed-off-by: Yongbok Kim
---
 target-mips/op_helper.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 343309df632e..61678b2ce68a 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -3831,6 +3831,24 @@ FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
 /* Element-by-element access macros */
 #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
 
+#if !defined(CONFIG_USER_ONLY)
+static bool cpu_mips_validate_msa_block_access(CPUMIPSState *env,
+        target_ulong address, int df, int rw)
+{
+    int i;
+    for (i = 0; i < DF_ELEMENTS(df); i++) {
+        if (!cpu_mips_validate_access(env, address + (i << df),
+                                      address, (1 << df), rw)) {
+            CPUState *cs = CPU(mips_env_get_cpu(env));
+            do_raise_exception_err(env, cs->exception_index,
+                                   env->error_code, GETRA());
+            return false;
+        }
+    }
+    return true;
+}
+#endif
+
 void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
                       int32_t s10)
 {
@@ -3838,6 +3856,12 @@ void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
     target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
     int i;
 
+#if !defined(CONFIG_USER_ONLY)
+    if (!cpu_mips_validate_msa_block_access(env, addr, df, MMU_DATA_LOAD)) {
+        return;
+    }
+#endif
+
     switch (df) {
     case DF_BYTE:
         for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
@@ -3873,6 +3897,12 @@ void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
     target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
     int i;
 
+#if !defined(CONFIG_USER_ONLY)
+    if (!cpu_mips_validate_msa_block_access(env, addr, df, MMU_DATA_STORE)) {
+        return;
+    }
+#endif
+
     switch (df) {
     case DF_BYTE:
         for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {

From dfa9fa852b2e54ed7e907dba032f9a26e75d5fa5 Mon Sep 17 00:00:00 2001
From: Yongbok Kim
Date: Thu, 27 Nov 2014 16:01:46 +0000
Subject: [PATCH 5/5] target-mips: fix to clear msacsr

The fexdo, fexupl and fexupr helpers did not clear the MSACSR Cause
bits before performing the operation.

Signed-off-by: Yongbok Kim
---
 target-mips/msa_helper.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/target-mips/msa_helper.c b/target-mips/msa_helper.c
index b08f37f78778..fd669059df65 100644
--- a/target-mips/msa_helper.c
+++ b/target-mips/msa_helper.c
@@ -2650,6 +2650,8 @@ void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
     wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
     uint32_t i;
 
+    clear_msacsr_cause(env);
+
     switch (df) {
     case DF_WORD:
         for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
@@ -3201,6 +3203,8 @@ void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
     wr_t *pws = &(env->active_fpu.fpr[ws].wr);
     uint32_t i;
 
+    clear_msacsr_cause(env);
+
     switch (df) {
     case DF_WORD:
         for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
@@ -3233,6 +3237,8 @@ void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
     wr_t *pws = &(env->active_fpu.fpr[ws].wr);
     uint32_t i;
 
+    clear_msacsr_cause(env);
+
     switch (df) {
     case DF_WORD:
         for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
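
---

A note for reviewers: the core of the validation added in patch 3 is the page-span test in
cpu_mips_validate_access(), which decides whether a second translation is needed. The
standalone C sketch below is not part of the series and not QEMU code; TARGET_PAGE_SIZE is
hard-coded to 4 KiB purely for illustration, whereas QEMU derives it per target.

/* Standalone illustration of the page-span check used by
 * cpu_mips_validate_access() in patch 3.  TARGET_PAGE_SIZE is assumed
 * to be 4 KiB here only for the example. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096u
#define TARGET_PAGE_MASK (~(uint64_t)(TARGET_PAGE_SIZE - 1))

/* Returns true when an access of data_size bytes at address crosses
 * into a second page and therefore needs a second translation. */
static bool access_spans_two_pages(uint64_t address, unsigned data_size)
{
    return data_size > 1 &&
           (address & ~TARGET_PAGE_MASK) + data_size - 1 >= TARGET_PAGE_SIZE;
}

int main(void)
{
    /* A 4-byte load starting at the last byte of a page spans two pages... */
    printf("%d\n", access_spans_two_pages(0x0fff, 4));   /* prints 1 */
    /* ...while the same load starting 4 bytes earlier does not. */
    printf("%d\n", access_spans_two_pages(0x0ffc, 4));   /* prints 0 */
    return 0;
}

Only when this test fires does the helper attempt the second get_physical_address() call,
so the common aligned, single-page case stays on the fast path.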