Permalink
Browse files

fixed the carry flag calculation in the negc op

added more tests for negc
sanitized the bits stored in SR, just as they are for FPSCR
removed the unneeded disp_pc struct from the sh4_instr union
emitted calls to the invalid_instr callback for INVALID ops
  • Loading branch information...
inolen committed May 16, 2017
1 parent c13a579 commit fe43c2415af73145315b76ddf02060d301fd2acc
View
@@ -13,8 +13,13 @@ static struct sh4_interrupt_info sh4_interrupts[NUM_SH_INTERRUPTS] = {
/* recompute the set of pending interrupts. an interrupt is pending when it
   has been requested and its priority is above the minimum priority encoded
   in SR's I field (unless the block bit masks everything) */
void sh4_intc_update_pending(struct sh4 *sh4) {
  int min_priority = (sh4->ctx.sr & I_MASK) >> I_BIT;

  uint64_t priority_mask = ~sh4->priority_mask[min_priority];

  /* mask all interrupts if interrupt block bit is set */
  if (sh4->ctx.sr & BL_MASK) {
    priority_mask = 0;
  }

  sh4->ctx.pending_interrupts = sh4->requested_interrupts & priority_mask;
}
@@ -49,8 +54,11 @@ void sh4_intc_reprioritize(struct sh4 *sh4) {
uint64_t old = sh4->requested_interrupts;
sh4->requested_interrupts = 0;
for (int level = 0, n = 0; level < 16; level++) {
/* for even priorities, give precedence to lower id interrupts */
int n = 0;
for (int level = 0; level < 16; level++) {
/* iterate backwards, giving priority to lower id interrupts when the
priorities are equal */
for (int i = NUM_SH_INTERRUPTS - 1; i >= 0; i--) {
struct sh4_interrupt_info *int_info = &sh4_interrupts[i];
@@ -34,6 +34,10 @@
#define RB_MASK (1u << RB_BIT)
#define MD_MASK (1u << MD_BIT)
#define SR_MASK \
(MD_MASK | RB_MASK | BL_MASK | FD_MASK | M_MASK | Q_MASK | I_MASK | S_MASK | \
T_MASK)
/* FPSCR bits */
/* denormalization mode */
@@ -46,11 +50,18 @@
#define FR_BIT 21
#define RM_MASK 0x3
#define FLAG_MASK 0x7c
#define ENABLE_MASK 0xf80
#define CAUSE_MASK 0x3f000
#define DN_MASK (1u << DN_BIT)
#define PR_MASK (1u << PR_BIT)
#define SZ_MASK (1u << SZ_BIT)
#define FR_MASK (1u << FR_BIT)
#define FPSCR_MASK \
(RM_MASK | FLAG_MASK | ENABLE_MASK | CAUSE_MASK | DN_MASK | PR_MASK | \
SZ_MASK | FR_MASK)
struct sh4_ctx {
/* there are 24 32-bit general registers, r0_bank0-r7_bank0, r0_bank1-r7_bank1
and r8-r15. r contains the active bank's r0-r7 as well as r8-r15. ralt
@@ -47,12 +47,6 @@ union sh4_instr {
uint32_t : 4;
} imm;
struct {
uint32_t disp : 8;
uint32_t rn : 4;
uint32_t : 4;
} disp_pc;
struct {
uint32_t disp : 8;
uint32_t : 8;
@@ -15,19 +15,19 @@ static uint32_t load_sr(struct sh4_ctx *ctx) {
/* update SR, keeping only the architecturally-defined bits (SR_MASK), then
   re-derive the cached sr_t / sr_s fields and notify the guest of the
   change, passing it the previous value */
static void store_sr(struct sh4_guest *guest, struct sh4_ctx *ctx,
                     uint32_t new_sr) {
  uint32_t old_sr = load_sr(ctx);

  /* sanitize bits stored in SR just as they are in FPSCR */
  ctx->sr = new_sr & SR_MASK;
  sh4_explode_sr(ctx);

  guest->sr_updated(guest->data, old_sr);
}
/* read the current FPSCR value. no masking is needed on load; store_fpscr
   sanitizes with FPSCR_MASK on every write */
static uint32_t load_fpscr(struct sh4_ctx *ctx) {
  return ctx->fpscr;
}
/* update FPSCR, keeping only the architecturally-defined bits (FPSCR_MASK),
   and notify the guest of the change, passing it the previous value */
static void store_fpscr(struct sh4_guest *guest, struct sh4_ctx *ctx,
                        uint32_t new_fpscr) {
  uint32_t old_fpscr = load_fpscr(ctx);

  ctx->fpscr = new_fpscr & FPSCR_MASK;

  guest->fpscr_updated(guest->data, old_fpscr);
}
@@ -109,7 +109,7 @@ typedef int32_t int128_t[4];
#define LOAD_T_I32() (CTX->sr_t)
#define STORE_T_I8(v) (CTX->sr_t = v)
/* no trailing semicolon: the macro must be usable as a single statement,
   e.g. inside an unbraced if/else, without expanding to two statements */
#define STORE_T_I32(v) STORE_T_I8(v)
#define STORE_T_IMM_I32(v) STORE_T_I8(v)
#define LOAD_S_I32() (CTX->sr_s)
@@ -379,6 +379,8 @@ typedef int32_t int128_t[4];
#define BRANCH_TRUE_IMM_I32(c, d) if (c) { CTX->pc = d; return; }
#define BRANCH_FALSE_IMM_I32(c, d) if (!c) { CTX->pc = d; return; }
#define INVALID_INSTR() guest->invalid_instr(guest->data, addr)
#define PREF_SQ_COND(c, addr) if (c) { guest->sq_prefetch(guest->data, addr); }
/* clang-format on */
@@ -1,6 +1,5 @@
/* hand unrecognized opcodes off to the guest's invalid_instr callback
   instead of aborting the process */
INSTR(INVALID) {
  INVALID_INSTR();
}
/* MOV #imm,Rn */
@@ -12,18 +11,17 @@ INSTR(MOVI) {
/* MOV.W @(disp,PC),Rn */
INSTR(MOVWLPC) {
  /* the 8-bit displacement is doubled; the PC term reads as the instruction
     address + 4 at this point */
  uint32_t ea = (i.imm.imm * 2) + addr + 4;
  /* load 16 bits and sign-extend into the destination register */
  I32 v = SEXT_I16_I32(LOAD_IMM_I16(ea));
  STORE_GPR_I32(i.imm.rn, v);
  NEXT_INSTR();
}
/* MOV.L @(disp,PC),Rn */
INSTR(MOVLLPC) {
  /* the 8-bit displacement is quadrupled; the PC base is longword-aligned
     (addr & ~3) before the displacement is added */
  uint32_t ea = (i.imm.imm * 4) + (addr & ~3) + 4;
  I32 v = LOAD_IMM_I32(ea);
  STORE_GPR_I32(i.imm.rn, v);
  NEXT_INSTR();
}
@@ -699,8 +697,11 @@ INSTR(NEGC) {
I32 rm = LOAD_GPR_I32(i.def.rm);
I32 v = SUB_I32(NEG_I32(rm), t);
STORE_GPR_I32(i.def.rn, v);
I32 c = OR_I32(t, rm);
STORE_T_I32(c);
/* compute carry flag, taken from Hacker's Delight */
I32 carry = LSHR_IMM_I32(OR_I32(rm, v), 31);
STORE_T_I32(carry);
NEXT_INSTR();
}
@@ -42,19 +42,20 @@ static struct ir_value *ir_load_sr(struct ir *ir) {
}
static void ir_store_sr(struct sh4_guest *guest, struct ir *ir,
struct ir_value *sr) {
CHECK_EQ(sr->type, VALUE_I32);
struct ir_value *v) {
CHECK_EQ(v->type, VALUE_I32);
v = ir_and(ir, v, ir_alloc_i32(ir, SR_MASK));
struct ir_value *sr_updated = ir_alloc_ptr(ir, guest->sr_updated);
struct ir_value *data = ir_alloc_ptr(ir, guest->data);
struct ir_value *old_sr = ir_load_sr(ir);
ir_store_context(ir, offsetof(struct sh4_ctx, sr), sr);
ir_store_context(ir, offsetof(struct sh4_ctx, sr), v);
/* inline version of sh4_explode_sr */
struct ir_value *sr_t = ir_and(ir, sr, ir_alloc_i32(ir, T_MASK));
struct ir_value *sr_t = ir_and(ir, v, ir_alloc_i32(ir, T_MASK));
struct ir_value *sr_s =
ir_lshri(ir, ir_and(ir, sr, ir_alloc_i32(ir, S_MASK)), S_BIT);
ir_lshri(ir, ir_and(ir, v, ir_alloc_i32(ir, S_MASK)), S_BIT);
ir_store_context(ir, offsetof(struct sh4_ctx, sr_t), sr_t);
ir_store_context(ir, offsetof(struct sh4_ctx, sr_s), sr_s);
@@ -67,13 +68,13 @@ static void ir_store_sr(struct sh4_guest *guest, struct ir *ir,
/* emit a load of the current FPSCR value from the context. the stored value
   is already sanitized (ir_store_fpscr masks with FPSCR_MASK), so it can be
   returned as-is without an extra AND */
static struct ir_value *ir_load_fpscr(struct ir *ir) {
  struct ir_value *fpscr =
      ir_load_context(ir, offsetof(struct sh4_ctx, fpscr), VALUE_I32);
  return fpscr;
}
static void ir_store_fpscr(struct sh4_guest *guest, struct ir *ir,
struct ir_value *v) {
CHECK_EQ(v->type, VALUE_I32);
v = ir_and(ir, v, ir_alloc_i32(ir, 0x003fffff));
v = ir_and(ir, v, ir_alloc_i32(ir, FPSCR_MASK));
struct ir_value *fpscr_updated = ir_alloc_ptr(ir, guest->fpscr_updated);
struct ir_value *data = ir_alloc_ptr(ir, guest->data);
@@ -423,6 +424,12 @@ static void ir_store_fpscr(struct sh4_guest *guest, struct ir *ir,
#define BRANCH_TRUE_IMM_I32(c, d) ir_branch_true(ir, c, ir_alloc_i32(ir, d))
#define BRANCH_FALSE_IMM_I32(c, d) ir_branch_false(ir, c, ir_alloc_i32(ir, d))
#define INVALID_INSTR() { \
struct ir_value *invalid_instr = ir_alloc_i64(ir, (uint64_t)guest->invalid_instr); \
struct ir_value *data = ir_alloc_i64(ir, (uint64_t)guest->data); \
ir_call_2(ir, invalid_instr, data, ir_alloc_i32(ir, addr)); \
}
#define PREF_SQ_COND(c, addr) { \
struct ir_value *sq_prefetch = ir_alloc_i64(ir, (uint64_t)guest->sq_prefetch); \
struct ir_value *data = ir_alloc_i64(ir, (uint64_t)guest->data); \
View
@@ -1,17 +1,65 @@
test_negc_zero_nocarry:
  # negc of 0 with T clear: 0 - 0 - 0 = 0, no borrow so T stays clear
  # REGISTER_IN r0 0x700000f0
  # REGISTER_IN r1 0x0
  ldc r0, SR
  negc r1, r2
  stc SR, r0
  rts
  nop
  # REGISTER_OUT r0 0x700000f0
  # REGISTER_OUT r2 0x0
test_negc_zero_carry:
  # negc of 0 with T set: 0 - 0 - 1 = 0xffffffff, borrow so T stays set
  # REGISTER_IN r0 0x700000f1
  # REGISTER_IN r1 0x0
  ldc r0, SR
  negc r1, r2
  stc SR, r0
  rts
  nop
  # REGISTER_OUT r0 0x700000f1
  # REGISTER_OUT r2 0xffffffff
test_negc_neg_nocarry:
# negc of 0x80000000 with T clear: 0 - 0x80000000 = 0x80000000, borrow sets T
# REGISTER_IN r0 0x700000f0
# REGISTER_IN r1 0x80000000
ldc r0, SR
negc r1, r2
stc SR, r0
rts
nop
# REGISTER_OUT r0 0x700000f1
# REGISTER_OUT r2 0x80000000
test_negc_neg_carry:
# negc of 0x80000000 with T set: 0 - 0x80000000 - 1 = 0x7fffffff, borrow sets T
# REGISTER_IN r0 0x700000f1
# REGISTER_IN r1 0x80000000
ldc r0, SR
negc r1, r2
stc SR, r0
rts
nop
# REGISTER_OUT r0 0x700000f1
# REGISTER_OUT r2 0x7fffffff
test_negc_pos_nocarry:
# negc of 0x7fffffff with T clear: 0 - 0x7fffffff = 0x80000001, borrow sets T
# REGISTER_IN r0 0x700000f0
# REGISTER_IN r1 0x7fffffff
ldc r0, SR
negc r1, r2
stc SR, r0
rts
nop
# REGISTER_OUT r0 0x700000f1
# REGISTER_OUT r2 0x80000001
test_negc_pos_carry:
# negc of 0x7fffffff with T set: 0 - 0x7fffffff - 1 = 0x80000000, borrow sets T
# REGISTER_IN r0 0x700000f1
# REGISTER_IN r1 0x7fffffff
ldc r0, SR
negc r1, r2
stc SR, r0
rts
nop
# REGISTER_OUT r0 0x700000f1
# REGISTER_OUT r2 0x80000000
Oops, something went wrong.

0 comments on commit fe43c24

Please sign in to comment.