
arm64jit: Reuse code in I2R funcs.

unknownbrackets committed Dec 29, 2017
1 parent 2498ce5 commit 1ecce2a2e11d65c622d753f601c72fa34d1f1ce1
Showing with 36 additions and 58 deletions.
  1. +25 −47 Common/Arm64Emitter.cpp
  2. +6 −6 Common/Arm64Emitter.h
  3. +3 −3 Core/MIPS/ARM64/Arm64CompALU.cpp
  4. +2 −2 Core/MIPS/ARM64/Arm64Jit.h
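
In outline, the commit deletes the per-function immediate-encoding logic from the I2R emitters (ANDI2R, ORRI2R, EORI2R, ADDI2R, SUBI2R, CMPI2R) and has each one delegate to its Try* counterpart, keeping only the scratch-register fallback for itself. A minimal sketch of the resulting pattern, with XXX standing in for any of the operations (simplified from the diff below, not verbatim source):

    void ARM64XEmitter::XXXI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
        // Fast path: TryXXXI2R emits a single instruction when imm fits the
        // instruction's immediate encoding, and returns false otherwise.
        if (!TryXXXI2R(Rd, Rn, imm)) {
            // Slow path: materialize imm in a scratch register, then emit the
            // register-register form. The caller must have provided a scratch.
            _assert_msg_(JIT, scratch != INVALID_REG, "XXXI2R - need scratch");
            MOVI2R(scratch, imm);
            XXX(Rd, Rn, scratch);
        }
    }

The Try* helpers are widened from u32 to u64 immediates at the same time, so they can serve both W- and X-register callers and decide the operand width from Is64Bit(Rn).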
Common/Arm64Emitter.cpp
@@ -3664,56 +3664,42 @@ void ARM64FloatEmitter::ABI_PopRegisters(uint32_t registers, uint32_t fp_registers) {
 }
 void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
-    unsigned int n, imm_s, imm_r;
-    // It's probably okay to AND by extra bits.
-    if (!Is64Bit(Rn))
-        imm &= 0xFFFFFFFF;
-    if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
-        AND(Rd, Rn, imm_r, imm_s, n != 0);
-    } else if (imm == 0) {
-        MOVI2R(Rd, 0);
-    } else {
+    if (!TryANDI2R(Rd, Rn, imm)) {
         _assert_msg_(JIT, scratch != INVALID_REG, "ANDI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm);
         MOVI2R(scratch, imm);
         AND(Rd, Rn, scratch);
     }
 }
 void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
-    unsigned int n, imm_s, imm_r;
-    if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
-        ORR(Rd, Rn, imm_r, imm_s, n != 0);
-    } else if (imm == 0) {
-        if (Rd != Rn) {
-            MOV(Rd, Rn);
-        }
-    } else {
-        _assert_msg_(JIT, Is64Bit(Rn) || (imm & 0xFFFFFFFF00000000UL) == 0, "ORRI2R - more bits in imm than Rn");
+    if (!TryORRI2R(Rd, Rn, imm)) {
         _assert_msg_(JIT, scratch != INVALID_REG, "ORRI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm);
         MOVI2R(scratch, imm);
         ORR(Rd, Rn, scratch);
     }
 }
 void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
-    unsigned int n, imm_s, imm_r;
-    if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
-        EOR(Rd, Rn, imm_r, imm_s, n != 0);
-    } else if (imm == 0) {
-        if (Rd != Rn) {
-            MOV(Rd, Rn);
-        }
-    } else {
-        _assert_msg_(JIT, Is64Bit(Rn) || (imm & 0xFFFFFFFF00000000UL) == 0, "EORI2R - more bits in imm than Rn");
+    if (!TryEORI2R(Rd, Rn, imm)) {
         _assert_msg_(JIT, scratch != INVALID_REG, "EORI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm);
         MOVI2R(scratch, imm);
         EOR(Rd, Rn, scratch);
     }
 }
 void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
     if (!Is64Bit(Rn))
         imm &= 0xFFFFFFFF;
     unsigned int n, imm_s, imm_r;
     if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
         ANDS(Rd, Rn, imm_r, imm_s, n != 0);
     } else if (imm == 0) {
-        ANDS(Rd, Rn, Is64Bit(Rn) ? ZR : WZR, ArithOption(Rd, ST_LSL, 0));
+        ANDS(Rd, Rn, Is64Bit(Rn) ? ZR : WZR);
     } else {
         _assert_msg_(JIT, scratch != INVALID_REG, "ANDSI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm);
         MOVI2R(scratch, imm);
@@ -3722,42 +3708,30 @@ void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
 }
 void ARM64XEmitter::ADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
-    u32 val;
-    bool shift;
-    if (IsImmArithmetic(imm, &val, &shift)) {
-        ADD(Rd, Rn, val, shift);
-    } else {
+    if (!TryADDI2R(Rd, Rn, imm)) {
         _assert_msg_(JIT, scratch != INVALID_REG, "ADDI2R - failed to construct arithmetic immediate value from %08x, need scratch", (u32)imm);
         MOVI2R(scratch, imm);
         ADD(Rd, Rn, scratch);
     }
 }
 void ARM64XEmitter::SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
-    u32 val;
-    bool shift;
-    if (IsImmArithmetic(imm, &val, &shift)) {
-        SUB(Rd, Rn, val, shift);
-    } else {
+    if (!TrySUBI2R(Rd, Rn, imm)) {
         _assert_msg_(JIT, scratch != INVALID_REG, "SUBI2R - failed to construct arithmetic immediate value from %08x, need scratch", (u32)imm);
         MOVI2R(scratch, imm);
         SUB(Rd, Rn, scratch);
     }
 }
 void ARM64XEmitter::CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
-    u32 val;
-    bool shift;
-    if (IsImmArithmetic(imm, &val, &shift)) {
-        CMP(Rn, val, shift);
-    } else {
+    if (!TryCMPI2R(Rn, imm)) {
         _assert_msg_(JIT, scratch != INVALID_REG, "CMPI2R - failed to construct arithmetic immediate value from %08x, need scratch", (u32)imm);
         MOVI2R(scratch, imm);
         CMP(Rn, scratch);
     }
 }
-bool ARM64XEmitter::TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
+bool ARM64XEmitter::TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
     u32 val;
     bool shift;
     if (IsImmArithmetic(imm, &val, &shift)) {
@@ -3768,7 +3742,7 @@ bool ARM64XEmitter::TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
     }
 }
-bool ARM64XEmitter::TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
+bool ARM64XEmitter::TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
     u32 val;
     bool shift;
     if (IsImmArithmetic(imm, &val, &shift)) {
@@ -3779,7 +3753,7 @@ bool ARM64XEmitter::TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
     }
 }
-bool ARM64XEmitter::TryCMPI2R(ARM64Reg Rn, u32 imm) {
+bool ARM64XEmitter::TryCMPI2R(ARM64Reg Rn, u64 imm) {
     u32 val;
     bool shift;
     if (IsImmArithmetic(imm, &val, &shift)) {
@@ -3790,9 +3764,11 @@ bool ARM64XEmitter::TryCMPI2R(ARM64Reg Rn, u32 imm) {
     }
 }
-bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
+bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
+    if (!Is64Bit(Rn))
+        imm &= 0xFFFFFFFF;
     u32 n, imm_r, imm_s;
-    if (IsImmLogical(imm, 32, &n, &imm_s, &imm_r)) {
+    if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
         AND(Rd, Rn, imm_r, imm_s, n != 0);
         return true;
     } else if (imm == 0) {
@@ -3802,9 +3778,10 @@ bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
         return false;
     }
 }
-bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
+bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
+    _assert_msg_(JIT, Is64Bit(Rn) || (imm & 0xFFFFFFFF00000000UL) == 0, "TryORRI2R - more bits in imm than Rn");
     u32 n, imm_r, imm_s;
-    if (IsImmLogical(imm, 32, &n, &imm_s, &imm_r)) {
+    if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
         ORR(Rd, Rn, imm_r, imm_s, n != 0);
         return true;
     } else if (imm == 0) {
@@ -3816,9 +3793,10 @@ bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
         return false;
     }
 }
-bool ARM64XEmitter::TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) {
+bool ARM64XEmitter::TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
+    _assert_msg_(JIT, Is64Bit(Rn) || (imm & 0xFFFFFFFF00000000UL) == 0, "TryEORI2R - more bits in imm than Rn");
     u32 n, imm_r, imm_s;
-    if (IsImmLogical(imm, 32, &n, &imm_s, &imm_r)) {
+    if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
         EOR(Rd, Rn, imm_r, imm_s, n != 0);
         return true;
     } else if (imm == 0) {
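
The TryANDI2R/TryORRI2R/TryEORI2R hunks above also make the helpers width-aware: instead of always encoding against 32 bits, they now pass Is64Bit(Rn) ? 64 : 32 to IsImmLogical (masking the immediate down to 32 bits first in the AND case). For background, which the diff itself doesn't spell out: AArch64 logical instructions encode immediates as bitmask patterns (a rotated run of ones replicated across equal-sized sub-elements), so arbitrary constants may not be encodable and the Try* helpers can legitimately fail. A hedged usage illustration, assuming the post-commit API (registers and values here are invented for the example):

    // 0x00FF00FF is a valid bitmask immediate (a run of 8 ones replicated in
    // each 16-bit element), so one AND with an encoded immediate is emitted:
    ANDI2R(W0, W1, 0x00FF00FF);             // no scratch register needed
    // 0x12345678 is not a bitmask pattern; without a scratch register the
    // emitter would assert, so one is supplied for the fallback:
    ANDI2R(W0, W1, 0x12345678, SCRATCH1);   // emits MOVI2R + register AND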
Common/Arm64Emitter.h
@@ -714,13 +714,13 @@ class ARM64XEmitter
     void SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
     void SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
-    bool TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
-    bool TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
-    bool TryCMPI2R(ARM64Reg Rn, u32 imm);
+    bool TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
+    bool TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
+    bool TryCMPI2R(ARM64Reg Rn, u64 imm);
-    bool TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
-    bool TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
-    bool TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
+    bool TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
+    bool TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
+    bool TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
     // Pseudo-instruction for convenience. PUSH pushes 16 bytes even though we only push a single register.
     // This is so the stack pointer is always 16-byte aligned, which is checked by hardware!
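
One consequence of widening the Try* parameters from u32 to u64 shows up at a callsite below: a signed 32-bit MIPS immediate now needs an explicit (u32) cast, presumably because converting s32 directly to u64 sign-extends. A small illustration of the difference (plain C++; the values are illustrative):

    s32 simm = -1;
    u64 widened = (u64)simm;       // 0xFFFFFFFFFFFFFFFF - sign-extended
    u64 masked  = (u64)(u32)simm;  // 0x00000000FFFFFFFF - the old u32 behavior

Without the cast, a negative immediate would arrive sign-extended as a huge 64-bit value; casting through u32 first keeps the helper seeing exactly the 32-bit pattern the old u32 parameter received.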
Core/MIPS/ARM64/Arm64CompALU.cpp
@@ -57,7 +57,7 @@ static u32 EvalAnd(u32 a, u32 b) { return a & b; }
 static u32 EvalAdd(u32 a, u32 b) { return a + b; }
 static u32 EvalSub(u32 a, u32 b) { return a - b; }
-void Arm64Jit::CompImmLogic(MIPSGPReg rs, MIPSGPReg rt, u32 uimm, void (ARM64XEmitter::*arith)(ARM64Reg dst, ARM64Reg src, ARM64Reg src2), bool (ARM64XEmitter::*tryArithI2R)(ARM64Reg dst, ARM64Reg src, u32 val), u32 (*eval)(u32 a, u32 b)) {
+void Arm64Jit::CompImmLogic(MIPSGPReg rs, MIPSGPReg rt, u32 uimm, void (ARM64XEmitter::*arith)(ARM64Reg dst, ARM64Reg src, ARM64Reg src2), bool (ARM64XEmitter::*tryArithI2R)(ARM64Reg dst, ARM64Reg src, u64 val), u32 (*eval)(u32 a, u32 b)) {
     if (gpr.IsImm(rs)) {
         gpr.SetImm(rt, (*eval)(gpr.GetImm(rs), uimm));
     } else {
@@ -119,7 +119,7 @@ void Arm64Jit::Comp_IType(MIPSOpcode op) {
             break;
         }
         gpr.MapDirtyIn(rt, rs);
-        if (!TryCMPI2R(gpr.R(rs), simm)) {
+        if (!TryCMPI2R(gpr.R(rs), (u32)simm)) {
             gpr.SetRegImm(SCRATCH1, simm);
             CMP(gpr.R(rs), SCRATCH1);
         }
@@ -196,7 +196,7 @@ void Arm64Jit::Comp_RType2(MIPSOpcode op) {
     }
 }
-void Arm64Jit::CompType3(MIPSGPReg rd, MIPSGPReg rs, MIPSGPReg rt, void (ARM64XEmitter::*arith)(ARM64Reg dst, ARM64Reg rm, ARM64Reg rn), bool (ARM64XEmitter::*tryArithI2R)(ARM64Reg dst, ARM64Reg rm, u32 val), u32(*eval)(u32 a, u32 b), bool symmetric) {
+void Arm64Jit::CompType3(MIPSGPReg rd, MIPSGPReg rs, MIPSGPReg rt, void (ARM64XEmitter::*arith)(ARM64Reg dst, ARM64Reg rm, ARM64Reg rn), bool (ARM64XEmitter::*tryArithI2R)(ARM64Reg dst, ARM64Reg rm, u64 val), u32(*eval)(u32 a, u32 b), bool symmetric) {
     if (gpr.IsImm(rs) && gpr.IsImm(rt)) {
         gpr.SetImm(rd, (*eval)(gpr.GetImm(rs), gpr.GetImm(rt)));
         return;
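
CompImmLogic and CompType3 receive both the register-register emitter and its Try* helper as pointer-to-member functions, which is why their tryArithI2R parameter type has to change in lockstep with the Try* declarations: pointer-to-member types must match the target signature exactly. A minimal sketch of the dispatch, pieced together from the fragments above (assumed shape, not the verbatim body):

    void Arm64Jit::CompImmLogic(MIPSGPReg rs, MIPSGPReg rt, u32 uimm,
            void (ARM64XEmitter::*arith)(ARM64Reg, ARM64Reg, ARM64Reg),
            bool (ARM64XEmitter::*tryArithI2R)(ARM64Reg, ARM64Reg, u64),
            u32 (*eval)(u32, u32)) {
        if (gpr.IsImm(rs)) {
            // Both operands known: fold the result at compile time.
            gpr.SetImm(rt, (*eval)(gpr.GetImm(rs), uimm));
        } else {
            gpr.MapDirtyIn(rt, rs);
            // Prefer the single-instruction immediate form...
            if (!(this->*tryArithI2R)(gpr.R(rt), gpr.R(rs), uimm)) {
                // ...otherwise load the constant and use the register form.
                gpr.SetRegImm(SCRATCH1, uimm);
                (this->*arith)(gpr.R(rt), gpr.R(rs), SCRATCH1);
            }
        }
    }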
Core/MIPS/ARM64/Arm64Jit.h
@@ -220,8 +220,8 @@ class Arm64Jit : public Arm64Gen::ARM64CodeBlock, public JitInterface, public MI
     void BranchRSRTComp(MIPSOpcode op, CCFlags cc, bool likely);
     // Utilities to reduce duplicated code
-    void CompImmLogic(MIPSGPReg rs, MIPSGPReg rt, u32 uimm, void (ARM64XEmitter::*arith)(Arm64Gen::ARM64Reg dst, Arm64Gen::ARM64Reg src, Arm64Gen::ARM64Reg src2), bool (ARM64XEmitter::*tryArithI2R)(Arm64Gen::ARM64Reg dst, Arm64Gen::ARM64Reg src, u32 val), u32 (*eval)(u32 a, u32 b));
-    void CompType3(MIPSGPReg rd, MIPSGPReg rs, MIPSGPReg rt, void (ARM64XEmitter::*arithOp2)(Arm64Gen::ARM64Reg dst, Arm64Gen::ARM64Reg rm, Arm64Gen::ARM64Reg rn), bool (ARM64XEmitter::*tryArithI2R)(Arm64Gen::ARM64Reg dst, Arm64Gen::ARM64Reg rm, u32 val), u32 (*eval)(u32 a, u32 b), bool symmetric = false);
+    void CompImmLogic(MIPSGPReg rs, MIPSGPReg rt, u32 uimm, void (ARM64XEmitter::*arith)(Arm64Gen::ARM64Reg dst, Arm64Gen::ARM64Reg src, Arm64Gen::ARM64Reg src2), bool (ARM64XEmitter::*tryArithI2R)(Arm64Gen::ARM64Reg dst, Arm64Gen::ARM64Reg src, u64 val), u32 (*eval)(u32 a, u32 b));
+    void CompType3(MIPSGPReg rd, MIPSGPReg rs, MIPSGPReg rt, void (ARM64XEmitter::*arithOp2)(Arm64Gen::ARM64Reg dst, Arm64Gen::ARM64Reg rm, Arm64Gen::ARM64Reg rn), bool (ARM64XEmitter::*tryArithI2R)(Arm64Gen::ARM64Reg dst, Arm64Gen::ARM64Reg rm, u64 val), u32 (*eval)(u32 a, u32 b), bool symmetric = false);
     void CompShiftImm(MIPSOpcode op, Arm64Gen::ShiftType shiftType, int sa);
     void CompShiftVar(MIPSOpcode op, Arm64Gen::ShiftType shiftType);
     void CompVrotShuffle(u8 *dregs, int imm, VectorSize sz, bool negSin);
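
For completeness, the arithmetic helpers (TryADDI2R, TrySUBI2R, TryCMPI2R) all rest on IsImmArithmetic, which tests the AArch64 arithmetic-immediate form: a 12-bit value, optionally shifted left by 12. This is AArch64 background rather than anything the diff states, but it explains the val/shift out-parameters seen above:

    // Illustrative values for the arithmetic-immediate check:
    //   0x00000ABC  -> encodable: val = 0xABC, shift = false
    //   0x00ABC000  -> encodable: val = 0xABC, shift = true (0xABC << 12)
    //   0x00ABC123  -> not encodable (bits in both halves); TryADDI2R returns
    //                  false, and ADDI2R falls back to MOVI2R + register ADD.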
