Remove pow() splitting and cleanup backends.
Mike Pall committed May 23, 2020
1 parent 5655be4 · commit b2307c8
Showing 14 changed files with 95 additions and 222 deletions.
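In short: the removed asm_fpjoin_pow() helper matched the FPMATH(LOG2) -> MUL -> FPMATH(EXP2) IR chain produced by pow() splitting and fused it back into a single pow() call from each backend's asm_fpmath(); the new shared asm_pow() in lj_asm.c now dispatches directly to pow(), lj_vm_powi(), or the 64-bit FFI arithmetic helpers instead. Below is a minimal standalone C sketch (not LuaJIT source; example values are arbitrary) of the identity the old split form relied on:

/* Standalone sketch: for x > 0, x^y equals 2^(y*log2(x)), which is what
** the removed split form computed via the LOG2/EXP2 IR ops. Calling
** pow() directly, as the new asm_pow() path does, can round slightly
** differently from the split form. Compile with: cc sketch.c -lm
*/
#include <math.h>
#include <stdio.h>

int main(void)
{
  double x = 3.7, y = 2.5;              /* arbitrary example operands */
  double split  = exp2(y * log2(x));    /* old split form of x^y */
  double direct = pow(x, y);            /* what the backends emit now */
  printf("split:  %.17g\n", split);
  printf("direct: %.17g\n", direct);
  printf("delta:  %.17g\n", split - direct);
  return 0;
}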
3 changes: 0 additions & 3 deletions src/lj_arch.h
@@ -586,9 +586,6 @@
#if defined(__ANDROID__) || defined(__symbian__) || LJ_TARGET_XBOX360 || LJ_TARGET_WINDOWS
#define LUAJIT_NO_LOG2
#endif
#if defined(__symbian__) || LJ_TARGET_WINDOWS
#define LUAJIT_NO_EXP2
#endif
#if LJ_TARGET_CONSOLE || (LJ_TARGET_IOS && __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_8_0)
#define LJ_NO_SYSTEM 1
#endif
106 changes: 68 additions & 38 deletions src/lj_asm.c
@@ -1308,32 +1308,6 @@ static void asm_call(ASMState *as, IRIns *ir)
asm_gencall(as, ci, args);
}

#if !LJ_SOFTFP32
static void asm_fppow(ASMState *as, IRIns *ir, IRRef lref, IRRef rref)
{
const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
IRRef args[2];
args[0] = lref;
args[1] = rref;
asm_setupresult(as, ir, ci);
asm_gencall(as, ci, args);
}

static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
{
IRIns *irp = IR(ir->op1);
if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
IRIns *irpp = IR(irp->op1);
if (irpp == ir-2 && irpp->o == IR_FPMATH &&
irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
asm_fppow(as, ir, irpp->op1, irp->op2);
return 1;
}
}
return 0;
}
#endif

/* -- PHI and loop handling ----------------------------------------------- */

/* Break a PHI cycle by renaming to a free register (evict if needed). */
@@ -1604,6 +1578,62 @@ static void asm_loop(ASMState *as)
#error "Missing assembler for target CPU"
#endif

/* -- Common instruction helpers ------------------------------------------ */

#if !LJ_SOFTFP32
#if !LJ_TARGET_X86ORX64
#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp)
#define asm_fppowi(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi)
#endif

static void asm_pow(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
if (!irt_isnum(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
IRCALL_lj_carith_powu64);
else
#endif
if (irt_isnum(IR(ir->op2)->t))
asm_callid(as, ir, IRCALL_pow);
else
asm_fppowi(as, ir);
}

static void asm_div(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
if (!irt_isnum(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
IRCALL_lj_carith_divu64);
else
#endif
asm_fpdiv(as, ir);
}
#endif

static void asm_mod(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
if (!irt_isint(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
IRCALL_lj_carith_modu64);
else
#endif
asm_callid(as, ir, IRCALL_lj_vm_modi);
}

static void asm_fuseequal(ASMState *as, IRIns *ir)
{
/* Fuse HREF + EQ/NE. */
if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
as->curins--;
asm_href(as, ir-1, (IROp)ir->o);
} else {
asm_equal(as, ir);
}
}

/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
@@ -1626,14 +1656,7 @@ static void asm_ir(ASMState *as, IRIns *ir)
case IR_ABC:
asm_comp(as, ir);
break;
case IR_EQ: case IR_NE:
if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
as->curins--;
asm_href(as, ir-1, (IROp)ir->o);
} else {
asm_equal(as, ir);
}
break;
case IR_EQ: case IR_NE: asm_fuseequal(as, ir); break;

case IR_RETF: asm_retf(as, ir); break;

@@ -1702,7 +1725,13 @@ static void asm_ir(ASMState *as, IRIns *ir)
case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
case IR_TNEW: asm_tnew(as, ir); break;
case IR_TDUP: asm_tdup(as, ir); break;
case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
case IR_CNEW: case IR_CNEWI:
#if LJ_HASFFI
asm_cnew(as, ir);
#else
lua_assert(0);
#endif
break;

/* Buffer operations. */
case IR_BUFHDR: asm_bufhdr(as, ir); break;
@@ -2167,6 +2196,10 @@ static void asm_setup_regsp(ASMState *as)
if (inloop)
as->modset |= RSET_SCRATCH;
#if LJ_TARGET_X86
if (irt_isnum(IR(ir->op2)->t)) {
if (as->evenspill < 4) /* Leave room to call pow(). */
as->evenspill = 4;
}
break;
#else
ir->prev = REGSP_HINT(RID_FPRET);
@@ -2192,9 +2225,6 @@ static void asm_setup_regsp(ASMState *as)
continue;
}
break;
} else if (ir->op2 == IRFPM_EXP2 && !LJ_64) {
if (as->evenspill < 4) /* Leave room to call pow(). */
as->evenspill = 4;
}
#endif
if (inloop)
10 changes: 1 addition & 9 deletions src/lj_asm_arm.h
@@ -1268,8 +1268,6 @@ static void asm_cnew(ASMState *as, IRIns *ir)
ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir) ((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */
@@ -1364,8 +1362,6 @@ static void asm_callround(ASMState *as, IRIns *ir, int id)

static void asm_fpmath(ASMState *as, IRIns *ir)
{
if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
return;
if (ir->op2 <= IRFPM_TRUNC)
asm_callround(as, ir, ir->op2);
else if (ir->op2 == IRFPM_SQRT)
@@ -1507,14 +1503,10 @@ static void asm_mul(ASMState *as, IRIns *ir)
#define asm_mulov(as, ir) asm_mul(as, ir)

#if !LJ_SOFTFP
#define asm_div(as, ir) asm_fparith(as, ir, ARMI_VDIV_D)
#define asm_pow(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi)
#define asm_fpdiv(as, ir) asm_fparith(as, ir, ARMI_VDIV_D)
#define asm_abs(as, ir) asm_fpunary(as, ir, ARMI_VABS_D)
#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp)
#endif

#define asm_mod(as, ir) asm_callid(as, ir, IRCALL_lj_vm_modi)

static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
39 changes: 1 addition & 38 deletions src/lj_asm_arm64.h
@@ -1242,8 +1242,6 @@ static void asm_cnew(ASMState *as, IRIns *ir)
ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir) ((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */
@@ -1320,8 +1318,6 @@ static void asm_fpmath(ASMState *as, IRIns *ir)
} else if (fpm <= IRFPM_TRUNC) {
asm_fpunary(as, ir, fpm == IRFPM_FLOOR ? A64I_FRINTMd :
fpm == IRFPM_CEIL ? A64I_FRINTPd : A64I_FRINTZd);
} else if (fpm == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) {
return;
} else {
asm_callid(as, ir, IRCALL_lj_vm_floor + fpm);
}
@@ -1428,45 +1424,12 @@ static void asm_mul(ASMState *as, IRIns *ir)
asm_intmul(as, ir);
}

static void asm_div(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI
if (!irt_isnum(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
IRCALL_lj_carith_divu64);
else
#endif
asm_fparith(as, ir, A64I_FDIVd);
}

static void asm_pow(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI
if (!irt_isnum(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
IRCALL_lj_carith_powu64);
else
#endif
asm_callid(as, ir, IRCALL_lj_vm_powi);
}

#define asm_addov(as, ir) asm_add(as, ir)
#define asm_subov(as, ir) asm_sub(as, ir)
#define asm_mulov(as, ir) asm_mul(as, ir)

#define asm_fpdiv(as, ir) asm_fparith(as, ir, A64I_FDIVd)
#define asm_abs(as, ir) asm_fpunary(as, ir, A64I_FABS)
#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp)

static void asm_mod(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI
if (!irt_isint(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
IRCALL_lj_carith_modu64);
else
#endif
asm_callid(as, ir, IRCALL_lj_vm_modi);
}

static void asm_neg(ASMState *as, IRIns *ir)
{
38 changes: 2 additions & 36 deletions src/lj_asm_mips.h
@@ -1607,8 +1607,6 @@ static void asm_cnew(ASMState *as, IRIns *ir)
ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir) ((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */
@@ -1677,8 +1675,6 @@ static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
#if !LJ_SOFTFP32
static void asm_fpmath(ASMState *as, IRIns *ir)
{
if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
return;
#if !LJ_SOFTFP
if (ir->op2 <= IRFPM_TRUNC)
asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
@@ -1766,41 +1762,13 @@ static void asm_mul(ASMState *as, IRIns *ir)
}
}

static void asm_mod(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
if (!irt_isint(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
IRCALL_lj_carith_modu64);
else
#endif
asm_callid(as, ir, IRCALL_lj_vm_modi);
}

#if !LJ_SOFTFP32
static void asm_pow(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
if (!irt_isnum(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
IRCALL_lj_carith_powu64);
else
#endif
asm_callid(as, ir, IRCALL_lj_vm_powi);
}

static void asm_div(ASMState *as, IRIns *ir)
static void asm_fpdiv(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
if (!irt_isnum(ir->t))
asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
IRCALL_lj_carith_divu64);
else
#endif
#if !LJ_SOFTFP
asm_fparith(as, ir, MIPSI_DIV_D);
#else
asm_callid(as, ir, IRCALL_softfp_div);
#endif
}
#endif
@@ -1838,8 +1806,6 @@ static void asm_abs(ASMState *as, IRIns *ir)
}
#endif

#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp)

static void asm_arithov(ASMState *as, IRIns *ir)
{
/* TODO MIPSR6: bovc/bnvc. Caveat: no delay slot to load RID_TMP. */
9 changes: 1 addition & 8 deletions src/lj_asm_ppc.h
@@ -1174,8 +1174,6 @@ static void asm_cnew(ASMState *as, IRIns *ir)
ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir) ((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */
@@ -1246,8 +1244,6 @@ static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)

static void asm_fpmath(ASMState *as, IRIns *ir)
{
if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
return;
if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
asm_fpunary(as, ir, PPCI_FSQRT);
else
@@ -1361,9 +1357,7 @@ static void asm_mul(ASMState *as, IRIns *ir)
}
}

#define asm_div(as, ir) asm_fparith(as, ir, PPCI_FDIV)
#define asm_mod(as, ir) asm_callid(as, ir, IRCALL_lj_vm_modi)
#define asm_pow(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi)
#define asm_fpdiv(as, ir) asm_fparith(as, ir, PPCI_FDIV)

static void asm_neg(ASMState *as, IRIns *ir)
{
@@ -1387,7 +1381,6 @@ static void asm_neg(ASMState *as, IRIns *ir)
}

#define asm_abs(as, ir) asm_fpunary(as, ir, PPCI_FABS)
#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp)

static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
{
