Revert to trivial pow() optimizations to prevent inaccuracies.
Mike Pall committed Mar 8, 2022
1 parent aa0550e commit 96d6d50
Showing 18 changed files with 45 additions and 157 deletions.
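Background for the diff below: the reverted fast paths evaluated x^k for small integer exponents by repeated squaring (lj_vm_powi/lj_vm_powui, removed from src/lj_vmmath.c further down) instead of calling the C library's pow(). Every intermediate multiply rounds to double, so that path can return a result that differs from pow(x, y) in the last bits, which is the inaccuracy the commit message refers to. A minimal standalone sketch of the comparison, illustrative only and not LuaJIT code:

    /* Square-and-multiply in the spirit of the removed lj_vm_powui,
    ** compared against the C library pow().  Each multiply rounds to
    ** double, so the two results may differ in the final bits for
    ** some inputs.
    */
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    static double pow_by_squaring(double x, uint32_t k)
    {
      double y = 1.0;
      while (k) {                 /* O(log k) rounded multiplies. */
        if (k & 1) y *= x;
        x *= x;
        k >>= 1;
      }
      return y;
    }

    int main(void)
    {
      double x = 1.1;
      double a = pow_by_squaring(x, 97);
      double b = pow(x, 97.0);
      printf("squaring: %.17g  pow(): %.17g  %s\n",
             a, b, a == b ? "(equal)" : "(differ)");
      return 0;
    }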
3 changes: 1 addition & 2 deletions src/lj_asm.c
@@ -1680,8 +1680,7 @@ static void asm_pow(ASMState *as, IRIns *ir)
IRCALL_lj_carith_powu64);
else
#endif
asm_callid(as, ir, irt_isnum(IR(ir->op2)->t) ? IRCALL_lj_vm_pow :
IRCALL_lj_vm_powi);
asm_callid(as, ir, IRCALL_pow);
}

static void asm_div(ASMState *as, IRIns *ir)
2 changes: 1 addition & 1 deletion src/lj_dispatch.h
@@ -44,7 +44,7 @@ extern double __divdf3(double a, double b);
#define GOTDEF(_) \
_(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \
_(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \
_(lj_vm_pow) _(fmod) _(ldexp) _(lj_vm_modi) \
_(pow) _(fmod) _(ldexp) _(lj_vm_modi) \
_(lj_dispatch_call) _(lj_dispatch_ins) _(lj_dispatch_stitch) \
_(lj_dispatch_profile) _(lj_err_throw) \
_(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \
4 changes: 2 additions & 2 deletions src/lj_ffrecord.c
@@ -638,8 +638,8 @@ static void LJ_FASTCALL recff_math_call(jit_State *J, RecordFFData *rd)

static void LJ_FASTCALL recff_math_pow(jit_State *J, RecordFFData *rd)
{
J->base[0] = lj_opt_narrow_pow(J, J->base[0], J->base[1],
&rd->argv[0], &rd->argv[1]);
J->base[0] = lj_opt_narrow_arith(J, J->base[0], J->base[1],
&rd->argv[0], &rd->argv[1], IR_POW);
UNUSED(rd);
}

3 changes: 1 addition & 2 deletions src/lj_ircall.h
@@ -217,8 +217,7 @@ typedef struct CCallInfo {
_(FPMATH, sqrt, 1, N, NUM, XA_FP) \
_(ANY, log, 1, N, NUM, XA_FP) \
_(ANY, lj_vm_log2, 1, N, NUM, XA_FP) \
_(ANY, lj_vm_powi, 2, N, NUM, XA_FP) \
_(ANY, lj_vm_pow, 2, N, NUM, XA2_FP) \
_(ANY, pow, 2, N, NUM, XA2_FP) \
_(ANY, atan2, 2, N, NUM, XA2_FP) \
_(ANY, ldexp, 2, N, NUM, XA_FP) \
_(SOFTFP, lj_vm_tobit, 1, N, INT, XA_FP32) \
1 change: 0 additions & 1 deletion src/lj_iropt.h
@@ -145,7 +145,6 @@ LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
TValue *vb, TValue *vc, IROp op);
LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc);
LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc);
LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc);
LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase);

/* Optimization passes. */
37 changes: 8 additions & 29 deletions src/lj_opt_fold.c
@@ -236,14 +236,10 @@ LJFOLDF(kfold_fpcall2)
return NEXTFOLD;
}

LJFOLD(POW KNUM KINT)
LJFOLD(POW KNUM KNUM)
LJFOLDF(kfold_numpow)
{
lua_Number a = knumleft;
lua_Number b = fright->o == IR_KINT ? (lua_Number)fright->i : knumright;
lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD);
return lj_ir_knum(J, y);
return lj_ir_knum(J, lj_vm_foldarith(knumleft, knumright, IR_POW - IR_ADD));
}

/* Must not use kfold_kref for numbers (could be NaN). */
@@ -1113,34 +1109,17 @@ LJFOLDF(simplify_nummuldiv_negneg)
return RETRYFOLD;
}

LJFOLD(POW any KINT)
LJFOLDF(simplify_numpow_xkint)
LJFOLD(POW any KNUM)
LJFOLDF(simplify_numpow_k)
{
int32_t k = fright->i;
TRef ref = fins->op1;
if (k == 0) /* x ^ 0 ==> 1 */
if (knumright == 0) /* x ^ 0 ==> 1 */
return lj_ir_knum_one(J); /* Result must be a number, not an int. */
if (k == 1) /* x ^ 1 ==> x */
else if (knumright == 1) /* x ^ 1 ==> x */
return LEFTFOLD;
if ((uint32_t)(k+65536) > 2*65536u) /* Limit code explosion. */
else if (knumright == 2) /* x ^ 2 ==> x * x */
return emitir(IRTN(IR_MUL), fins->op1, fins->op1);
else
return NEXTFOLD;
if (k < 0) { /* x ^ (-k) ==> (1/x) ^ k. */
ref = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), ref);
k = -k;
}
/* Unroll x^k for 1 <= k <= 65536. */
for (; (k & 1) == 0; k >>= 1) /* Handle leading zeros. */
ref = emitir(IRTN(IR_MUL), ref, ref);
if ((k >>= 1) != 0) { /* Handle trailing bits. */
TRef tmp = emitir(IRTN(IR_MUL), ref, ref);
for (; k != 1; k >>= 1) {
if (k & 1)
ref = emitir(IRTN(IR_MUL), ref, tmp);
tmp = emitir(IRTN(IR_MUL), tmp, tmp);
}
ref = emitir(IRTN(IR_MUL), ref, tmp);
}
return ref;
}

/* -- Simplify conversions ------------------------------------------------ */
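With the exponent unrolling gone, the only strength reductions left for a constant exponent are the trivial cases kept in simplify_numpow_k above; any other constant falls through to NEXTFOLD and ends up as a call to pow(). A hedged C sketch of the equivalent decision, illustrative rather than the fold engine itself:

    /* Surviving trivial folds: only x^0, x^1 and x^2 are rewritten;
    ** every other exponent is handed to pow() unchanged.
    */
    #include <math.h>

    static double fold_pow_const(double x, double k)
    {
      if (k == 0) return 1.0;         /* x ^ 0 ==> 1 (always a number) */
      else if (k == 1) return x;      /* x ^ 1 ==> x */
      else if (k == 2) return x * x;  /* x ^ 2 ==> x * x */
      else return pow(x, k);          /* no fold */
    }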
24 changes: 0 additions & 24 deletions src/lj_opt_narrow.c
@@ -584,30 +584,6 @@ TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc)
return emitir(IRTN(IR_SUB), rb, tmp);
}

/* Narrowing of power operator or math.pow. */
TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc)
{
rb = conv_str_tonum(J, rb, vb);
rb = lj_ir_tonum(J, rb); /* Left arg is always treated as an FP number. */
rc = conv_str_tonum(J, rc, vc);
if (tvisint(vc) || numisint(numV(vc))) {
int32_t k = numberVint(vc);
if (!(k >= -65536 && k <= 65536)) goto force_pow_num;
if (!tref_isinteger(rc)) {
/* Guarded conversion to integer! */
rc = emitir(IRTGI(IR_CONV), rc, IRCONV_INT_NUM|IRCONV_CHECK);
}
if (!tref_isk(rc)) { /* Range guard: -65536 <= i <= 65536 */
TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536));
emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536));
}
} else {
force_pow_num:
rc = lj_ir_tonum(J, rc); /* Want POW(num, num), not POW(num, int). */
}
return emitir(IRTN(IR_POW), rb, rc);
}

/* -- Predictive narrowing of induction variables ------------------------- */

/* Narrow a single runtime value. */
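The deleted lj_opt_narrow_pow above kept small integer exponents (|k| <= 65536) in integer form, guarding them with one biased unsigned comparison so the backend could take the integer-power path; recording now routes BC_POW and math.pow through lj_opt_narrow_arith like the other FP arithmetic ops. A small sketch of that range-guard idiom, with an assumed name:

    /* Biased unsigned compare: adding 65536 folds the two-sided test
    ** -65536 <= k <= 65536 into a single unsigned <=.
    */
    #include <stdint.h>

    static int pow_exponent_in_range(int32_t k)
    {
      return (uint32_t)k + 65536u <= 2u * 65536u;
    }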
2 changes: 1 addition & 1 deletion src/lj_opt_split.c
@@ -400,7 +400,7 @@ static void split_ir(jit_State *J)
hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div);
break;
case IR_POW:
hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi);
hi = split_call_li(J, hisubst, oir, ir, IRCALL_pow);
break;
case IR_FPMATH:
hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2);
2 changes: 1 addition & 1 deletion src/lj_record.c
@@ -2394,7 +2394,7 @@ void lj_record_ins(jit_State *J)

case BC_POW:
if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
rc = lj_opt_narrow_pow(J, rb, rc, rbv, rcv);
rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv, IR_POW);
else
rc = rec_mm_arith(J, &ix, MM_pow);
break;
3 changes: 0 additions & 3 deletions src/lj_vm.h
@@ -98,9 +98,6 @@ LJ_ASMF int lj_vm_errno(void);
LJ_ASMF TValue *lj_vm_next(GCtab *t, uint32_t idx);
#endif

LJ_ASMF double lj_vm_powi(double, int32_t);
LJ_ASMF double lj_vm_pow(double, double);

/* Continuations for metamethods. */
LJ_ASMF void lj_cont_cat(void); /* Continue with concatenation. */
LJ_ASMF void lj_cont_ra(void); /* Store result in RA from instruction. */
44 changes: 2 additions & 42 deletions src/lj_vmmath.c
@@ -30,52 +30,12 @@ LJ_FUNCA double lj_wrap_sinh(double x) { return sinh(x); }
LJ_FUNCA double lj_wrap_cosh(double x) { return cosh(x); }
LJ_FUNCA double lj_wrap_tanh(double x) { return tanh(x); }
LJ_FUNCA double lj_wrap_atan2(double x, double y) { return atan2(x, y); }
LJ_FUNCA double lj_wrap_pow(double x, double y) { return pow(x, y); }
LJ_FUNCA double lj_wrap_fmod(double x, double y) { return fmod(x, y); }
#endif

/* -- Helper functions ---------------------------------------------------- */

/* Unsigned x^k. */
static double lj_vm_powui(double x, uint32_t k)
{
double y;
lj_assertX(k != 0, "pow with zero exponent");
for (; (k & 1) == 0; k >>= 1) x *= x;
y = x;
if ((k >>= 1) != 0) {
for (;;) {
x *= x;
if (k == 1) break;
if (k & 1) y *= x;
k >>= 1;
}
y *= x;
}
return y;
}

/* Signed x^k. */
double lj_vm_powi(double x, int32_t k)
{
if (k > 1)
return lj_vm_powui(x, (uint32_t)k);
else if (k == 1)
return x;
else if (k == 0)
return 1.0;
else
return 1.0 / lj_vm_powui(x, (uint32_t)-k);
}

double lj_vm_pow(double x, double y)
{
int32_t k = lj_num2int(y);
if ((k >= -65536 && k <= 65536) && y == (double)k)
return lj_vm_powi(x, k);
else
return pow(x, y);
}

double lj_vm_foldarith(double x, double y, int op)
{
switch (op) {
@@ -84,7 +44,7 @@ double lj_vm_foldarith(double x, double y, int op)
case IR_MUL - IR_ADD: return x*y; break;
case IR_DIV - IR_ADD: return x/y; break;
case IR_MOD - IR_ADD: return x-lj_vm_floor(x/y)*y; break;
case IR_POW - IR_ADD: return lj_vm_pow(x, y); break;
case IR_POW - IR_ADD: return pow(x, y); break;
case IR_NEG - IR_ADD: return -x; break;
case IR_ABS - IR_ADD: return fabs(x); break;
#if LJ_HASJIT
13 changes: 5 additions & 8 deletions src/vm_arm.dasc
@@ -1477,11 +1477,11 @@ static void build_subroutines(BuildCtx *ctx)
|.endif
|.endmacro
|
|.macro math_extern2, name, func
|.macro math_extern2, func
|.if HFABI
| .ffunc_dd math_ .. name
| .ffunc_dd math_ .. func
|.else
| .ffunc_nn math_ .. name
| .ffunc_nn math_ .. func
|.endif
| .IOS mov RA, BASE
| bl extern func
@@ -1492,9 +1492,6 @@
| b ->fff_restv
|.endif
|.endmacro
|.macro math_extern2, func
| math_extern2 func, func
|.endmacro
|
|.if FPU
| .ffunc_d math_sqrt
@@ -1540,7 +1537,7 @@
| math_extern sinh
| math_extern cosh
| math_extern tanh
| math_extern2 pow, lj_vm_pow
| math_extern2 pow
| math_extern2 atan2
| math_extern2 fmod
|
@@ -3206,7 +3203,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
break;
case BC_POW:
| // NYI: (partial) integer arithmetic.
| ins_arithfp extern, extern lj_vm_pow
| ins_arithfp extern, extern pow
break;

case BC_CAT:
11 changes: 4 additions & 7 deletions src/vm_arm64.dasc
@@ -1387,14 +1387,11 @@ static void build_subroutines(BuildCtx *ctx)
| b ->fff_resn
|.endmacro
|
|.macro math_extern2, name, func
| .ffunc_nn math_ .. name
|.macro math_extern2, func
| .ffunc_nn math_ .. func
| bl extern func
| b ->fff_resn
|.endmacro
|.macro math_extern2, func
| math_extern2 func, func
|.endmacro
|
|.ffunc_n math_sqrt
| fsqrt d0, d0
@@ -1423,7 +1420,7 @@
| math_extern sinh
| math_extern cosh
| math_extern tanh
| math_extern2 pow, lj_vm_pow
| math_extern2 pow
| math_extern2 atan2
| math_extern2 fmod
|
@@ -2677,7 +2674,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
| ins_arithload FARG1, FARG2
| ins_arithfallback ins_arithcheck_num
|.if "fpins" == "fpow"
| bl extern lj_vm_pow
| bl extern pow
|.else
| fpins FARG1, FARG1, FARG2
|.endif
11 changes: 4 additions & 7 deletions src/vm_mips.dasc
@@ -1623,17 +1623,14 @@ static void build_subroutines(BuildCtx *ctx)
|. nop
|.endmacro
|
|.macro math_extern2, name, func
| .ffunc_nn math_ .. name
|.macro math_extern2, func
| .ffunc_nn math_ .. func
|. load_got func
| call_extern
|. nop
| b ->fff_resn
|. nop
|.endmacro
|.macro math_extern2, func
| math_extern2 func, func
|.endmacro
|
|// TODO: Return integer type if result is integer (own sf implementation).
|.macro math_round, func
Expand Down Expand Up @@ -1687,7 +1684,7 @@ static void build_subroutines(BuildCtx *ctx)
| math_extern sinh
| math_extern cosh
| math_extern tanh
| math_extern2 pow, lj_vm_pow
| math_extern2 pow
| math_extern2 atan2
| math_extern2 fmod
|
@@ -3692,7 +3689,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
| sltiu AT, SFARG1HI, LJ_TISNUM
| sltiu TMP0, SFARG2HI, LJ_TISNUM
| and AT, AT, TMP0
| load_got lj_vm_pow
| load_got pow
| beqz AT, ->vmeta_arith
|. addu RA, BASE, RA
|.if FPU
11 changes: 4 additions & 7 deletions src/vm_mips64.dasc
@@ -1667,17 +1667,14 @@ static void build_subroutines(BuildCtx *ctx)
|. nop
|.endmacro
|
|.macro math_extern2, name, func
| .ffunc_nn math_ .. name
|.macro math_extern2, func
| .ffunc_nn math_ .. func
|. load_got func
| call_extern
|. nop
| b ->fff_resn
|. nop
|.endmacro
|.macro math_extern2, func
| math_extern2 func, func
|.endmacro
|
|// TODO: Return integer type if result is integer (own sf implementation).
|.macro math_round, func
Expand Down Expand Up @@ -1731,7 +1728,7 @@ static void build_subroutines(BuildCtx *ctx)
| math_extern sinh
| math_extern cosh
| math_extern tanh
| math_extern2 pow, lj_vm_pow
| math_extern2 pow
| math_extern2 atan2
| math_extern2 fmod
|
@@ -3918,7 +3915,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
| sltiu TMP0, TMP0, LJ_TISNUM
| sltiu TMP1, TMP1, LJ_TISNUM
| and AT, TMP0, TMP1
| load_got lj_vm_pow
| load_got pow
| beqz AT, ->vmeta_arith
|. daddu RA, BASE, RA
|.if FPU
