diff --git a/src/bench_internal.c b/src/bench_internal.c
index 9071724331e1e..a8f4e9e12f6d1 100644
--- a/src/bench_internal.c
+++ b/src/bench_internal.c
@@ -253,7 +253,7 @@ void bench_wnaf_const(void* arg) {
     bench_inv *data = (bench_inv*)arg;
 
     for (i = 0; i < 20000; i++) {
-        secp256k1_wnaf_const(data->wnaf, data->scalar_x, WINDOW_A, 256);
+        secp256k1_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256);
         secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
     }
 }
diff --git a/src/ecmult_const_impl.h b/src/ecmult_const_impl.h
index 8411752eb069f..b599316b86adb 100644
--- a/src/ecmult_const_impl.h
+++ b/src/ecmult_const_impl.h
@@ -48,7 +48,7 @@
  *
  * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
  */
-static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) {
+static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
     int global_sign;
     int skew = 0;
     int word = 0;
@@ -59,7 +59,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size)
 
     int flip;
     int bit;
-    secp256k1_scalar neg_s;
+    secp256k1_scalar s;
     int not_neg_one;
     /* Note that we cannot handle even numbers by negating them to be odd, as is
      * done in other implementations, since if our scalars were specified to have
@@ -75,12 +75,13 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size)
      * {1, 2} we want to add to the scalar when ensuring that it's odd. Further
      * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
      * we need to special-case it in this logic. */
-    flip = secp256k1_scalar_is_high(&s);
+    flip = secp256k1_scalar_is_high(scalar);
     /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
-    bit = flip ^ !secp256k1_scalar_is_even(&s);
+    bit = flip ^ !secp256k1_scalar_is_even(scalar);
     /* We check for negative one, since adding 2 to it will cause an overflow */
-    secp256k1_scalar_negate(&neg_s, &s);
-    not_neg_one = !secp256k1_scalar_is_one(&neg_s);
+    secp256k1_scalar_negate(&s, scalar);
+    not_neg_one = !secp256k1_scalar_is_one(&s);
+    s = *scalar;
     secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
     /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
      * that we added two to it and flipped it. In fact for -1 these operations are
@@ -132,7 +133,6 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
     int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
 
     int i;
-    secp256k1_scalar sc = *scalar;
 
     /* build wnaf representation for q. */
     int rsize = size;
@@ -140,13 +140,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
     if (size > 128) {
         rsize = 128;
         /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
-        secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
-        skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1, 128);
-        skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1, 128);
+        secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
+        skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
+        skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
     } else
 #endif
     {
-        skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1, size);
+        skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
 #ifdef USE_ENDOMORPHISM
         skew_lam = 0;
 #endif
diff --git a/src/tests.c b/src/tests.c
index f1c4db929a776..5566560e88957 100644
--- a/src/tests.c
+++ b/src/tests.c
@@ -3050,7 +3050,7 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) {
     }
     bits = 128;
 #endif
-    skew = secp256k1_wnaf_const(wnaf, num, w, bits);
+    skew = secp256k1_wnaf_const(wnaf, &num, w, bits);
 
     for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) {
         secp256k1_scalar t;
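
For context, a minimal caller-side sketch (not part of the diff) of what the signature change means: secp256k1_wnaf_const now takes a const secp256k1_scalar pointer instead of a scalar by value, and makes its own local copy internally (the `s = *scalar;` line above). The function name demo_wnaf_caller is hypothetical, and the snippet assumes it is compiled in the library-internal context where secp256k1_wnaf_const, WNAF_SIZE and WINDOW_A are in scope, as in bench_internal.c.

/* Hypothetical caller, for illustration only. */
static void demo_wnaf_caller(const secp256k1_scalar *scalar) {
    int wnaf[1 + WNAF_SIZE(WINDOW_A - 1)];
    int skew;

    /* Old signature copied the whole secp256k1_scalar struct at the call site:
     *     skew = secp256k1_wnaf_const(wnaf, *scalar, WINDOW_A - 1, 256);
     * New signature passes a const pointer instead: */
    skew = secp256k1_wnaf_const(wnaf, scalar, WINDOW_A - 1, 256);
    (void)skew;
}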