From 4bc429019dc4bff6af0f9824ad6ab6745f09f8ba Mon Sep 17 00:00:00 2001
From: Russell O'Connor
Date: Tue, 25 Oct 2022 10:37:22 -0400
Subject: [PATCH] Add a secp256k1_i128_to_u64 function.

---
 src/int128.h             |  7 ++++-
 src/int128_native_impl.h |  5 +++
 src/int128_struct_impl.h |  8 ++++-
 src/modinv64_impl.h      | 66 ++++++++++++++++++++--------------------
 src/tests.c              | 16 +++++-----
 5 files changed, 59 insertions(+), 43 deletions(-)

diff --git a/src/int128.h b/src/int128.h
index 84d969a2367ba..8c1c061a8c854 100644
--- a/src/int128.h
+++ b/src/int128.h
@@ -66,7 +66,12 @@ static SECP256K1_INLINE void secp256k1_i128_det(secp256k1_int128 *r, int64_t a,
  */
 static SECP256K1_INLINE void secp256k1_i128_rshift(secp256k1_int128 *r, unsigned int b);
 
-/* Return the low 64-bits of a 128-bit value interpreted as an signed 64-bit value. */
+/* Return the input value modulo 2^64. */
+static SECP256K1_INLINE uint64_t secp256k1_i128_to_u64(const secp256k1_int128 *a);
+
+/* Return the value as a signed 64-bit value.
+ * Requires the input to be between INT64_MIN and INT64_MAX.
+ */
 static SECP256K1_INLINE int64_t secp256k1_i128_to_i64(const secp256k1_int128 *a);
 
 /* Write a signed 64-bit value to r. */
diff --git a/src/int128_native_impl.h b/src/int128_native_impl.h
index e4b7f4106c0ff..30c26f2d39578 100644
--- a/src/int128_native_impl.h
+++ b/src/int128_native_impl.h
@@ -67,7 +67,12 @@ static SECP256K1_INLINE void secp256k1_i128_rshift(secp256k1_int128 *r, unsigned
     *r >>= n;
 }
 
+static SECP256K1_INLINE uint64_t secp256k1_i128_to_u64(const secp256k1_int128 *a) {
+    return (uint64_t)*a;
+}
+
 static SECP256K1_INLINE int64_t secp256k1_i128_to_i64(const secp256k1_int128 *a) {
+    VERIFY_CHECK(INT64_MIN <= *a && *a <= INT64_MAX);
     return *a;
 }
 
diff --git a/src/int128_struct_impl.h b/src/int128_struct_impl.h
index b5f8fb7b650c9..298a7bb1acd6a 100644
--- a/src/int128_struct_impl.h
+++ b/src/int128_struct_impl.h
@@ -170,8 +170,14 @@ static SECP256K1_INLINE void secp256k1_i128_rshift(secp256k1_int128 *r, unsigned
     }
 }
 
+static SECP256K1_INLINE uint64_t secp256k1_i128_to_u64(const secp256k1_int128 *a) {
+    return a->lo;
+}
+
 static SECP256K1_INLINE int64_t secp256k1_i128_to_i64(const secp256k1_int128 *a) {
-    return (int64_t)a->lo;
+    /* Verify that a represents a 64 bit signed value by checking that the high bits are a sign extension of the low bits. */
+    VERIFY_CHECK(a->hi == -(a->lo >> 63));
+    return (int64_t)secp256k1_i128_to_u64(a);
 }
 
 static SECP256K1_INLINE void secp256k1_i128_from_i64(secp256k1_int128 *r, int64_t a) {
diff --git a/src/modinv64_impl.h b/src/modinv64_impl.h
index 50be2e5e784a2..2b5463a3ff4ca 100644
--- a/src/modinv64_impl.h
+++ b/src/modinv64_impl.h
@@ -39,13 +39,13 @@ static const secp256k1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}};
 /* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62).
  */
 static void secp256k1_modinv64_mul_62(secp256k1_modinv64_signed62 *r, const secp256k1_modinv64_signed62 *a, int alen, int64_t factor) {
-    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
+    const uint64_t M62 = UINT64_MAX >> 2;
     secp256k1_int128 c, d;
     int i;
     secp256k1_i128_from_i64(&c, 0);
     for (i = 0; i < 4; ++i) {
         if (i < alen) secp256k1_i128_accum_mul(&c, a->v[i], factor);
-        r->v[i] = secp256k1_i128_to_i64(&c) & M62; secp256k1_i128_rshift(&c, 62);
+        r->v[i] = secp256k1_i128_to_u64(&c) & M62; secp256k1_i128_rshift(&c, 62);
     }
     if (4 < alen) secp256k1_i128_accum_mul(&c, a->v[4], factor);
     secp256k1_i128_from_i64(&d, secp256k1_i128_to_i64(&c));
@@ -314,7 +314,7 @@ static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint
  * This implements the update_de function from the explanation.
  */
 static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp256k1_modinv64_signed62 *e, const secp256k1_modinv64_trans2x2 *t, const secp256k1_modinv64_modinfo* modinfo) {
-    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
+    const uint64_t M62 = UINT64_MAX >> 2;
     const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4];
     const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4];
     const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
@@ -327,8 +327,8 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
     VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
     VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */
-    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */
-    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */
+    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) <= (int64_t)1 << 62); /* |u|+|v| <= 2^62 */
+    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) <= (int64_t)1 << 62); /* |q|+|r| <= 2^62 */
 #endif
     /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
     sd = d4 >> 63;
@@ -341,14 +341,14 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
     secp256k1_i128_mul(&ce, q, d0);
     secp256k1_i128_accum_mul(&ce, r, e0);
     /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */
-    md -= (modinfo->modulus_inv62 * (uint64_t)secp256k1_i128_to_i64(&cd) + md) & M62;
-    me -= (modinfo->modulus_inv62 * (uint64_t)secp256k1_i128_to_i64(&ce) + me) & M62;
+    md -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&cd) + md) & M62;
+    me -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&ce) + me) & M62;
     /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */
     secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[0], md);
     secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[0], me);
     /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */
-    VERIFY_CHECK((secp256k1_i128_to_i64(&cd) & M62) == 0); secp256k1_i128_rshift(&cd, 62);
-    VERIFY_CHECK((secp256k1_i128_to_i64(&ce) & M62) == 0); secp256k1_i128_rshift(&ce, 62);
+    VERIFY_CHECK((secp256k1_i128_to_u64(&cd) & M62) == 0); secp256k1_i128_rshift(&cd, 62);
+    VERIFY_CHECK((secp256k1_i128_to_u64(&ce) & M62) == 0); secp256k1_i128_rshift(&ce, 62);
     /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */
     secp256k1_i128_accum_mul(&cd, u, d1);
     secp256k1_i128_accum_mul(&cd, v, e1);
@@ -358,8 +358,8 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
         secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[1], md);
         secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[1], me);
     }
-    d->v[0] = secp256k1_i128_to_i64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
-    e->v[0] = secp256k1_i128_to_i64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
+    d->v[0] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
+    e->v[0] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
     /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */
     secp256k1_i128_accum_mul(&cd, u, d2);
     secp256k1_i128_accum_mul(&cd, v, e2);
@@ -369,8 +369,8 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
         secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[2], md);
         secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[2], me);
     }
-    d->v[1] = secp256k1_i128_to_i64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
-    e->v[1] = secp256k1_i128_to_i64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
+    d->v[1] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
+    e->v[1] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
     /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */
     secp256k1_i128_accum_mul(&cd, u, d3);
     secp256k1_i128_accum_mul(&cd, v, e3);
@@ -380,8 +380,8 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
         secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[3], md);
         secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[3], me);
     }
-    d->v[2] = secp256k1_i128_to_i64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
-    e->v[2] = secp256k1_i128_to_i64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
+    d->v[2] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
+    e->v[2] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
     /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */
     secp256k1_i128_accum_mul(&cd, u, d4);
     secp256k1_i128_accum_mul(&cd, v, e4);
@@ -389,8 +389,8 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
     secp256k1_i128_accum_mul(&ce, r, e4);
     secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[4], md);
     secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[4], me);
-    d->v[3] = secp256k1_i128_to_i64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
-    e->v[3] = secp256k1_i128_to_i64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
+    d->v[3] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
+    e->v[3] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
     /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
     d->v[4] = secp256k1_i128_to_i64(&cd);
     e->v[4] = secp256k1_i128_to_i64(&ce);
@@ -407,7 +407,7 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
  * This implements the update_fg function from the explanation.
  */
 static void secp256k1_modinv64_update_fg_62(secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) {
-    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
+    const uint64_t M62 = UINT64_MAX >> 2;
     const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
     const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
     const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
@@ -418,36 +418,36 @@ static void secp256k1_modinv64_update_fg_62(secp256k1_modinv64_signed62 *f, secp
     secp256k1_i128_mul(&cg, q, f0);
     secp256k1_i128_accum_mul(&cg, r, g0);
     /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
-    VERIFY_CHECK((secp256k1_i128_to_i64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
-    VERIFY_CHECK((secp256k1_i128_to_i64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
+    VERIFY_CHECK((secp256k1_i128_to_u64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
+    VERIFY_CHECK((secp256k1_i128_to_u64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
     /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */
     secp256k1_i128_accum_mul(&cf, u, f1);
     secp256k1_i128_accum_mul(&cf, v, g1);
     secp256k1_i128_accum_mul(&cg, q, f1);
     secp256k1_i128_accum_mul(&cg, r, g1);
-    f->v[0] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
-    g->v[0] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
+    f->v[0] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
+    g->v[0] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
     /* Compute limb 2 of t*[f,g], and store it as output limb 1. */
     secp256k1_i128_accum_mul(&cf, u, f2);
     secp256k1_i128_accum_mul(&cf, v, g2);
     secp256k1_i128_accum_mul(&cg, q, f2);
     secp256k1_i128_accum_mul(&cg, r, g2);
-    f->v[1] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
-    g->v[1] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
+    f->v[1] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
+    g->v[1] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
     /* Compute limb 3 of t*[f,g], and store it as output limb 2. */
     secp256k1_i128_accum_mul(&cf, u, f3);
     secp256k1_i128_accum_mul(&cf, v, g3);
     secp256k1_i128_accum_mul(&cg, q, f3);
     secp256k1_i128_accum_mul(&cg, r, g3);
-    f->v[2] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
-    g->v[2] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
+    f->v[2] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
+    g->v[2] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
     /* Compute limb 4 of t*[f,g], and store it as output limb 3. */
     secp256k1_i128_accum_mul(&cf, u, f4);
     secp256k1_i128_accum_mul(&cf, v, g4);
     secp256k1_i128_accum_mul(&cg, q, f4);
     secp256k1_i128_accum_mul(&cg, r, g4);
-    f->v[3] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
-    g->v[3] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
+    f->v[3] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
+    g->v[3] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
     /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */
     f->v[4] = secp256k1_i128_to_i64(&cf);
     g->v[4] = secp256k1_i128_to_i64(&cg);
@@ -460,7 +460,7 @@ static void secp256k1_modinv64_update_fg_62(secp256k1_modinv64_signed62 *f, secp
  * This implements the update_fg function from the explanation.
  */
 static void secp256k1_modinv64_update_fg_62_var(int len, secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) {
-    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
+    const uint64_t M62 = UINT64_MAX >> 2;
     const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
     int64_t fi, gi;
     secp256k1_int128 cf, cg;
@@ -474,8 +474,8 @@ static void secp256k1_modinv64_update_fg_62_var(int len, secp256k1_modinv64_sign
     secp256k1_i128_mul(&cg, q, fi);
     secp256k1_i128_accum_mul(&cg, r, gi);
     /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
-    VERIFY_CHECK((secp256k1_i128_to_i64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
-    VERIFY_CHECK((secp256k1_i128_to_i64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
+    VERIFY_CHECK((secp256k1_i128_to_u64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
+    VERIFY_CHECK((secp256k1_i128_to_u64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
     /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting
      * down by 62 bits). */
     for (i = 1; i < len; ++i) {
@@ -485,8 +485,8 @@ static void secp256k1_modinv64_update_fg_62_var(int len, secp256k1_modinv64_sign
         secp256k1_i128_accum_mul(&cf, v, gi);
         secp256k1_i128_accum_mul(&cg, q, fi);
         secp256k1_i128_accum_mul(&cg, r, gi);
-        f->v[i - 1] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
-        g->v[i - 1] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
+        f->v[i - 1] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
+        g->v[i - 1] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
     }
     /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
     f->v[len - 1] = secp256k1_i128_to_i64(&cf);
diff --git a/src/tests.c b/src/tests.c
index 6d61dc811e8f4..fd96d635606ac 100644
--- a/src/tests.c
+++ b/src/tests.c
@@ -1806,7 +1806,7 @@ void load256i128(uint16_t* out, const secp256k1_int128* v) {
     uint64_t lo;
     int64_t hi;
     secp256k1_int128 c = *v;
-    lo = secp256k1_i128_to_i64(&c);
+    lo = secp256k1_i128_to_u64(&c);
     secp256k1_i128_rshift(&c, 64);
     hi = secp256k1_i128_to_i64(&c);
     load256two64(out, hi, lo, 1);
@@ -1923,8 +1923,8 @@ void run_int128_test_case(void) {
     secp256k1_i128_rshift(&swz, uc % 127);
     load256i128(rswz, &swz);
     CHECK(secp256k1_memcmp_var(rswr, rswz, 16) == 0);
-    /* test secp256k1_i128_to_i64 */
-    CHECK((uint64_t)secp256k1_i128_to_i64(&swa) == v[0]);
+    /* test secp256k1_i128_to_u64 */
+    CHECK(secp256k1_i128_to_u64(&swa) == v[0]);
     /* test secp256k1_i128_from_i64 */
     secp256k1_i128_from_i64(&swz, sb);
     load256i128(rswz, &swz);
@@ -1988,20 +1988,20 @@ void run_int128_tests(void) {
     /* Compute INT128_MAX = 2^127 - 1 with secp256k1_i128_accum_mul */
     secp256k1_i128_mul(&res, INT64_MAX, INT64_MAX);
     secp256k1_i128_accum_mul(&res, INT64_MAX, INT64_MAX);
-    CHECK(secp256k1_i128_to_i64(&res) == 2);
+    CHECK(secp256k1_i128_to_u64(&res) == 2);
     secp256k1_i128_accum_mul(&res, 4, 9223372036854775807);
     secp256k1_i128_accum_mul(&res, 1, 1);
-    CHECK((uint64_t)secp256k1_i128_to_i64(&res) == UINT64_MAX);
+    CHECK(secp256k1_i128_to_u64(&res) == UINT64_MAX);
     secp256k1_i128_rshift(&res, 64);
     CHECK(secp256k1_i128_to_i64(&res) == INT64_MAX);
 
     /* Compute INT128_MIN = - 2^127 with secp256k1_i128_accum_mul */
     secp256k1_i128_mul(&res, INT64_MAX, INT64_MIN);
-    CHECK(secp256k1_i128_to_i64(&res) == INT64_MIN);
+    CHECK(secp256k1_i128_to_u64(&res) == (uint64_t)INT64_MIN);
     secp256k1_i128_accum_mul(&res, INT64_MAX, INT64_MIN);
-    CHECK(secp256k1_i128_to_i64(&res) == 0);
+    CHECK(secp256k1_i128_to_u64(&res) == 0);
     secp256k1_i128_accum_mul(&res, 2, INT64_MIN);
-    CHECK(secp256k1_i128_to_i64(&res) == 0);
+    CHECK(secp256k1_i128_to_u64(&res) == 0);
     secp256k1_i128_rshift(&res, 64);
     CHECK(secp256k1_i128_to_i64(&res) == INT64_MIN);
 }
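
A note on the new struct-based conversion: secp256k1_i128_to_u64 simply exposes the low limb (the value modulo 2^64), while secp256k1_i128_to_i64 now VERIFY_CHECKs that the value actually fits in a signed 64-bit integer before casting. The standalone sketch below (the i128_demo type and i128_demo_fits_i64 helper are made up for illustration, not library code) shows why "a->hi == -(a->lo >> 63)" is exactly that fits-in-int64_t condition:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the struct-based secp256k1_int128: lo holds the
 * value's bottom 64 bits, hi its top 64 bits, both as unsigned limbs. */
typedef struct { uint64_t lo; uint64_t hi; } i128_demo;

/* A two's complement 128-bit value fits in an int64_t exactly when the high
 * limb is a sign extension of the low limb. Unsigned negation makes
 * -(lo >> 63) equal to 0 when bit 63 of lo is clear, and to all-ones when
 * it is set, which is precisely the expected sign extension. */
static int i128_demo_fits_i64(const i128_demo *a) {
    return a->hi == -(a->lo >> 63);
}

int main(void) {
    i128_demo five = { 5, 0 };                          /* 5: fits */
    i128_demo imin = { (uint64_t)1 << 63, UINT64_MAX }; /* INT64_MIN: fits */
    i128_demo big  = { (uint64_t)1 << 63, 0 };          /* 2^63: does not fit */
    printf("%d %d %d\n", i128_demo_fits_i64(&five),
                         i128_demo_fits_i64(&imin),
                         i128_demo_fits_i64(&big));     /* prints: 1 1 0 */
    return 0;
}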
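
The bulk of the modinv64 churn is the idiom "secp256k1_i128_to_u64(&c) & M62": the mask depends only on the accumulator modulo 2^64, so the unsigned accessor works without any range precondition, whereas the old secp256k1_i128_to_i64 call would now trip its VERIFY_CHECK whenever the accumulator is wider than 64 bits mid-computation. Here is a scaled-down sketch of the same limb-extraction pattern, using a 64-bit signed accumulator and 30-bit limbs; all names are local to the example, and like the library itself it assumes >> of a negative value is an arithmetic shift:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint64_t M30 = UINT64_MAX >> 34;  /* 2^30 - 1, the analogue of M62 */
    int64_t acc = -123456789012345678;      /* a negative accumulator value */
    uint64_t limb0, limb1, recombined;

    /* (uint64_t)acc is acc modulo 2^64 (the analogue of the new
     * secp256k1_i128_to_u64), so the mask picks out the bottom 30 bits of
     * the two's complement representation even though acc is negative. */
    limb0 = (uint64_t)acc & M30; acc >>= 30;
    limb1 = (uint64_t)acc & M30; acc >>= 30;

    /* acc now holds the signed top limb; recombine modulo 2^64 to check. */
    recombined = ((uint64_t)acc << 60) + (limb1 << 30) + limb0;
    printf("%d\n", recombined == (uint64_t)-123456789012345678); /* prints: 1 */
    return 0;
}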
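
Finally, the update_de correction step now reads "md -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&cd) + md) & M62". Since modulus_inv62 is the inverse of the modulus modulo 2^62, this picks md so that cd + modulus*md is divisible by 2^62, letting the subsequent 62-bit right shift be exact. Below is a toy version of the same computation modulo 2^5, with made-up constants chosen only for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint64_t M5 = 31;      /* 2^5 - 1, the analogue of M62 */
    const int64_t modulus = 23;  /* toy odd modulus */
    const uint64_t inv5 = 7;     /* 23 * 7 == 161 == 1 (mod 32) */
    int64_t cd = 1234567;        /* accumulator whose low bits must be cleared */
    int64_t md = 0;              /* starts at zero, as in the patch */

    /* Same shape as the patch's correction, with M62 scaled down to M5. */
    md -= (int64_t)((inv5 * (uint64_t)cd + (uint64_t)md) & M5);

    /* cd + modulus*md == 1234567 - 23*17 == 1234176 == 0 (mod 32). */
    printf("%d\n", (((uint64_t)(cd + modulus * md)) & M5) == 0); /* prints: 1 */
    return 0;
}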