Merge bitcoin-core/secp256k1#1156: Followups to int128_struct arithmetic
99bd335 Make int128 overflow test use secp256k1_[ui]128_mul (Pieter Wuille)
3afce0a Avoid signed overflow in MSVC ARM64 secp256k1_mul128 (Pieter Wuille)
9b5f589 Heuristically decide whether to use int128_struct (Pieter Wuille)
63ff064 int128: Add test override for testing __(u)mulh on MSVC X64 (Tim Ruffing)
f2b7e88 Add int128 randomized tests (Pieter Wuille)

Pull request description:

  This is a follow-up to bitcoin#1000:
  * Add randomized unit tests for int128 logic.
  * Add CI for the `__(u)mulh` code path (on non-ARM64 MSVC).
  * Add heuristic logic to enable int128_struct-based arithmetic on 64-bit MSVC, or on systems with pointers wider than 32 bits (see the sketch after this list).
  * Fix signed overflow in ARM64 MSVC code.
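
To make the heuristic concrete: it roughly amounts to using the compiler's native 128-bit type when one exists, otherwise falling back to the struct-based implementation on targets that look 64-bit (64-bit MSVC, or pointers wider than 32 bits), and keeping the existing code path elsewhere. The preprocessor sketch below is only an illustration of that decision, not the library's actual configuration code; the macro names SECP256K1_INT128_NATIVE and SECP256K1_INT128_STRUCT are assumed here to match the selection check in int128.h.

#include <stdint.h>  /* for UINTPTR_MAX */

/* Illustrative sketch of the selection heuristic (hypothetical, not the
 * library's actual configuration logic). */
#if defined(__SIZEOF_INT128__)
   /* The compiler offers a native 128-bit integer type: use it directly. */
#  define SECP256K1_INT128_NATIVE 1
#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
   /* 64-bit MSVC: the struct implementation can use _umul128 or __(u)mulh. */
#  define SECP256K1_INT128_STRUCT 1
#elif UINTPTR_MAX > 0xffffffffu
   /* Pointers wider than 32 bits suggest a 64-bit target: prefer the
    * struct-based 128-bit arithmetic. */
#  define SECP256K1_INT128_STRUCT 1
#endif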

ACKs for top commit:
  roconnor-blockstream:
    utACK 99bd335
  real-or-random:
    ACK 99bd335 tested this also on MSVC locally with the override, including all the benchmark binaries
  jonasnick:
    utACK 99bd335

Tree-SHA512: 5ea897362293b45a86650593e1fdc8c4004a1d9452eed2fa070d22dffc7ed7ca1ec50a4df61e3a33dbe35e08132ad9686286ac44af6742b32b82f11c9d3341c6
real-or-random committed Nov 18, 2022
2 parents 6138d73 + 99bd335 commit e40fd27
Showing 6 changed files with 391 additions and 85 deletions.
4 changes: 4 additions & 0 deletions .cirrus.yml
@@ -288,6 +288,10 @@ task:
- name: "x86_64 (MSVC): Windows (Debian stable, Wine, int128_struct)"
env:
WIDEMUL: int128_struct
- name: "x86_64 (MSVC): Windows (Debian stable, Wine, int128_struct with __(u)mulh)"
env:
WIDEMUL: int128_struct
CPPFLAGS: -DSECP256K1_MSVC_MULH_TEST_OVERRIDE
- name: "i686 (MSVC): Windows (Debian stable, Wine)"
env:
HOST: i686-w64-mingw32
6 changes: 6 additions & 0 deletions src/int128.h
@@ -12,6 +12,9 @@
# error "Please select int128 implementation"
# endif

/* Construct an unsigned 128-bit value from a high and a low 64-bit value. */
static SECP256K1_INLINE void secp256k1_u128_load(secp256k1_uint128 *r, uint64_t hi, uint64_t lo);

/* Multiply two unsigned 64-bit values a and b and write the result to r. */
static SECP256K1_INLINE void secp256k1_u128_mul(secp256k1_uint128 *r, uint64_t a, uint64_t b);

@@ -44,6 +47,9 @@ static SECP256K1_INLINE void secp256k1_u128_from_u64(secp256k1_uint128 *r, uint6
*/
static SECP256K1_INLINE int secp256k1_u128_check_bits(const secp256k1_uint128 *r, unsigned int n);

/* Construct a signed 128-bit value from a high and a low 64-bit value. */
static SECP256K1_INLINE void secp256k1_i128_load(secp256k1_int128 *r, int64_t hi, uint64_t lo);

/* Multiply two signed 64-bit values a and b and write the result to r. */
static SECP256K1_INLINE void secp256k1_i128_mul(secp256k1_int128 *r, int64_t a, int64_t b);

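For reference, the randomized tests added in this pull request exercise the int128 operations against simpler reference computations. A minimal, hypothetical check in that spirit, assuming the accessors secp256k1_u128_hi_u64 and secp256k1_u128_to_u64 declared elsewhere in this header and the test suite's CHECK macro, might look like:

static void test_u128_load_roundtrip(uint64_t hi, uint64_t lo) {
    secp256k1_uint128 v;
    /* Build a 128-bit value from two 64-bit halves... */
    secp256k1_u128_load(&v, hi, lo);
    /* ...and confirm that both halves read back unchanged. */
    CHECK(secp256k1_u128_hi_u64(&v) == hi);
    CHECK(secp256k1_u128_to_u64(&v) == lo);
}
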
8 changes: 8 additions & 0 deletions src/int128_native_impl.h
@@ -3,6 +3,10 @@

#include "int128.h"

static SECP256K1_INLINE void secp256k1_u128_load(secp256k1_uint128 *r, uint64_t hi, uint64_t lo) {
*r = (((uint128_t)hi) << 64) + lo;
}

static SECP256K1_INLINE void secp256k1_u128_mul(secp256k1_uint128 *r, uint64_t a, uint64_t b) {
*r = (uint128_t)a * b;
}
@@ -37,6 +41,10 @@ static SECP256K1_INLINE int secp256k1_u128_check_bits(const secp256k1_uint128 *r
return (*r >> n == 0);
}

static SECP256K1_INLINE void secp256k1_i128_load(secp256k1_int128 *r, int64_t hi, uint64_t lo) {
*r = (((uint128_t)(uint64_t)hi) << 64) + lo;
}

static SECP256K1_INLINE void secp256k1_i128_mul(secp256k1_int128 *r, int64_t a, int64_t b) {
*r = (int128_t)a * b;
}
29 changes: 22 additions & 7 deletions src/int128_struct_impl.h
@@ -5,21 +5,26 @@

#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) /* MSVC */
# include <intrin.h>
# if defined(_M_X64)
/* On x86_64 MSVC, use native _(u)mul128 for 64x64->128 multiplications. */
# define secp256k1_umul128 _umul128
# define secp256k1_mul128 _mul128
# else
/* On ARM64 MSVC, use __(u)mulh for the upper half of 64x64 multiplications. */
# if defined(_M_ARM64) || defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
/* On ARM64 MSVC, use __(u)mulh for the upper half of 64x64 multiplications.
(Define SECP256K1_MSVC_MULH_TEST_OVERRIDE to test this code path on X64,
which supports both __(u)mulh and _umul128.) */
# if defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
# pragma message(__FILE__ ": SECP256K1_MSVC_MULH_TEST_OVERRIDE is defined, forcing use of __(u)mulh.")
# endif
static SECP256K1_INLINE uint64_t secp256k1_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
*hi = __umulh(a, b);
return a * b;
}

static SECP256K1_INLINE int64_t secp256k1_mul128(int64_t a, int64_t b, int64_t* hi) {
*hi = __mulh(a, b);
return a * b;
return (uint64_t)a * (uint64_t)b;
}
# else
/* On x86_64 MSVC, use native _(u)mul128 for 64x64->128 multiplications. */
# define secp256k1_umul128 _umul128
# define secp256k1_mul128 _mul128
# endif
#else
/* On other systems, emulate 64x64->128 multiplications using 32x32->64 multiplications. */
@@ -44,6 +49,11 @@ static SECP256K1_INLINE int64_t secp256k1_mul128(int64_t a, int64_t b, int64_t*
}
#endif

static SECP256K1_INLINE void secp256k1_u128_load(secp256k1_uint128 *r, uint64_t hi, uint64_t lo) {
r->hi = hi;
r->lo = lo;
}

static SECP256K1_INLINE void secp256k1_u128_mul(secp256k1_uint128 *r, uint64_t a, uint64_t b) {
r->lo = secp256k1_umul128(a, b, &r->hi);
}
@@ -93,6 +103,11 @@ static SECP256K1_INLINE int secp256k1_u128_check_bits(const secp256k1_uint128 *r
: r->hi == 0 && r->lo >> n == 0;
}

static SECP256K1_INLINE void secp256k1_i128_load(secp256k1_int128 *r, int64_t hi, uint64_t lo) {
r->hi = hi;
r->lo = lo;
}

static SECP256K1_INLINE void secp256k1_i128_mul(secp256k1_int128 *r, int64_t a, int64_t b) {
int64_t hi;
r->lo = (uint64_t)secp256k1_mul128(a, b, &hi);
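
The behavioral fix in this file is the low-half computation in secp256k1_mul128 on the __(u)mulh path: with int64_t operands, a * b can overflow, which is undefined behavior in C, whereas multiplying the operands as uint64_t wraps modulo 2^64 and, on the two's-complement targets MSVC supports, yields exactly the low 64 bits of the signed product. A small standalone illustration of the same idea (hypothetical helper, not library code):

#include <stdint.h>

/* Compute the low 64 bits of a signed 64x64-bit product without risking
 * signed-overflow undefined behavior: multiply the operands as unsigned
 * values (well-defined wraparound mod 2^64) and convert the result back. */
static int64_t mul_low64(int64_t a, int64_t b) {
    return (int64_t)((uint64_t)a * (uint64_t)b);
}

Converting the out-of-range unsigned result back to int64_t is implementation-defined rather than undefined, which is acceptable in an MSVC-specific code path.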
