Make secp256k1_scalar_b32 detect overflow in scalar_low
sipa committed Sep 18, 2020
1 parent c498366 commit 8bcd78c
Showing 2 changed files with 16 additions and 11 deletions.
16 changes: 9 additions & 7 deletions src/modules/recovery/tests_exhaustive_impl.h
@@ -38,18 +38,20 @@ void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1
     CHECK(r == expected_r);
     CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
           (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
-    /* In computing the recid, there is an overflow condition that is disabled in
-     * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
-     * will exceed the group order, and our signing code always holds out for r
-     * values that don't overflow, so with a proper overflow check the tests would
-     * loop indefinitely. */
+    /* The recid's second bit is for conveying overflow (R.x value >= group order).
+     * In the actual secp256k1 this is an astronomically unlikely event, but in the
+     * small group used here, it will always be the case.
+     * Note that this isn't actually useful; full recovery would need to convey
+     * floor(R.x / group_order), but only one bit is used as that is sufficient
+     * in the real group. */
+    expected_recid = 2;
     r_dot_y_normalized = group[k].y;
     secp256k1_fe_normalize(&r_dot_y_normalized);
     /* Also the recovery id is flipped depending if we hit the low-s branch */
     if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
-        expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
+        expected_recid |= secp256k1_fe_is_odd(&r_dot_y_normalized);
     } else {
-        expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
+        expected_recid |= !secp256k1_fe_is_odd(&r_dot_y_normalized);
     }
     CHECK(recid == expected_recid);

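The new comment in the hunk above describes how the recovery id is laid out: bit 0 carries the parity of R.y (flipped when the signer takes the low-s branch), and bit 1 conveys that R.x reached or exceeded the group order. The following minimal sketch illustrates just that bit layout; the helper name compute_recid and the toy driver are illustrative assumptions, not code from this commit or from libsecp256k1.

#include <stdio.h>

/* Illustrative sketch of the recid bit layout discussed above:
 * bit 0 = parity of R.y, bit 1 = "R.x >= group order" overflow flag. */
static int compute_recid(int ry_is_odd, int rx_overflowed) {
    int recid = 0;
    recid |= (ry_is_odd ? 1 : 0);      /* bit 0: parity of R.y */
    recid |= (rx_overflowed ? 2 : 0);  /* bit 1: R.x overflowed the order */
    return recid;
}

int main(void) {
    /* In the real secp256k1 group the overflow bit is almost never set, so
     * recid is normally 0 or 1; in the tiny exhaustive-test group it is
     * essentially always set, which is why the test now starts from 2. */
    printf("%d %d %d %d\n",
           compute_recid(0, 0),   /* 0 */
           compute_recid(1, 0),   /* 1 */
           compute_recid(0, 1),   /* 2 */
           compute_recid(1, 1));  /* 3 */
    return 0;
}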
11 changes: 7 additions & 4 deletions src/scalar_low_impl.h
@@ -48,14 +48,17 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
 }
 
 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
-    const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
     int i;
+    int over = 0;
     *r = 0;
     for (i = 0; i < 32; i++) {
-        *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
+        *r = (*r * 0x100) + b32[i];
+        if (*r >= EXHAUSTIVE_TEST_ORDER) {
+            over = 1;
+            *r %= EXHAUSTIVE_TEST_ORDER;
+        }
     }
-    /* just deny overflow, it basically always happens */
-    if (overflow) *overflow = 0;
+    if (overflow) *overflow = over;
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
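The rewritten loop above accumulates the 32 bytes in base 256 and reduces whenever the running value reaches the order, remembering that a reduction happened so the caller can be told about the overflow. The short standalone sketch below reproduces that logic with a toy modulus of 13; the name toy_scalar_set_b32 and the choice of 13 are illustrative assumptions (the real order comes from the exhaustive test configuration), not code from this commit.

#include <stdio.h>

/* Toy stand-in for EXHAUSTIVE_TEST_ORDER; 13 is purely illustrative. */
#define TOY_ORDER 13u

/* Sketch of the new overflow-detecting accumulation: fold in each byte in
 * base 256, reduce when the value reaches the order, and record the event. */
static void toy_scalar_set_b32(unsigned int *r, const unsigned char *b32, int *overflow) {
    int i;
    int over = 0;
    *r = 0;
    for (i = 0; i < 32; i++) {
        *r = (*r * 0x100) + b32[i];
        if (*r >= TOY_ORDER) {
            over = 1;
            *r %= TOY_ORDER;
        }
    }
    if (overflow) *overflow = over;
}

int main(void) {
    unsigned char b32[32] = {0};
    unsigned int r;
    int overflow;

    b32[31] = 7;   /* encodes 7, which is below the toy order */
    toy_scalar_set_b32(&r, b32, &overflow);
    printf("r=%u overflow=%d\n", r, overflow);  /* r=7 overflow=0 */

    b32[31] = 20;  /* encodes 20, which overflows and reduces to 7 */
    toy_scalar_set_b32(&r, b32, &overflow);
    printf("r=%u overflow=%d\n", r, overflow);  /* r=7 overflow=1 */
    return 0;
}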
