variable signing precompute table
djb authored and douglasbakkum committed May 24, 2019
1 parent 1a02d6c commit 4ca3222
Showing 3 changed files with 38 additions and 31 deletions.
23 changes: 15 additions & 8 deletions src/ecmult_gen.h
@@ -10,20 +10,27 @@
#include "scalar.h"
#include "group.h"

#ifndef ECMULT_GEN_PREC_BITS
#define ECMULT_GEN_PREC_BITS 4 /* 4 bits: 64kB table, fastest; 2 bits: 32kB table, ~75% speed */
#endif
#define ECMULT_GEN_PREC_B ECMULT_GEN_PREC_BITS
#define ECMULT_GEN_PREC_G (1 << ECMULT_GEN_PREC_B)
#define ECMULT_GEN_PREC_N (256 / ECMULT_GEN_PREC_B)

typedef struct {
/* For accelerating the computation of a*G:
* To harden against timing attacks, use the following mechanism:
* * Break up the multiplicand into groups of 4 bits, called n_0, n_1, n_2, ..., n_63.
* * Compute sum(n_i * 16^i * G + U_i, i=0..63), where:
* * U_i = U * 2^i (for i=0..62)
* * U_i = U * (1-2^63) (for i=63)
* where U is a point with no known corresponding scalar. Note that sum(U_i, i=0..63) = 0.
* For each i, and each of the 16 possible values of n_i, (n_i * 16^i * G + U_i) is
* precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63).
* * Break up the multiplicand into groups of PREC_B bits, called n_0, n_1, n_2, ..., n_(PREC_N-1).
* * Compute sum(n_i * (PREC_G)^i * G + U_i, i=0 ... PREC_N-1), where:
* * U_i = U * 2^i, for i=0 ... PREC_N-2
* * U_i = U * (1-2^(PREC_N-1)), for i=PREC_N-1
* where U is a point with no known corresponding scalar. Note that sum(U_i, i=0 ... PREC_N-1) = 0.
* For each i, and each of the PREC_G possible values of n_i, (n_i * (PREC_G)^i * G + U_i) is
* precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0 ... PREC_N-1).
* None of the resulting prec group elements have a known scalar, and neither do any of
* the intermediate sums while computing a*G.
*/
secp256k1_ge_storage (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */
secp256k1_ge_storage (*prec)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G]; /* prec[j][i] = (PREC_G)^j * i * G + U_i */
secp256k1_scalar blind;
secp256k1_gej initial;
} secp256k1_ecmult_gen_context;
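
A quick sanity check on the size/speed note next to ECMULT_GEN_PREC_BITS above (not part of this change): each table entry is a secp256k1_ge_storage, assumed here to be 64 bytes (two 32-byte field elements), and the table holds ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G entries. A minimal standalone C sketch:

    #include <stdio.h>
    #include <stddef.h>

    int main(void) {
        int bits;
        for (bits = 2; bits <= 4; bits += 2) {            /* the two settings named in the comment */
            int prec_g = 1 << bits;                       /* ECMULT_GEN_PREC_G */
            int prec_n = 256 / bits;                      /* ECMULT_GEN_PREC_N */
            size_t bytes = (size_t)prec_n * prec_g * 64;  /* assumed 64-byte entries */
            printf("PREC_BITS=%d: %d groups x %d entries = %zu bytes\n",
                   bits, prec_n, prec_g, bytes);
        }
        return 0;
    }

This prints 32768 bytes for 2 bits and 65536 bytes for 4 bits, matching the 32kB and 64kB figures in the comment.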
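The precomputation comment block above can also be checked with ordinary modular arithmetic in place of group operations. In the toy sketch below (not the library's code), integers mod 2^32 replace curve points, a scalar u stands in for the unknown discrete log of U, and a 32-bit value is split into TOY_N groups of TOY_B bits; the assertion confirms that the blinding terms U_i cancel and the sum reproduces the multiplicand:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_B 4                  /* bits per group, like ECMULT_GEN_PREC_B */
    #define TOY_G (1u << TOY_B)      /* entries per group */
    #define TOY_N (32 / TOY_B)       /* 8 groups for a 32-bit "scalar" */

    int main(void) {
        uint32_t a = 0xDEADBEEFu;    /* the multiplicand */
        uint32_t u = 0x12345678u;    /* stand-in for the unknown scalar of U */
        uint32_t sum = 0, base = 1;  /* base tracks TOY_G^i */
        int i;
        for (i = 0; i < TOY_N; i++) {
            uint32_t n_i = (a >> (i * TOY_B)) & (TOY_G - 1);
            /* u_i = u * 2^i, except the last one, which is u * (1 - 2^(N-1)) */
            uint32_t u_i = (i < TOY_N - 1) ? (u << i)
                                           : u * (1u - (1u << (TOY_N - 1)));
            sum += n_i * base + u_i; /* corresponds to adding prec(i, n_i) */
            base *= TOY_G;           /* wraps harmlessly after its last use */
        }
        assert(sum == a);            /* sum(U_i) = 0, so only the multiplicand remains */
        printf("blinded sum reproduces a: %s\n", sum == a ? "yes" : "no");
        return 0;
    }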
36 changes: 18 additions & 18 deletions src/ecmult_gen_impl.h
@@ -20,7 +20,7 @@ static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx)

static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, const secp256k1_callback* cb) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
secp256k1_ge prec[1024];
secp256k1_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G];
secp256k1_gej gj;
secp256k1_gej nums_gej;
int i, j;
@@ -30,7 +30,7 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
return;
}
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
ctx->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec));
ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])checked_malloc(cb, sizeof(*ctx->prec));

/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
@@ -54,39 +54,39 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx

/* compute prec. */
{
secp256k1_gej precj[1024]; /* Jacobian versions of prec. */
secp256k1_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */
secp256k1_gej gbase;
secp256k1_gej numsbase;
gbase = gj; /* 16^j * G */
numsbase = nums_gej; /* 2^j * nums. */
for (j = 0; j < 64; j++) {
for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
/* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */
precj[j*16] = numsbase;
for (i = 1; i < 16; i++) {
secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
precj[j*ECMULT_GEN_PREC_G] = numsbase;
for (i = 1; i < ECMULT_GEN_PREC_G; i++) {
secp256k1_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL);
}
/* Multiply gbase by 16. */
for (i = 0; i < 4; i++) {
for (i = 0; i < ECMULT_GEN_PREC_B; i++) {
secp256k1_gej_double_var(&gbase, &gbase, NULL);
}
/* Multiply numsbase by 2. */
secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
if (j == 62) {
if (j == ECMULT_GEN_PREC_N - 2) {
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
secp256k1_gej_neg(&numsbase, &numsbase);
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
}
}
secp256k1_ge_set_all_gej_var(prec, precj, 1024);
secp256k1_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G);
}
for (j = 0; j < 64; j++) {
for (i = 0; i < 16; i++) {
secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]);
}
}
#else
(void)cb;
ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context;
ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])secp256k1_ecmult_static_context;
#endif
secp256k1_ecmult_gen_blind(ctx, NULL);
}
@@ -101,7 +101,7 @@ static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst
dst->prec = NULL;
} else {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
dst->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*dst->prec));
dst->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])checked_malloc(cb, sizeof(*dst->prec));
memcpy(dst->prec, src->prec, sizeof(*dst->prec));
#else
(void)cb;
@@ -132,9 +132,9 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
/* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
secp256k1_scalar_add(&gnb, gn, &ctx->blind);
add.infinity = 0;
for (j = 0; j < 64; j++) {
bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4);
for (i = 0; i < 16; i++) {
for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
bits = secp256k1_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B);
for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
/** This uses a conditional move to avoid any secret data in array indexes.
* _Any_ use of secret indexes has been demonstrated to result in timing
* sidechannels, even when the cache-line access patterns are uniform.
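The conditional-move comment in secp256k1_ecmult_gen above explains why the inner loop visits every one of the ECMULT_GEN_PREC_G entries instead of indexing the table with the secret window value. The sketch below illustrates that branch-free selection pattern on plain 64-byte entries; ct_table_select is a hypothetical helper written for illustration, not the library's own cmov routines:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Copy table[secret] into out without using `secret` as an array index or
     * in a branch: scan every entry and mask in the one that matches. */
    static void ct_table_select(uint8_t out[64], uint8_t (*table)[64],
                                size_t n_entries, uint32_t secret) {
        size_t i, k;
        for (k = 0; k < 64; k++) out[k] = 0;
        for (i = 0; i < n_entries; i++) {
            uint64_t diff = (uint64_t)((uint32_t)i ^ secret);
            /* mask = 0xFF iff i == secret, derived without a data-dependent branch */
            uint8_t mask = (uint8_t)(0u - (uint8_t)((diff - 1) >> 63));
            for (k = 0; k < 64; k++) {
                out[k] |= table[i][k] & mask;
            }
        }
    }

    int main(void) {
        uint8_t table[16][64] = {{0}}, out[64];
        size_t i;
        for (i = 0; i < 16; i++) table[i][0] = (uint8_t)i;  /* tag each entry */
        ct_table_select(out, table, 16, 11);                /* select "secret" entry 11 */
        printf("selected entry tag: %u\n", (unsigned)out[0]);
        return 0;
    }

In the library the selection is done with its own cmov helpers over secp256k1_ge_storage values (the rest of that loop sits below the truncated hunk); the key property is that the memory access pattern is identical for every possible window value.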
10 changes: 5 additions & 5 deletions src/gen_context.c
@@ -43,21 +43,21 @@ int main(int argc, char **argv) {
fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n");
fprintf(fp, "#include \"src/group.h\"\n");
fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n");
fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {\n");
fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G] = {\n");

secp256k1_ecmult_gen_context_init(&ctx);
secp256k1_ecmult_gen_context_build(&ctx, &default_error_callback);
for(outer = 0; outer != 64; outer++) {
for(outer = 0; outer != ECMULT_GEN_PREC_N; outer++) {
fprintf(fp,"{\n");
for(inner = 0; inner != 16; inner++) {
for(inner = 0; inner != ECMULT_GEN_PREC_G; inner++) {
fprintf(fp," SC(%uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu)", SECP256K1_GE_STORAGE_CONST_GET((*ctx.prec)[outer][inner]));
if (inner != 15) {
if (inner != ECMULT_GEN_PREC_G - 1) {
fprintf(fp,",\n");
} else {
fprintf(fp,"\n");
}
}
if (outer != 63) {
if (outer != ECMULT_GEN_PREC_N - 1) {
fprintf(fp,"},\n");
} else {
fprintf(fp,"}\n");
