diff --git a/configure.ac b/configure.ac index cfc287f60ef21..2145a2563f437 100644 --- a/configure.ac +++ b/configure.ac @@ -2055,7 +2055,7 @@ LIBS_TEMP="$LIBS" unset LIBS LIBS="$LIBS_TEMP" -ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --enable-module-recovery --enable-module-schnorrsig" +ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --enable-module-recovery --enable-module-schnorrsig --enable-module-generator --enable-module-commitment --enable-module-bulletproof --enable-module-aggsig" AC_CONFIG_SUBDIRS([src/secp256k1]) AC_OUTPUT diff --git a/src/secp256k1/Makefile.am b/src/secp256k1/Makefile.am index 51c5960301a49..8c1772103dda5 100644 --- a/src/secp256k1/Makefile.am +++ b/src/secp256k1/Makefile.am @@ -228,3 +228,19 @@ endif if ENABLE_MODULE_SCHNORRSIG include src/modules/schnorrsig/Makefile.am.include endif + +if ENABLE_MODULE_GENERATOR +include src/modules/generator/Makefile.am.include +endif + +if ENABLE_MODULE_COMMITMENT +include src/modules/commitment/Makefile.am.include +endif + +if ENABLE_MODULE_BULLETPROOF +include src/modules/bulletproofs/Makefile.am.include +endif + +if ENABLE_MODULE_AGGSIG +include src/modules/aggsig/Makefile.am.include +endif diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac index 2db59a8ff32e1..2e5a25993d9e8 100644 --- a/src/secp256k1/configure.ac +++ b/src/secp256k1/configure.ac @@ -156,6 +156,22 @@ AC_ARG_ENABLE(module_schnorrsig, AS_HELP_STRING([--enable-module-schnorrsig],[enable schnorrsig module [default=no]]), [], [SECP_SET_DEFAULT([enable_module_schnorrsig], [no], [yes])]) +AC_ARG_ENABLE(module_generator, + AS_HELP_STRING([--enable-module-generator],[enable NUMS generator module [default=no]]), [], + [SECP_SET_DEFAULT([enable_module_generator], [no], [yes])]) + +AC_ARG_ENABLE(module_commitment, + AS_HELP_STRING([--enable-module-commitment],[enable Pedersen commitments module [default=no]]), [], + 
[SECP_SET_DEFAULT([enable_module_commitment], [no], [yes])]) + +AC_ARG_ENABLE(module_bulletproof, + AS_HELP_STRING([--enable-module-bulletproof],[enable Pedersen / zero-knowledge bulletproofs module [default=no]]), [], + [SECP_SET_DEFAULT([enable_module_bulletproof], [no], [yes])]) + +AC_ARG_ENABLE(module_aggsig, + AS_HELP_STRING([--enable-module-aggsig],[enable Grin aggsig modules [default=no]]), [], + [SECP_SET_DEFAULT([enable_module_aggsig], [no], [yes])]) + AC_ARG_ENABLE(external_default_callbacks, AS_HELP_STRING([--enable-external-default-callbacks],[enable external default callback functions [default=no]]), [], [SECP_SET_DEFAULT([enable_external_default_callbacks], [no], [no])]) @@ -352,6 +368,22 @@ if test x"$enable_module_extrakeys" = x"yes"; then AC_DEFINE(ENABLE_MODULE_EXTRAKEYS, 1, [Define this symbol to enable the extrakeys module]) fi +if test x"$enable_module_generator" = x"yes"; then + AC_DEFINE(ENABLE_MODULE_GENERATOR, 1, [Define this symbol to enable the NUMS generator module]) +fi + +if test x"$enable_module_commitment" = x"yes"; then + AC_DEFINE(ENABLE_MODULE_COMMITMENT, 1, [Define this symbol to enable the Pedersen commitment module]) +fi + +if test x"$enable_module_bulletproof" = x"yes"; then + AC_DEFINE(ENABLE_MODULE_BULLETPROOF, 1, [Define this symbol to enable the Pedersen / zero knowledge bulletproof module]) +fi + +if test x"$enable_module_aggsig" = x"yes"; then + AC_DEFINE(ENABLE_MODULE_AGGSIG, 1, [Define this symbol to enable the Grin Aggsig module]) +fi + if test x"$enable_external_default_callbacks" = x"yes"; then AC_DEFINE(USE_EXTERNAL_DEFAULT_CALLBACKS, 1, [Define this symbol if an external implementation of the default callbacks is used]) fi @@ -391,6 +423,10 @@ AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_EXTRAKEYS], [test x"$enable_module_extrakeys" = x"yes"]) 
AM_CONDITIONAL([ENABLE_MODULE_SCHNORRSIG], [test x"$enable_module_schnorrsig" = x"yes"]) +AM_CONDITIONAL([ENABLE_MODULE_GENERATOR], [test x"$enable_module_generator" = x"yes"]) +AM_CONDITIONAL([ENABLE_MODULE_COMMITMENT], [test x"$enable_module_commitment" = x"yes"]) +AM_CONDITIONAL([ENABLE_MODULE_BULLETPROOF], [test x"$enable_module_bulletproof" = x"yes"]) +AM_CONDITIONAL([ENABLE_MODULE_AGGSIG], [test x"$enable_module_aggsig" = x"yes"]) AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$enable_external_asm" = x"yes"]) AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"]) AM_CONDITIONAL([BUILD_WINDOWS], [test "$build_windows" = "yes"]) @@ -411,6 +447,10 @@ echo " module ecdh = $enable_module_ecdh" echo " module recovery = $enable_module_recovery" echo " module extrakeys = $enable_module_extrakeys" echo " module schnorrsig = $enable_module_schnorrsig" +echo " module generator = $enable_module_generator" +echo " module commitment = $enable_module_commitment" +echo " module bulletproof = $enable_module_bulletproof" +echo " module aggsig = $enable_module_aggsig" echo echo " asm = $set_asm" echo " ecmult window size = $set_ecmult_window" diff --git a/src/secp256k1/include/secp256k1.h b/src/secp256k1/include/secp256k1.h index dddab346ae272..af4adea8e6529 100644 --- a/src/secp256k1/include/secp256k1.h +++ b/src/secp256k1/include/secp256k1.h @@ -810,6 +810,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_combine( size_t n ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Tweak a private key by inverting it. + * Returns: 0 if the input was out of range. 1 otherwise. + * Args: ctx: pointer to a context object (cannot be NULL). + * In/Out: seckey: pointer to a 32-byte private key. + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_inv( + const secp256k1_context* ctx, + unsigned char *seckey +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); + /** Compute a tagged hash as defined in BIP-340. 
* * This is useful for creating a message hash and achieving domain separation diff --git a/src/secp256k1/include/secp256k1_aggsig.h b/src/secp256k1/include/secp256k1_aggsig.h new file mode 100644 index 0000000000000..0005b983a582d --- /dev/null +++ b/src/secp256k1/include/secp256k1_aggsig.h @@ -0,0 +1,248 @@ +#ifndef _SECP256K1_AGGSIG_ +# define _SECP256K1_AGGSIG_ + +# include "secp256k1.h" + +# ifdef __cplusplus +extern "C" { +# endif + +/** Opaque data structure that holds context for the aggregated signature state machine + * + * During execution of an aggregated signature this context object will contain secret + * data. It MUST be destroyed by `secp256k1_aggsig_context_destroy` to erase this data + * before freeing it. Context objects are sized based on the number of signatures to + * aggregate, and can be reused for multiple signature runs, provided that each run + * aggregates the same number of signatures. + * + * Destroying and recreating a context object is essentially just deallocating and + * reallocating memory, there is no expensive precomputation as there is with the general + * libsecp256k1 context. + * + * Once a context object is created with `secp256k1_aggsig_context_create` the workflow + * is as follows. + * + * 1. For each index controlled by the user, use `secp256k1_aggsig_generate_nonce` + * to generate a public/private nonce pair for that index. [TODO export the + * public nonce for other users] + * 2. [TODO import others' public nonces] + * 3. For each index controlled by the user, use `secp256k1_aggsig_partial_sign` + * to generate a partial signature that should be distributed to all peers. + */ +typedef struct secp256k1_aggsig_context_struct secp256k1_aggsig_context; + +/** Opaque data structure that holds a partial signature + * + * The exact representation of data inside is implementation defined and not + * guaranteed to be portable between different platforms or versions. 
It is + * however guaranteed to be 32 bytes in size, and can be safely copied, moved. + * and transmitted as raw bytes. + */ +typedef struct { + unsigned char data[32]; +} secp256k1_aggsig_partial_signature; + + +/** Create an aggregated signature context object with a given size + * + * Returns: a newly created context object. + * Args: ctx: an existing context object (cannot be NULL) + * In: pubkeys: public keys for each signature (cannot be NULL) + * n_pubkeys: number of public keys/signatures to aggregate + * seed: a 32-byte seed to use for the nonce-generating RNG (cannot be NULL) + */ +SECP256K1_API secp256k1_aggsig_context* secp256k1_aggsig_context_create( + const secp256k1_context *ctx, + const secp256k1_pubkey *pubkeys, + size_t n_pubkeys, + const unsigned char *seed +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4) SECP256K1_WARN_UNUSED_RESULT; + + +/** Destroy an aggregated signature context object. If passed NULL, is a no-op. + * + * Args: aggctx: an existing context object + */ +SECP256K1_API void secp256k1_aggsig_context_destroy( + secp256k1_aggsig_context *aggctx +); + +/** Generate a nonce pair for a single signature part in an aggregated signature + * + * Returns: 1 on success + * 0 if a nonce has already been generated for this index + * Args: ctx: an existing context object, initialized for signing (cannot be NULL) + * aggctx: an aggsig context object (cannot be NULL) + * In: index: which signature to generate a nonce for + */ +SECP256K1_API int secp256k1_aggsig_generate_nonce( + const secp256k1_context* ctx, + secp256k1_aggsig_context* aggctx, + size_t index +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT; + +/** Generates and exports a secure nonce, of which the public part can be shared + * and fed back for a later signature + * + * Returns: 1 on success + * Args: ctx: an existing context object, initialized for signing (cannot be NULL) + * In: seed: A random seed value + * Out: 
secnonce32: The secure nonce (scalar), guaranteed to be Jacobi 1 + */ +SECP256K1_API int secp256k1_aggsig_export_secnonce_single( + const secp256k1_context* ctx, + unsigned char* secnonce32, + const unsigned char* seed +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_WARN_UNUSED_RESULT; + +/** Generate a single-signer signature (or partial sig), without a stored context + * + * Returns: 1 on success, 0 on failure + * Args: ctx: an existing context object, initialized for signing (cannot be NULL) + * Out: sig64: the completed signature (cannot be NULL) + * In: msg32: the message to sign (cannot be NULL) + * seckey32: the secret signing key (cannot be NULL) + * secnonce32: secret nonce to use. If NULL, a nonce will be generated + * extra32: if non-NULL, add this key to s + * pubnonce_for_e: If this is non-NULL, encode this value in e instead of the derived + * pubnonce_total: If non-NULL, allow this signature to be included in combined sig + * in all cases by negating secnonce32 if the public nonce total has jacobi symbol + * -1. 
secnonce32 must also be provided + * pubkey_for_e: If this is non-NULL, encode this value in e + * seed: a 32-byte seed to use for the nonce-generating RNG (cannot be NULL) + */ +SECP256K1_API int secp256k1_aggsig_sign_single( + const secp256k1_context* ctx, + unsigned char *sig64, + const unsigned char *msg32, + const unsigned char *seckey32, + const unsigned char* secnonce32, + const unsigned char* extra32, + const secp256k1_pubkey *pubnonce_for_e, + const secp256k1_pubkey* pubnonce_total, + const secp256k1_pubkey* pubkey_for_e, + const unsigned char* seed) +SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(10) SECP256K1_WARN_UNUSED_RESULT; + +/** Generate a single signature part in an aggregated signature + * + * Returns: 1 on success, 0 on failure + * Args: ctx: an existing context object, initialized for signing (cannot be NULL) + * aggctx: an aggsig context object (cannot be NULL) + * Out: partial: the generated signature part (cannot be NULL) + * In: msg32: the message to sign (cannot be NULL) + * seckey32: the secret signing key (cannot be NULL) + * index: the index of this signature in the aggregate signature + */ +SECP256K1_API int secp256k1_aggsig_partial_sign( + const secp256k1_context* ctx, + secp256k1_aggsig_context* aggctx, + secp256k1_aggsig_partial_signature *partial, + const unsigned char *msg32, + const unsigned char *seckey32, + size_t index +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_WARN_UNUSED_RESULT; + + +/** Aggregate multiple signature parts into a single aggregated signature + * + * Returns: 1 on success, 0 on failure + * Args: ctx: an existing context object, initialized for signing (cannot be NULL) + * aggctx: an aggsig context object (cannot be NULL) + * Out: sig64: the completed signature (cannot be NULL) + * In: partial: an array of partial signatures to aggregate 
(cannot be NULL) + * n_sigs: the number of partial signatures provided + */ +SECP256K1_API int secp256k1_aggsig_combine_signatures( + const secp256k1_context* ctx, + secp256k1_aggsig_context* aggctx, + unsigned char *sig64, + const secp256k1_aggsig_partial_signature *partial, + size_t n_sigs +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_WARN_UNUSED_RESULT; + +/** Simple addition of two signatures + two public nonces into a single signature + * + * Returns: 1 on success, 0 on failure + * Args: ctx: an existing context object, initialized for signing (cannot be NULL) + * Out: sig64: the completed signature (s1+s2,n1+n2) (cannot be NULL) + * In: sig1_64: a signature (from which s1 will be taken) + * sig2_64: another signature (from which s2 will be taken) + * pubnonce_total: the total of all public nonces, will simply become R (negated if needed) + */ + +SECP256K1_API int secp256k1_aggsig_add_signatures_single( + const secp256k1_context* ctx, + unsigned char *sig64, + const unsigned char** sigs, + size_t num_sigs, + const secp256k1_pubkey* pubnonce_total +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5) SECP256K1_WARN_UNUSED_RESULT; + + +/** Verify a single-signer signature, without a stored context + * + * Returns: 1 on success, 0 on failure + * Args: ctx: an existing context object, initialized for signing (cannot be NULL) + * In: sig64: signature (cannot be NULL) + * msg32: the message to verify (cannot be NULL) + * pubnonce: if non-NULL, override the public nonce used to calculate e + * pubkey: the public key (cannot be NULL) + * pubkey_total: if non-NULL, encode this value in e + * extra_pubkey: if non-NULL, subtract this pubkey from sG + * is_partial: whether to ignore the jacobi symbol of the combined R, set this to 1 + * to verify partial signatures that may have had their secret nonces negated + */ +SECP256K1_API int 
secp256k1_aggsig_verify_single( + const secp256k1_context* ctx, + const unsigned char *sig64, + const unsigned char *msg32, + const secp256k1_pubkey *pubnonce, + const secp256k1_pubkey *pubkey, + const secp256k1_pubkey *pubkey_total, + const secp256k1_pubkey *extra_pubkey, + const int is_partial) +SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5) SECP256K1_WARN_UNUSED_RESULT; + +/** Verify an aggregate signature + * + * Returns: 1 if the signature is valid, 0 if not + * Args: ctx: an existing context object (cannot be NULL) + * scratch: a scratch space (cannot be NULL) + * In: sig64: the signature to verify (cannot be NULL) + * msg32: the message that should be signed (cannot be NULL) + * pubkeys: array of public keys (cannot be NULL) + * n_pubkeys: the number of public keys + */ +SECP256K1_API int secp256k1_aggsig_verify( + const secp256k1_context* ctx, + secp256k1_scratch_space* scratch, + const unsigned char *sig64, + const unsigned char *msg32, + const secp256k1_pubkey *pubkeys, + size_t n_pubkeys +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_WARN_UNUSED_RESULT; + +/** Verify an aggregate signature, building scratch space internally beforehand + * + * Returns: 1 if the signature is valid, 0 if not + * Args: ctx: an existing context object (cannot be NULL) + * In: sig64: the signature to verify (cannot be NULL) + * msg32: the message that should be signed (cannot be NULL) + * pubkeys: array of public keys (cannot be NULL) + * n_pubkeys: the number of public keys + */ + +SECP256K1_API int secp256k1_aggsig_build_scratch_and_verify( + const secp256k1_context* ctx, + const unsigned char *sig64, + const unsigned char *msg32, + const secp256k1_pubkey *pubkeys, + size_t n_pubkeys +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_WARN_UNUSED_RESULT; + +# ifdef __cplusplus +} 
+# endif + +#endif diff --git a/src/secp256k1/include/secp256k1_bulletproofs.h b/src/secp256k1/include/secp256k1_bulletproofs.h new file mode 100644 index 0000000000000..e22baaf461fc4 --- /dev/null +++ b/src/secp256k1/include/secp256k1_bulletproofs.h @@ -0,0 +1,188 @@ +#ifndef _SECP256K1_BULLETPROOF_ +# define _SECP256K1_BULLETPROOF_ + +# include "secp256k1.h" +# include "secp256k1_generator.h" +# include "secp256k1_commitment.h" + +# ifdef __cplusplus +extern "C" { +# endif + +/** Opaque structure representing a large number of NUMS generators */ +typedef struct secp256k1_bulletproof_generators secp256k1_bulletproof_generators; + +/* Maximum depth of 31 lets us validate an aggregate of 2^25 64-bit proofs */ +#define SECP256K1_BULLETPROOF_MAX_DEPTH 31 + +/* Size of a hypothetical 31-depth rangeproof, in bytes */ +#define SECP256K1_BULLETPROOF_MAX_PROOF (160 + 36*32 + 7) + +/** Allocates and initializes a list of NUMS generators, along with precomputation data + * Returns a list of generators, or NULL if allocation failed. 
+ * Args: ctx: pointer to a context object (cannot be NULL) + * In: blinding_gen: generator that blinding factors will be multiplied by (cannot be NULL) + * n: number of NUMS generators to produce + */ +SECP256K1_API secp256k1_bulletproof_generators *secp256k1_bulletproof_generators_create( + const secp256k1_context* ctx, + const secp256k1_generator *blinding_gen, + size_t n +) SECP256K1_ARG_NONNULL(1); + +/** Destroys a list of NUMS generators, freeing allocated memory + * Args: ctx: pointer to a context object (cannot be NULL) + * gen: pointer to the generator set to be destroyed + */ +SECP256K1_API void secp256k1_bulletproof_generators_destroy( + const secp256k1_context* ctx, + secp256k1_bulletproof_generators *gen +) SECP256K1_ARG_NONNULL(1); + +/** Verifies a single bulletproof (aggregate) rangeproof + * Returns: 1: rangeproof was valid + * 0: rangeproof was invalid, or out of memory + * Args: ctx: pointer to a context object initialized for verification (cannot be NULL) + * scratch: scratch space with enough memory for verification (cannot be NULL) + * gens: generator set with at least 2*nbits*n_commits many generators (cannot be NULL) + * In: proof: byte-serialized rangeproof (cannot be NULL) + * plen: length of the proof + * min_value: array of minimum values to prove ranges above, or NULL for all-zeroes + * commit: array of pedersen commitment that this rangeproof is over (cannot be NULL) + * n_commits: number of commitments in the above array (cannot be 0) + * nbits: number of bits proven for each range + * value_gen: generator multiplied by value in pedersen commitments (cannot be NULL) + * extra_commit: additonal data committed to by the rangeproof (may be NULL if `extra_commit_len` is 0) + * extra_commit_len: length of additional data + */ +SECP256K1_WARN_UNUSED_RESULT SECP256K1_API int secp256k1_bulletproof_rangeproof_verify( + const secp256k1_context* ctx, + secp256k1_scratch_space* scratch, + const secp256k1_bulletproof_generators *gens, + const 
unsigned char* proof, + size_t plen, + const uint64_t* min_value, + const secp256k1_pedersen_commitment* commit, + size_t n_commits, + size_t nbits, + const secp256k1_generator* value_gen, + const unsigned char* extra_commit, + size_t extra_commit_len +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(7) SECP256K1_ARG_NONNULL(10); + +/** Batch-verifies multiple bulletproof (aggregate) rangeproofs of the same size using same generator + * Returns: 1: all rangeproofs were valid + * 0: some rangeproof was invalid, or out of memory + * Args: ctx: pointer to a context object initialized for verification (cannot be NULL) + * scratch: scratch space with enough memory for verification (cannot be NULL) + * gens: generator set with at least 2*nbits*n_commits many generators (cannot be NULL) + * In: proof: array of byte-serialized rangeproofs (cannot be NULL) + * n_proofs: number of proofs in the above array, and number of arrays in the `commit` array + * plen: length of every individual proof + * min_value: array of arrays of minimum values to prove ranges above, or NULL for all-zeroes + * commit: array of arrays of pedersen commitment that the rangeproofs is over (cannot be NULL) + * n_commits: number of commitments in each element of the above array (cannot be 0) + * nbits: number of bits in each proof + * value_gen: generator multiplied by value in pedersen commitments (cannot be NULL) + * extra_commit: additonal data committed to by the rangeproof (may be NULL if `extra_commit_len` is 0) + * extra_commit_len: array of lengths of additional data + */ +SECP256K1_WARN_UNUSED_RESULT SECP256K1_API int secp256k1_bulletproof_rangeproof_verify_multi( + const secp256k1_context* ctx, + secp256k1_scratch_space* scratch, + const secp256k1_bulletproof_generators *gens, + const unsigned char* const* proof, + size_t n_proofs, + size_t plen, + const uint64_t* const* min_value, + const 
secp256k1_pedersen_commitment* const* commit, + size_t n_commits, + size_t nbits, + const secp256k1_generator* value_gen, + const unsigned char* const* extra_commit, + size_t *extra_commit_len +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(8); + +/** Extracts the value and blinding factor from a single-commit rangeproof given a secret nonce + * Returns: 1: value and blinding factor were extracted and matched the input commit + * 0: one of the above was not true, extraction failed + * Args: ctx: pointer to a context object (cannot be NULL) + * Out: value: pointer to value that will be extracted + * blind: pointer to 32-byte array for blinding factor to be extracted + * In: proof: byte-serialized rangeproof (cannot be NULL) + * plen: length of every individual proof + * min_value: minimum value that the proof ranges over + * commit: pedersen commitment that the rangeproof is over (cannot be NULL) + * value_gen: generator multiplied by value in pedersen commitments (cannot be NULL) + * nonce: random 32-byte seed used to derive blinding factors (cannot be NULL) + * extra_commit: additional data committed to by the rangeproof + * extra_commit_len: length of additional data + * message: optional 20 bytes of message to recover + */ +SECP256K1_WARN_UNUSED_RESULT SECP256K1_API int secp256k1_bulletproof_rangeproof_rewind( + const secp256k1_context* ctx, + uint64_t* value, + unsigned char* blind, + const unsigned char* proof, + size_t plen, + uint64_t min_value, + const secp256k1_pedersen_commitment* commit, + const secp256k1_generator* value_gen, + const unsigned char* nonce, + const unsigned char* extra_commit, + size_t extra_commit_len, + unsigned char* message +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(7) SECP256K1_ARG_NONNULL(8); + +/** Produces an aggregate Bulletproof rangeproof for a set of Pedersen commitments 
+ * Returns: 1: rangeproof was successfully created + * 0: rangeproof could not be created, or out of memory + * Args: ctx: pointer to a context object initialized for signing and verification (cannot be NULL) + * scratch: scratch space with enough memory for verification (cannot be NULL) + * gens: generator set with at least 2*nbits*n_commits many generators (cannot be NULL) + * Out: proof: byte-serialized rangeproof (cannot be NULL) + * In/out: plen: pointer to size of `proof`, to be replaced with actual length of proof (cannot be NULL) + * tau_x: only for multi-party; 32-byte, output in second step or input in final step + * t_one: only for multi-party; public key, output in first step or input for the others + * t_two: only for multi-party; public key, output in first step or input for the others + * In: value: array of values committed by the Pedersen commitments (cannot be NULL) + * min_value: array of minimum values to prove ranges above, or NULL for all-zeroes + * blind: array of blinding factors of the Pedersen commitments (cannot be NULL) + * commits: only for multi-party; array of pointers to commitments + * n_commits: number of entries in the `value` and `blind` arrays + * value_gen: generator multiplied by value in pedersen commitments (cannot be NULL) + * nbits: number of bits proven for each range + * nonce: random 32-byte seed used to derive blinding factors (cannot be NULL) + * private_nonce: only for multi-party; random 32-byte seed used to derive private blinding factors + * extra_commit: additonal data committed to by the rangeproof + * extra_commit_len: length of additional data + * message: optional 20 bytes of message that can be recovered by rewinding with the correct nonce + */ +SECP256K1_WARN_UNUSED_RESULT SECP256K1_API int secp256k1_bulletproof_rangeproof_prove( + const secp256k1_context* ctx, + secp256k1_scratch_space* scratch, + const secp256k1_bulletproof_generators* gens, + unsigned char* proof, + size_t* plen, + unsigned char* tau_x, 
+ secp256k1_pubkey* t_one, + secp256k1_pubkey* t_two, + const uint64_t* value, + const uint64_t* min_value, + const unsigned char* const* blind, + const secp256k1_pedersen_commitment* const* commits, + size_t n_commits, + const secp256k1_generator* value_gen, + size_t nbits, + const unsigned char* nonce, + const unsigned char* private_nonce, + const unsigned char* extra_commit, + size_t extra_commit_len, + const unsigned char* message +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(9) SECP256K1_ARG_NONNULL(11) SECP256K1_ARG_NONNULL(14) SECP256K1_ARG_NONNULL(16); + +# ifdef __cplusplus +} +# endif + +#endif diff --git a/src/secp256k1/include/secp256k1_commitment.h b/src/secp256k1/include/secp256k1_commitment.h new file mode 100644 index 0000000000000..6d534ebf48e3f --- /dev/null +++ b/src/secp256k1/include/secp256k1_commitment.h @@ -0,0 +1,251 @@ +#ifndef _SECP256K1_COMMITMENT_ +# define _SECP256K1_COMMITMENT_ + +# include "secp256k1.h" +# include "secp256k1_generator.h" + +# ifdef __cplusplus +extern "C" { +# endif + +#include + +/** Opaque data structure that stores a Pedersen commitment + * + * The exact representation of data inside is implementation defined and not + * guaranteed to be portable between different platforms or versions. It is + * however guaranteed to be 64 bytes in size, and can be safely copied/moved. + * If you need to convert to a format suitable for storage, transmission, or + * comparison, use secp256k1_pedersen_commitment_serialize and + * secp256k1_pedersen_commitment_parse. + */ +typedef struct { + unsigned char data[64]; +} secp256k1_pedersen_commitment; + +/** + * Static constant generator 'h' maintained for historical reasons. + */ +SECP256K1_API extern const secp256k1_generator *secp256k1_generator_h; + +/** Parse a 33-byte commitment into a commitment object. + * + * Returns: 1 if input contains a valid commitment. + * Args: ctx: a secp256k1 context object. 
+ * Out: commit: pointer to the output commitment object + * In: input: pointer to a 33-byte serialized commitment key + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_commitment_parse( + const secp256k1_context* ctx, + secp256k1_pedersen_commitment* commit, + const unsigned char *input +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Serialize a commitment object into a serialized byte sequence. + * + * Returns: 1 always. + * Args: ctx: a secp256k1 context object. + * Out: output: a pointer to a 33-byte byte array + * In: commit: a pointer to a secp256k1_pedersen_commitment containing an + * initialized commitment + */ +SECP256K1_API int secp256k1_pedersen_commitment_serialize( + const secp256k1_context* ctx, + unsigned char *output, + const secp256k1_pedersen_commitment* commit +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Initialize a context for usage with Pedersen commitments. */ +void secp256k1_pedersen_context_initialize(secp256k1_context* ctx); + +/** Generate a Pedersen commitment. + * Returns 1: Commitment successfully created. + * 0: Error. The blinding factor is larger than the group order + * (probability for random 32 byte number < 2^-127) or results in the + * point at infinity. Retry with a different factor. + * In: ctx: pointer to a context object (cannot be NULL) + * blind: pointer to a 32-byte blinding factor (cannot be NULL) + * value: unsigned 64-bit integer value to commit to. + * value_gen: value generator 'h' + * blind_gen: blinding factor generator 'g' + * Out: commit: pointer to the commitment (cannot be NULL) + * + * Blinding factors can be generated and verified in the same way as secp256k1 private keys for ECDSA. 
+ */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_commit( + const secp256k1_context* ctx, + secp256k1_pedersen_commitment *commit, + const unsigned char *blind, + uint64_t value, + const secp256k1_generator *value_gen, + const secp256k1_generator *blind_gen +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6); + +/** Generate a Pedersen commitment from two blinding factors. + * Returns 1: Commitment successfully created. + * 0: Error. The blinding factor is larger than the group order + * (probability for random 32 byte number < 2^-127) or results in the + * point at infinity. Retry with a different factor. + * In: ctx: pointer to a context object (cannot be NULL) + * blind: pointer to a 32-byte blinding factor (cannot be NULL) + * value: pointer to a 32-byte blinding factor (cannot be NULL) + * value_gen: value generator 'h' + * blind_gen: blinding factor generator 'g' + * Out: commit: pointer to the commitment (cannot be NULL) + * + * Blinding factors can be generated and verified in the same way as secp256k1 private keys for ECDSA. + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_blind_commit( + const secp256k1_context* ctx, + secp256k1_pedersen_commitment *commit, + const unsigned char *blind, + const unsigned char *value, + const secp256k1_generator *value_gen, + const secp256k1_generator *blind_gen +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6); + +/** Computes the sum of multiple positive and negative blinding factors. + * Returns 1: Sum successfully computed. + * 0: Error. A blinding factor is larger than the group order + * (probability for random 32 byte number < 2^-127). Retry with + * different factors. 
+ * In: ctx: pointer to a context object (cannot be NULL) + * blinds: pointer to pointers to 32-byte character arrays for blinding factors. (cannot be NULL) + * n: number of factors pointed to by blinds. + * npositive: how many of the input factors should be treated with a positive sign. + * Out: blind_out: pointer to a 32-byte array for the sum (cannot be NULL) + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_blind_sum( + const secp256k1_context* ctx, + unsigned char *blind_out, + const unsigned char * const *blinds, + size_t n, + size_t npositive +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Computes the sum of multiple positive and negative pedersen commitments + * Returns 1: sum successfully computed. + * In: ctx: pointer to a context object, initialized for Pedersen commitment (cannot be NULL) + * commits: pointer to array of pointers to the commitments. (cannot be NULL if pcnt is non-zero) + * pcnt: number of commitments pointed to by commits. + * ncommits: pointer to array of pointers to the negative commitments. (cannot be NULL if ncnt is non-zero) + * ncnt: number of commitments pointed to by ncommits. + * Out: commit_out: pointer to the commitment (cannot be NULL) + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_commit_sum( + const secp256k1_context* ctx, + secp256k1_pedersen_commitment *commit_out, + const secp256k1_pedersen_commitment * const* commits, + size_t pcnt, + const secp256k1_pedersen_commitment * const* ncommits, + size_t ncnt +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); + +/** Verify a tally of Pedersen commitments + * Returns 1: commitments successfully sum to zero. + * 0: Commitments do not sum to zero or other error. + * In: ctx: pointer to a context object (cannot be NULL) + * pos: pointer to array of pointers to the commitments. (cannot be NULL if `n_pos` is non-zero) + * n_pos: number of commitments pointed to by `pos`. 
+ * neg: pointer to array of pointers to the negative commitments. (cannot be NULL if `n_neg` is non-zero) + * n_neg: number of commitments pointed to by `neg`. + * + * This computes sum(pos[0..n_pos)) - sum(neg[0..n_neg)) == 0. + * + * A Pedersen commitment is xG + vA where G and A are generators for the secp256k1 group and x is a blinding factor, + * while v is the committed value. For a collection of commitments to sum to zero, for each distinct generator + * A all blinding factors and all values must sum to zero. + * + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_verify_tally( + const secp256k1_context* ctx, + const secp256k1_pedersen_commitment * const* pos, + size_t n_pos, + const secp256k1_pedersen_commitment * const* neg, + size_t n_neg +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); + +/** Sets the final Pedersen blinding factor correctly when the generators themselves + * have blinding factors. + * + * Consider a generator of the form A' = A + rG, where A is the "real" generator + * but A' is the generator provided to verifiers. Then a Pedersen commitment + * P = vA' + r'G really has the form vA + (vr + r')G. To get all these (vr + r') + * to sum to zero for multiple commitments, we take three arrays consisting of + * the `v`s, `r`s, and `r'`s, respectively called `value`s, `generator_blind`s + * and `blinding_factor`s, and sum them. + * + * The function then subtracts the sum of all (vr + r') from the last element + * of the `blinding_factor` array, setting the total sum to zero. + * + * Returns 1: Blinding factor successfully computed. + * 0: Error. A blinding_factor or generator_blind are larger than the group + * order (probability for random 32 byte number < 2^-127). Retry with + * different values. + * + * In: ctx: pointer to a context object + * value: array of asset values, `v` in the above paragraph. + * May not be NULL unless `n_total` is 0. 
+ * generator_blind: array of asset blinding factors, `r` in the above paragraph + * May not be NULL unless `n_total` is 0. + * n_total: Total size of the above arrays + * n_inputs: How many of the initial array elements represent commitments that + * will be negated in the final sum + * In/Out: blinding_factor: array of commitment blinding factors, `r'` in the above paragraph + * May not be NULL unless `n_total` is 0. + * the last value will be modified to get the total sum to zero. + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_blind_generator_blind_sum( + const secp256k1_context* ctx, + const uint64_t *value, + const unsigned char* const* generator_blind, + unsigned char* const* blinding_factor, + size_t n_total, + size_t n_inputs +); + +/** Calculates the blinding factor x' = x + SHA256(xG+vH | xJ), used in the switch commitment x'G+vH + * + * Returns 1: Blinding factor successfully computed. + * 0: Error. Retry with different values. + * + * Args: ctx: pointer to a context object + * Out: blind_switch: blinding factor for the switch commitment + * In: blind: pointer to a 32-byte blinding factor + * value: unsigned 64-bit integer value to commit to + * value_gen: value generator 'h' + * blind_gen: blinding factor generator 'g' + * switch_pubkey: pointer to public key 'j' + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_blind_switch( + const secp256k1_context* ctx, + unsigned char* blind_switch, + const unsigned char* blind, + uint64_t value, + const secp256k1_generator* value_gen, + const secp256k1_generator* blind_gen, + const secp256k1_pubkey* switch_pubkey +); + +/** Converts a Pedersen commit to a pubkey + * + * Returns 1: Public key successfully computed. + * 0: Error. 
+* + * In: ctx: pointer to a context object + * commit: pointer to a single commit + * Out: pubkey: resulting pubkey + * + */ + +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_pedersen_commitment_to_pubkey( + const secp256k1_context* ctx, + secp256k1_pubkey* pubkey, + const secp256k1_pedersen_commitment* commit +); + +# ifdef __cplusplus +} +# endif + +#endif diff --git a/src/secp256k1/include/secp256k1_generator.h b/src/secp256k1/include/secp256k1_generator.h new file mode 100644 index 0000000000000..9789250a2d49f --- /dev/null +++ b/src/secp256k1/include/secp256k1_generator.h @@ -0,0 +1,99 @@ +#ifndef _SECP256K1_GENERATOR_ +# define _SECP256K1_GENERATOR_ + +# include "secp256k1.h" + +# ifdef __cplusplus +extern "C" { +# endif + +#include <stdint.h> + +/** Opaque data structure that stores a base point + * + * The exact representation of data inside is implementation defined and not + * guaranteed to be portable between different platforms or versions. It is + * however guaranteed to be 64 bytes in size, and can be safely copied/moved. + * If you need to convert to a format suitable for storage, transmission, or + * comparison, use secp256k1_generator_serialize and secp256k1_generator_parse. + */ +typedef struct { + unsigned char data[64]; +} secp256k1_generator; + +/** Standard secp256k1 generator G */ +SECP256K1_API extern const secp256k1_generator secp256k1_generator_const_g; + +/** Alternate secp256k1 generator from Elements Alpha */ +SECP256K1_API extern const secp256k1_generator secp256k1_generator_const_h; + +/** Parse a 33-byte generator byte sequence into a generator object. + * + * Returns: 1 if input contains a valid generator. + * Args: ctx: a secp256k1 context object. 
+ * Out: commit: pointer to the output generator object + * In: input: pointer to a 33-byte serialized generator + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_generator_parse( + const secp256k1_context* ctx, + secp256k1_generator* commit, + const unsigned char *input +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Serialize a 33-byte generator into a serialized byte sequence. + * + * Returns: 1 always. + * Args: ctx: a secp256k1 context object. + * Out: output: a pointer to a 33-byte byte array + * In: commit: a pointer to a generator + */ +SECP256K1_API int secp256k1_generator_serialize( + const secp256k1_context* ctx, + unsigned char *output, + const secp256k1_generator* commit +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Generate a generator for the curve. + * + * Returns: 0 in the highly unlikely case the seed is not acceptable, + * 1 otherwise. + * Args: ctx: a secp256k1 context object + * Out: gen: a generator object + * In: seed32: a 32-byte seed + * + * If successful a valid generator will be placed in gen. The produced + * generators are distributed uniformly over the curve, and will not have a + * known discrete logarithm with respect to any other generator produced, + * or to the base generator G. + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_generator_generate( + const secp256k1_context* ctx, + secp256k1_generator* gen, + const unsigned char *seed32 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Generate a blinded generator for the curve. + * + * Returns: 0 in the highly unlikely case the seed is not acceptable or when + * blind is out of range. 1 otherwise. + * Args: ctx: a secp256k1 context object, initialized for signing + * Out: gen: a generator object + * In: key32: a 32-byte seed + * blind32: a 32-byte secret value to blind the generator with. 
+ * + * The result is equivalent to first calling secp256k1_generator_generate, + * converting the result to a public key, calling secp256k1_ec_pubkey_tweak_add, + * and then converting back to generator form. + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_generator_generate_blinded( + const secp256k1_context* ctx, + secp256k1_generator* gen, + const unsigned char *key32, + const unsigned char *blind32 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); + +# ifdef __cplusplus +} +# endif + +#endif diff --git a/src/secp256k1/include/secp256k1_schnorrsig.h b/src/secp256k1/include/secp256k1_schnorrsig.h index 5fedcb07b0519..d95bcc00494b1 100644 --- a/src/secp256k1/include/secp256k1_schnorrsig.h +++ b/src/secp256k1/include/secp256k1_schnorrsig.h @@ -175,6 +175,28 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorrsig_verify( const secp256k1_xonly_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); +/** Verifies a set of Schnorr signatures. + * + * Returns 1 if all succeeded, 0 otherwise. In particular, returns 1 if n_sigs is 0. + * + * Args: ctx: a secp256k1 context object, initialized for verification. + * scratch: scratch space used for the multiexponentiation + * In: sig: array of signatures, or NULL if there are no signatures + * msg32: array of messages, or NULL if there are no signatures + * pk: array of public keys, or NULL if there are no signatures + * n_sigs: number of signatures in above arrays. Must be smaller than + * 2^31 and smaller than half the maximum size_t value. Must be 0 + * if above arrays are NULL. 
+ */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorrsig_verify_batch( + const secp256k1_context* ctx, + secp256k1_scratch_space* scratch, + const uint8_t* const* sig, + const unsigned char* const* msg32, + const secp256k1_pubkey* const* pk, + size_t n_sigs +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); + #ifdef __cplusplus } #endif diff --git a/src/secp256k1/src/field.h b/src/secp256k1/src/field.h index 2584a494eeb09..ab661750b1bd0 100644 --- a/src/secp256k1/src/field.h +++ b/src/secp256k1/src/field.h @@ -111,6 +111,9 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a); * itself. */ static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a); +/** Checks whether a field element is a quadratic residue. */ +static int secp256k1_fe_is_quad_var(const secp256k1_fe *a); + /** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be * at most 8. The output magnitude is 1 (but not guaranteed to be normalized). 
*/ static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a); diff --git a/src/secp256k1/src/field_impl.h b/src/secp256k1/src/field_impl.h index 0a4a04d9ac2a9..f1bdffd397086 100644 --- a/src/secp256k1/src/field_impl.h +++ b/src/secp256k1/src/field_impl.h @@ -135,4 +135,9 @@ static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) { return secp256k1_fe_equal(&t1, a); } +static int secp256k1_fe_is_quad_var(const secp256k1_fe *a) { + secp256k1_fe r; + return secp256k1_fe_sqrt(&r, a); +} + #endif /* SECP256K1_FIELD_IMPL_H */ diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h index bb7dae1cf781e..8a6adfe97f562 100644 --- a/src/secp256k1/src/group.h +++ b/src/secp256k1/src/group.h @@ -47,6 +47,12 @@ typedef struct { /** Set a group element equal to the point with given X and Y coordinates */ static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y); +/** Set a group element (affine) equal to the point with the given X coordinate + * and a Y coordinate that is a quadratic residue modulo p. The return value + * is true iff a coordinate with the given X coordinate exists. + */ +static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x); + /** Set a group element (affine) equal to the point with the given X coordinate, and given oddness * for Y. Return value indicates whether the result is valid. */ static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd); @@ -106,6 +112,9 @@ static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a); /** Check whether a group element is the point at infinity. */ static int secp256k1_gej_is_infinity(const secp256k1_gej *a); +/** Check whether a group element's y coordinate is a quadratic residue. */ +static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a); + /** Set r equal to the double of a. Constant time. 
*/ static void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a); diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h index 63735ab682ace..75c7f1f6fdb63 100644 --- a/src/secp256k1/src/group_impl.h +++ b/src/secp256k1/src/group_impl.h @@ -48,6 +48,8 @@ static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST( 0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc, 0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417 ); + +const int CURVE_B = 2; # elif EXHAUSTIVE_TEST_ORDER == 199 static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_G_ORDER_199; @@ -55,6 +57,8 @@ static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST( 0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1, 0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb ); + +const int CURVE_B = 4; # else # error No known generator for the specified exhaustive test group order. # endif @@ -62,6 +66,8 @@ static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST( static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_G; static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7); + +const int CURVE_B = 7; #endif static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) { @@ -211,6 +217,17 @@ static void secp256k1_ge_clear(secp256k1_ge *r) { secp256k1_fe_clear(&r->y); } +static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) { + secp256k1_fe x2, x3, c; + r->x = *x; + secp256k1_fe_sqr(&x2, x); + secp256k1_fe_mul(&x3, x, &x2); + r->infinity = 0; + secp256k1_fe_set_int(&c, CURVE_B); + secp256k1_fe_add(&c, &x3); + return secp256k1_fe_sqrt(&r->y, &c); +} + static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) { secp256k1_fe x2, x3; r->x = *x; @@ -674,6 +691,20 @@ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) { secp256k1_fe_mul(&r->x, &r->x, &secp256k1_const_beta); } +static int secp256k1_gej_has_quad_y_var(const 
secp256k1_gej *a) { + secp256k1_fe yz; + + if (a->infinity) { + return 0; + } + + /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as + * that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z + is */ + secp256k1_fe_mul(&yz, &a->y, &a->z); + return secp256k1_fe_is_quad_var(&yz); +} + static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) { #ifdef EXHAUSTIVE_TEST_ORDER secp256k1_gej out; diff --git a/src/secp256k1/src/modules/aggsig/Makefile.am.include b/src/secp256k1/src/modules/aggsig/Makefile.am.include new file mode 100644 index 0000000000000..e00bd6a505c9a --- /dev/null +++ b/src/secp256k1/src/modules/aggsig/Makefile.am.include @@ -0,0 +1,8 @@ +include_HEADERS += include/secp256k1_aggsig.h +noinst_HEADERS += src/modules/aggsig/main_impl.h +noinst_HEADERS += src/modules/aggsig/tests_impl.h +if USE_BENCHMARK +noinst_PROGRAMS += bench_aggsig +bench_aggsig_SOURCES = src/bench_aggsig.c +bench_aggsig_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) +endif diff --git a/src/secp256k1/src/modules/aggsig/main_impl.h b/src/secp256k1/src/modules/aggsig/main_impl.h new file mode 100644 index 0000000000000..036e03601100d --- /dev/null +++ b/src/secp256k1/src/modules/aggsig/main_impl.h @@ -0,0 +1,614 @@ +/********************************************************************** + * Copyright (c) 2017 Andrew Poelstra, Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_MODULE_AGGSIG_MAIN_ +#define _SECP256K1_MODULE_AGGSIG_MAIN_ + +#include "include/secp256k1.h" +#include "include/secp256k1_aggsig.h" +#include "hash.h" + +enum nonce_progress { + /* Nonce has not been generated by us or received from another party */ + NONCE_PROGRESS_UNKNOWN = 0, + /* Public nonce has been received from another party */ + 
NONCE_PROGRESS_OTHER = 1, + /* Public nonce has been generated by us but not used in signing. */ + NONCE_PROGRESS_OURS = 2, + /* Public nonce has been generated by us and used in signing. An attempt to + * use a nonce twice will result in an error. */ + NONCE_PROGRESS_SIGNED = 3 +}; + +struct secp256k1_aggsig_context_struct { + enum nonce_progress *progress; + secp256k1_pubkey *pubkeys; + secp256k1_scalar *secnonce; + secp256k1_gej pubnonce_sum; + size_t n_sigs; + secp256k1_rfc6979_hmac_sha256 rng; +}; + +/* Compute sighash for a single-signer */ +static int secp256k1_compute_sighash_single(const secp256k1_context *ctx, secp256k1_scalar *r, const secp256k1_pubkey *pubnonce, const secp256k1_pubkey *pubkey, const unsigned char *msghash32) { + unsigned char output[32]; + unsigned char buf[33]; + size_t buflen = sizeof(buf); + int overflow; + + secp256k1_sha256 hasher; + secp256k1_sha256_initialize(&hasher); + + /* Encode public nonce */ + CHECK(secp256k1_ec_pubkey_serialize(ctx, buf, &buflen, pubnonce, SECP256K1_EC_COMPRESSED)); + secp256k1_sha256_write(&hasher, buf+1, 32); + + /* Encode public key */ + if (pubkey != NULL) { + buflen = sizeof(buf); + CHECK(secp256k1_ec_pubkey_serialize(ctx, buf, &buflen, pubkey, SECP256K1_EC_COMPRESSED)); + secp256k1_sha256_write(&hasher, buf, 33); + } + + /* Encode message */ + secp256k1_sha256_write(&hasher, msghash32, 32); + + /* Finish */ + secp256k1_sha256_finalize(&hasher, output); + secp256k1_scalar_set_b32(r, output, &overflow); + return !overflow; +} + +/* Compute the hash of all the data that every pubkey needs to sign */ +static void secp256k1_compute_prehash(const secp256k1_context *ctx, unsigned char *output, const secp256k1_pubkey *pubkeys, size_t n_pubkeys, const secp256k1_fe *nonce_ge_x, const unsigned char *msghash32) { + size_t i; + unsigned char buf[33]; + size_t buflen = sizeof(buf); + secp256k1_sha256 hasher; + secp256k1_sha256_initialize(&hasher); + + /* Encode nonce */ + secp256k1_fe_get_b32(buf, nonce_ge_x); + 
secp256k1_sha256_write(&hasher, buf, 32); + + /* Encode pubkeys */ + for (i = 0; i < n_pubkeys; i++) { + CHECK(secp256k1_ec_pubkey_serialize(ctx, buf, &buflen, &pubkeys[i], SECP256K1_EC_COMPRESSED)); + secp256k1_sha256_write(&hasher, buf, sizeof(buf)); + } + + /* Encode message */ + secp256k1_sha256_write(&hasher, msghash32, 32); + + /* Finish */ + secp256k1_sha256_finalize(&hasher, output); +} + +/* Add the index to the above hash to customize it for each pubkey */ +static int secp256k1_compute_sighash(secp256k1_scalar *r, const unsigned char *prehash, size_t index) { + unsigned char output[32]; + int overflow; + secp256k1_sha256 hasher; + secp256k1_sha256_initialize(&hasher); + /* Encode index as a UTF8-style bignum */ + while (index > 0) { + unsigned char ch = index & 0x7f; + secp256k1_sha256_write(&hasher, &ch, 1); + index >>= 7; + } + secp256k1_sha256_write(&hasher, prehash, 32); + secp256k1_sha256_finalize(&hasher, output); + secp256k1_scalar_set_b32(r, output, &overflow); + return !overflow; +} + +secp256k1_aggsig_context* secp256k1_aggsig_context_create(const secp256k1_context *ctx, const secp256k1_pubkey *pubkeys, size_t n_pubkeys, const unsigned char *seed) { + secp256k1_aggsig_context* aggctx; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(pubkeys != NULL); + ARG_CHECK(seed != NULL); + + aggctx = (secp256k1_aggsig_context*)checked_malloc(&ctx->error_callback, sizeof(*aggctx)); + aggctx->progress = (enum nonce_progress*)checked_malloc(&ctx->error_callback, n_pubkeys * sizeof(*aggctx->progress)); + aggctx->pubkeys = (secp256k1_pubkey*)checked_malloc(&ctx->error_callback, n_pubkeys * sizeof(*aggctx->pubkeys)); + aggctx->secnonce = (secp256k1_scalar*)checked_malloc(&ctx->error_callback, n_pubkeys * sizeof(*aggctx->secnonce)); + aggctx->n_sigs = n_pubkeys; + secp256k1_gej_set_infinity(&aggctx->pubnonce_sum); + memcpy(aggctx->pubkeys, pubkeys, n_pubkeys * sizeof(*aggctx->pubkeys)); + memset(aggctx->progress, 0, n_pubkeys * sizeof(*aggctx->progress)); + 
secp256k1_rfc6979_hmac_sha256_initialize(&aggctx->rng, seed, 32); + return aggctx; +} + +int secp256k1_aggsig_generate_nonce_single(const secp256k1_context* ctx, secp256k1_scalar *secnonce, secp256k1_gej* pubnonce, secp256k1_rfc6979_hmac_sha256* rng) { + int retry; + unsigned char data[32]; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(secnonce != NULL); + ARG_CHECK(pubnonce != NULL); + ARG_CHECK(rng != NULL); + + /* generate nonce from the RNG */ + do { + secp256k1_rfc6979_hmac_sha256_generate(rng, data, 32); + secp256k1_scalar_set_b32(secnonce, data, &retry); + retry = secp256k1_scalar_is_zero(secnonce); + } while (retry); /* This branch true is cryptographically unreachable. Requires sha256_hmac output > Fp. */ + secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, pubnonce, secnonce); + memset(data, 0, 32); /* TODO proper clear */ + /* Negate nonce if needed to get y to be a quadratic residue */ + if (!secp256k1_gej_has_quad_y_var(pubnonce)) { + secp256k1_scalar_negate(secnonce, secnonce); + secp256k1_gej_neg(pubnonce, pubnonce); + } + return 1; +} + +int secp256k1_aggsig_export_secnonce_single(const secp256k1_context* ctx, unsigned char* secnonce32, const unsigned char* seed) { + secp256k1_scalar secnonce; + secp256k1_gej pubnonce; + secp256k1_rfc6979_hmac_sha256 rng; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(secnonce32 != NULL); + secp256k1_rfc6979_hmac_sha256_initialize(&rng, seed, 32); + + if (secp256k1_aggsig_generate_nonce_single(ctx, &secnonce, &pubnonce, &rng) == 0){ + return 0; + } + + secp256k1_scalar_get_b32(secnonce32, &secnonce); + return 1; +} + +/* TODO extend this to export the nonce if the user wants */ +int secp256k1_aggsig_generate_nonce(const secp256k1_context* ctx, secp256k1_aggsig_context* aggctx, size_t index) { + secp256k1_gej pubnon; + + VERIFY_CHECK(ctx != NULL); + 
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(aggctx != NULL); + ARG_CHECK(index < aggctx->n_sigs); + + if (aggctx->progress[index] != NONCE_PROGRESS_UNKNOWN) { + return 0; + } + if (secp256k1_aggsig_generate_nonce_single(ctx, &aggctx->secnonce[index], &pubnon, &aggctx->rng) == 0){ + return 0; + } + + secp256k1_gej_add_var(&aggctx->pubnonce_sum, &aggctx->pubnonce_sum, &pubnon, NULL); + aggctx->progress[index] = NONCE_PROGRESS_OURS; + return 1; +} + +int secp256k1_aggsig_sign_single(const secp256k1_context* ctx, + unsigned char *sig64, + const unsigned char *msg32, + const unsigned char *seckey32, + const unsigned char* secnonce32, + const unsigned char* extra32, + const secp256k1_pubkey* pubnonce_for_e, + const secp256k1_pubkey* pubnonce_total, + const secp256k1_pubkey* pubkey_for_e, + const unsigned char* seed){ + + secp256k1_scalar sighash; + secp256k1_rfc6979_hmac_sha256 rng; + secp256k1_scalar sec; + secp256k1_ge tmp_ge; + secp256k1_ge total_tmp_ge; + secp256k1_gej pubnonce_j; + secp256k1_gej pubnonce_total_j; + secp256k1_pubkey pub_tmp; + + secp256k1_scalar secnonce; + secp256k1_ge final; + int overflow; + int retry; + secp256k1_scalar tmp_scalar; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(sig64 != NULL); + ARG_CHECK(msg32 != NULL); + ARG_CHECK(seckey32 != NULL); + ARG_CHECK(seed != NULL); + + /* generate nonce if needed */ + if (secnonce32==NULL){ + secp256k1_rfc6979_hmac_sha256_initialize(&rng, seed, 32); + if (secp256k1_aggsig_generate_nonce_single(ctx, &secnonce, &pubnonce_j, &rng) == 0){ + return 0; + } + secp256k1_rfc6979_hmac_sha256_finalize(&rng); + secp256k1_ge_set_gej(&tmp_ge, &pubnonce_j); + } else { + secp256k1_scalar_set_b32(&secnonce, secnonce32, &retry); + secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubnonce_j, &secnonce); + secp256k1_ge_set_gej(&tmp_ge, &pubnonce_j); + + if (pubnonce_total!=NULL) { + 
secp256k1_gej_set_infinity(&pubnonce_total_j); + secp256k1_pubkey_load(ctx, &total_tmp_ge, pubnonce_total); + secp256k1_gej_add_ge(&pubnonce_total_j, &pubnonce_total_j, &total_tmp_ge); + if (!secp256k1_gej_has_quad_y_var(&pubnonce_total_j)) { + secp256k1_scalar_negate(&secnonce, &secnonce); + } + } else { + if (!secp256k1_gej_has_quad_y_var(&pubnonce_j)) { + secp256k1_scalar_negate(&secnonce, &secnonce); + secp256k1_gej_neg(&pubnonce_j, &pubnonce_j); + secp256k1_ge_neg(&tmp_ge, &tmp_ge); + } + } + } + + secp256k1_fe_normalize(&tmp_ge.x); + + /* compute signature hash (in the simple case just message+pubnonce+pubkey) */ + if (pubnonce_for_e != NULL) { + secp256k1_compute_sighash_single(ctx, &sighash, pubnonce_for_e, pubkey_for_e, msg32); + } else { + secp256k1_pubkey_save(&pub_tmp, &tmp_ge); + secp256k1_compute_sighash_single(ctx, &sighash, &pub_tmp, pubkey_for_e, msg32); + } + /* calculate signature */ + secp256k1_scalar_set_b32(&sec, seckey32, &overflow); + if (overflow) { + secp256k1_scalar_clear(&sec); + return 0; + } + + secp256k1_scalar_mul(&sec, &sec, &sighash); + secp256k1_scalar_add(&sec, &sec, &secnonce); + + if (extra32 != NULL) { + /* add extra scalar */ + secp256k1_scalar_set_b32(&tmp_scalar, extra32, &overflow); + if (overflow) { + secp256k1_scalar_clear(&sec); + return 0; + } + secp256k1_scalar_add(&sec, &sec, &tmp_scalar); + } + + /* finalize */ + secp256k1_ge_set_gej(&final, &pubnonce_j); + secp256k1_fe_normalize_var(&final.x); + secp256k1_fe_get_b32(sig64, &final.x); + secp256k1_scalar_get_b32(sig64 + 32, &sec); + + secp256k1_scalar_clear(&sec); + + return 1; +} + +int secp256k1_aggsig_partial_sign(const secp256k1_context* ctx, secp256k1_aggsig_context* aggctx, secp256k1_aggsig_partial_signature *partial, const unsigned char *msghash32, const unsigned char *seckey32, size_t index) { + size_t i; + secp256k1_scalar sighash; + secp256k1_scalar sec; + secp256k1_ge tmp_ge; + int overflow; + unsigned char prehash[32]; + + VERIFY_CHECK(ctx != NULL); + 
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(aggctx != NULL); + ARG_CHECK(partial != NULL); + ARG_CHECK(msghash32 != NULL); + ARG_CHECK(seckey32 != NULL); + ARG_CHECK(index < aggctx->n_sigs); + + /* check state machine */ + for (i = 0; i < aggctx->n_sigs; i++) { + if (aggctx->progress[i] == NONCE_PROGRESS_UNKNOWN) { + return 0; + } + } + if (aggctx->progress[index] != NONCE_PROGRESS_OURS) { + return 0; + } + + /* sign */ + /* If the total public nonce has wrong sign, negate our + * secret nonce. Everyone will negate the public one + * at combine time. */ + secp256k1_ge_set_gej(&tmp_ge, &aggctx->pubnonce_sum); /* TODO cache this */ + if (!secp256k1_gej_has_quad_y_var(&aggctx->pubnonce_sum)) { + secp256k1_scalar_negate(&aggctx->secnonce[index], &aggctx->secnonce[index]); + secp256k1_ge_neg(&tmp_ge, &tmp_ge); + } + secp256k1_fe_normalize(&tmp_ge.x); + secp256k1_compute_prehash(ctx, prehash, aggctx->pubkeys, aggctx->n_sigs, &tmp_ge.x, msghash32); + if (secp256k1_compute_sighash(&sighash, prehash, index) == 0) { + return 0; + } + secp256k1_scalar_set_b32(&sec, seckey32, &overflow); + if (overflow) { + secp256k1_scalar_clear(&sec); + return 0; + } + secp256k1_scalar_mul(&sec, &sec, &sighash); + secp256k1_scalar_add(&sec, &sec, &aggctx->secnonce[index]); + + /* finalize */ + secp256k1_scalar_get_b32(partial->data, &sec); + secp256k1_scalar_clear(&sec); + aggctx->progress[index] = NONCE_PROGRESS_SIGNED; + return 1; +} + +int secp256k1_aggsig_combine_signatures(const secp256k1_context* ctx, secp256k1_aggsig_context* aggctx, unsigned char *sig64, const secp256k1_aggsig_partial_signature *partial, size_t n_sigs) { + size_t i; + secp256k1_scalar s; + secp256k1_ge final; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(aggctx != NULL); + ARG_CHECK(sig64 != NULL); + ARG_CHECK(partial != NULL); + (void) ctx; + + if (n_sigs != aggctx->n_sigs) { + return 0; + } + + secp256k1_scalar_set_int(&s, 0); + for (i = 0; i < n_sigs; i++) { + secp256k1_scalar 
tmp; + int overflow; + secp256k1_scalar_set_b32(&tmp, partial[i].data, &overflow); + if (overflow) { + return 0; + } + secp256k1_scalar_add(&s, &s, &tmp); + } + + /* If we need to negate the public nonce, everyone will + * have negated their secret nonces in the previous step. */ + if (!secp256k1_gej_has_quad_y_var(&aggctx->pubnonce_sum)) { + secp256k1_gej_neg(&aggctx->pubnonce_sum, &aggctx->pubnonce_sum); + } + + secp256k1_ge_set_gej(&final, &aggctx->pubnonce_sum); + secp256k1_fe_normalize_var(&final.x); + secp256k1_fe_get_b32(sig64, &final.x); + secp256k1_scalar_get_b32(sig64 + 32, &s); + return 1; +} + +int secp256k1_aggsig_add_signatures_single(const secp256k1_context* ctx, + unsigned char *sig64, + const unsigned char** sigs, + size_t num_sigs, + const secp256k1_pubkey* pubnonce_total) { + + secp256k1_scalar s; + secp256k1_ge final; + secp256k1_scalar tmp; + secp256k1_ge noncesum_pt; + secp256k1_gej pubnonce_total_j; + size_t i; + int overflow; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(sig64 != NULL); + ARG_CHECK(sigs != NULL); + for (i=0;iprehash, idx) == 0) { + return 0; + } + secp256k1_scalar_negate(sc, sc); + secp256k1_pubkey_load(cbdata->ctx, pt, &cbdata->pubkeys[idx]); + return 1; +} + +int secp256k1_aggsig_verify(const secp256k1_context* ctx, secp256k1_scratch_space *scratch, const unsigned char *sig64, const unsigned char *msg32, const secp256k1_pubkey *pubkeys, size_t n_pubkeys) { + secp256k1_scalar g_sc; + secp256k1_gej pk_sum; + secp256k1_ge pk_sum_ge; + secp256k1_fe r_x; + int overflow; + secp256k1_verify_callback_data cbdata; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(scratch != NULL); + ARG_CHECK(sig64 != NULL); + ARG_CHECK(msg32 != NULL); + ARG_CHECK(pubkeys != NULL); + (void) ctx; + + if (n_pubkeys == 0) { + return 0; + } + + /* extract R */ + if (!secp256k1_fe_set_b32(&r_x, sig64)) { + return 0; + } + + /* extract s */ + secp256k1_scalar_set_b32(&g_sc, sig64 + 32, &overflow); + if (overflow) { + return 0; + } + + /* Populate callback data */ 
+ cbdata.ctx = ctx; + cbdata.pubkeys = pubkeys; + secp256k1_compute_prehash(ctx, cbdata.prehash, pubkeys, n_pubkeys, &r_x, msg32); + + /* Compute sum sG - e_i*P_i, which should be R */ + if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &pk_sum, &g_sc, secp256k1_aggsig_verify_callback, &cbdata, n_pubkeys)) { + return 0; + } + + /* Check sum */ + secp256k1_ge_set_gej(&pk_sum_ge, &pk_sum); + return secp256k1_fe_equal_var(&r_x, &pk_sum_ge.x) && + secp256k1_gej_has_quad_y_var(&pk_sum); +} + +int secp256k1_aggsig_build_scratch_and_verify(const secp256k1_context* ctx, + const unsigned char *sig64, + const unsigned char *msg32, + const secp256k1_pubkey *pubkeys, + size_t n_pubkeys) { + /* just going to inefficiently allocate every time */ + secp256k1_scratch_space *scratch = secp256k1_scratch_space_create(ctx, 1024*4096); + int returnval=secp256k1_aggsig_verify(ctx, scratch, sig64, msg32, pubkeys, n_pubkeys); + secp256k1_scratch_space_destroy(ctx, scratch); + return returnval; +} + +static int secp256k1_aggsig_verify_callback_single(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_verify_callback_data *cbdata = (secp256k1_verify_callback_data*) data; + secp256k1_scalar_negate(sc, &cbdata->single_hash); + secp256k1_pubkey_load(cbdata->ctx, pt, &cbdata->pubkeys[idx]); + return 1; +} + +int secp256k1_aggsig_verify_single( + const secp256k1_context* ctx, + const unsigned char *sig64, + const unsigned char *msg32, + const secp256k1_pubkey *pubnonce, + const secp256k1_pubkey *pubkey, + const secp256k1_pubkey *pubkey_total, + const secp256k1_pubkey *extra_pubkey, + const int is_partial){ + + secp256k1_scalar g_sc; + secp256k1_fe r_x; + secp256k1_gej pk_sum; + secp256k1_ge pk_sum_ge; + secp256k1_scalar sighash; + secp256k1_scratch_space *scratch; + secp256k1_verify_callback_data cbdata; + secp256k1_ge tmp_ge; + secp256k1_pubkey tmp_pk; + + int overflow; + int return_check=0; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(sig64 != NULL); + 
ARG_CHECK(msg32 != NULL); + ARG_CHECK(pubkey != NULL); + + /* extract R */ + if (!secp256k1_fe_set_b32(&r_x, sig64)) { + return 0; + } + + /* extract s */ + secp256k1_scalar_set_b32(&g_sc, sig64 + 32, &overflow); + if (overflow) { + return 0; + } + + /* compute e = sighash */ + if (pubnonce != NULL) { + secp256k1_compute_sighash_single(ctx, &sighash, pubnonce, pubkey_total, msg32); + } else { + secp256k1_ge_set_xquad(&tmp_ge, &r_x); + secp256k1_pubkey_save(&tmp_pk, &tmp_ge); + secp256k1_compute_sighash_single(ctx, &sighash, &tmp_pk, pubkey_total, msg32); + } + + /* Populate callback data */ + cbdata.ctx = ctx; + cbdata.pubkeys = pubkey; + cbdata.single_hash = sighash; + + scratch = secp256k1_scratch_space_create(ctx, 1024*4096); + if (scratch == NULL){ + return 0; + } + + /* Compute sG - eP, which should be R */ + if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &pk_sum, &g_sc, secp256k1_aggsig_verify_callback_single, &cbdata, 1)) { + secp256k1_scratch_space_destroy(ctx, scratch); + return 0; + } + + secp256k1_scratch_space_destroy(ctx, scratch); + + if (extra_pubkey != NULL) { + /* Subtract an extra public key */ + secp256k1_pubkey_load(ctx, &tmp_ge, extra_pubkey); + secp256k1_ge_neg(&tmp_ge, &tmp_ge); + secp256k1_gej_add_ge(&pk_sum, &pk_sum, &tmp_ge); + } + + secp256k1_ge_set_gej(&pk_sum_ge, &pk_sum); + + return_check = secp256k1_fe_equal_var(&r_x, &pk_sum_ge.x); + if (!is_partial){ + return return_check && secp256k1_gej_has_quad_y_var(&pk_sum); + } else { + return return_check; + } + +} + +void secp256k1_aggsig_context_destroy(secp256k1_aggsig_context *aggctx) { + if (aggctx == NULL) { + return; + } + memset(aggctx->pubkeys, 0, aggctx->n_sigs * sizeof(*aggctx->pubkeys)); + memset(aggctx->secnonce, 0, aggctx->n_sigs * sizeof(*aggctx->secnonce)); + memset(aggctx->progress, 0, aggctx->n_sigs * sizeof(*aggctx->progress)); + free(aggctx->pubkeys); + free(aggctx->secnonce); + free(aggctx->progress); + free(aggctx); +} + +#endif diff --git 
a/src/secp256k1/src/modules/aggsig/tests_impl.h b/src/secp256k1/src/modules/aggsig/tests_impl.h new file mode 100644 index 0000000000000..73bdb4b2e6c2d --- /dev/null +++ b/src/secp256k1/src/modules/aggsig/tests_impl.h @@ -0,0 +1,336 @@ +/********************************************************************** + * Copyright (c) 2017 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_MODULE_AGGSIG_TESTS_ +#define _SECP256K1_MODULE_AGGSIG_TESTS_ + +#include "secp256k1_aggsig.h" + +void test_aggsig_api(void) { + /* Setup contexts that just count errors */ + secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); + secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); + secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); + secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + secp256k1_scratch_space *scratch = secp256k1_scratch_space_create(none, 1024*4096); + secp256k1_scalar tmp_s; + unsigned char seckeys[5][32]; + secp256k1_pubkey pubkeys[5]; + secp256k1_aggsig_partial_signature partials[5]; + secp256k1_aggsig_context *aggctx; + unsigned char seed[32] = { 1, 2, 3, 4, 0 }; + unsigned char sig[64]; + unsigned char sig2[64]; + unsigned char* sigs[2]; + unsigned char combined_sig[64]; + unsigned char sec_nonces[5][32]; + secp256k1_pubkey pub_nonces[5]; + unsigned char orig_sig; + unsigned char orig_msg; + unsigned char *msg = seed; /* shh ;) */ + const secp256k1_pubkey* pubkey_combiner[2]; + secp256k1_pubkey combiner_sum; + secp256k1_pubkey combiner_sum_2; + int32_t ecount = 0; + + size_t i; + size_t j; + + secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(sign, 
counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); + + for (i = 0; i < 5; i++) { + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(seckeys[i], &tmp_s); + CHECK(secp256k1_ec_pubkey_create(ctx, &pubkeys[i], seckeys[i]) == 1); + } + + aggctx = secp256k1_aggsig_context_create(none, pubkeys, 5, seed); + CHECK(ecount == 0); + CHECK(aggctx != NULL); + secp256k1_aggsig_context_destroy(NULL); /* harmless */ + secp256k1_aggsig_context_destroy(aggctx); + + aggctx = secp256k1_aggsig_context_create(none, pubkeys, 0, seed); + CHECK(ecount == 0); + CHECK(aggctx != NULL); + secp256k1_aggsig_context_destroy(aggctx); + + aggctx = secp256k1_aggsig_context_create(none, pubkeys, 0, NULL); + CHECK(ecount == 1); + CHECK(aggctx == NULL); + aggctx = secp256k1_aggsig_context_create(none, NULL, 0, seed); + CHECK(ecount == 2); + CHECK(aggctx == NULL); + + aggctx = secp256k1_aggsig_context_create(none, pubkeys, 5, seed); + + CHECK(!secp256k1_aggsig_generate_nonce(none, aggctx, 0)); + CHECK(ecount == 3); + CHECK(secp256k1_aggsig_generate_nonce(sign, aggctx, 0)); + CHECK(ecount == 3); + CHECK(!secp256k1_aggsig_generate_nonce(vrfy, aggctx, 0)); + CHECK(ecount == 4); + CHECK(!secp256k1_aggsig_generate_nonce(both, aggctx, 0)); /* double-generate, not API error */ + CHECK(ecount == 4); + CHECK(secp256k1_aggsig_generate_nonce(both, aggctx, 1)); + CHECK(ecount == 4); + CHECK(!secp256k1_aggsig_generate_nonce(both, NULL, 2)); + CHECK(ecount == 5); + CHECK(!secp256k1_aggsig_generate_nonce(both, 
aggctx, 5)); /* out of range, API error */ + CHECK(ecount == 6); + + CHECK(!secp256k1_aggsig_partial_sign(both, aggctx, &partials[0], msg, seckeys[0], 0)); /* not all nonces generated, not API error */ + CHECK(secp256k1_aggsig_generate_nonce(both, aggctx, 2)); + CHECK(secp256k1_aggsig_generate_nonce(both, aggctx, 3)); + CHECK(secp256k1_aggsig_generate_nonce(both, aggctx, 4)); + CHECK(secp256k1_aggsig_partial_sign(both, aggctx, &partials[0], msg, seckeys[0], 0)); + CHECK(!secp256k1_aggsig_partial_sign(both, aggctx, &partials[0], msg, seckeys[0], 0)); /* double sign, not API error */ + CHECK(ecount == 6); + + CHECK(!secp256k1_aggsig_partial_sign(none, aggctx, &partials[1], msg, seckeys[1], 1)); + CHECK(ecount == 7); + CHECK(!secp256k1_aggsig_partial_sign(vrfy, aggctx, &partials[1], msg, seckeys[1], 1)); + CHECK(ecount == 8); + CHECK(secp256k1_aggsig_partial_sign(sign, aggctx, &partials[1], msg, seckeys[1], 1)); + CHECK(ecount == 8); + CHECK(!secp256k1_aggsig_partial_sign(sign, aggctx, NULL, msg, seckeys[2], 2)); + CHECK(ecount == 9); + CHECK(!secp256k1_aggsig_partial_sign(sign, aggctx, &partials[2], NULL, seckeys[2], 2)); + CHECK(ecount == 10); + CHECK(!secp256k1_aggsig_partial_sign(sign, aggctx, &partials[2], msg, NULL, 2)); + CHECK(ecount == 11); + CHECK(!secp256k1_aggsig_partial_sign(sign, aggctx, &partials[2], msg, seckeys[2], 5)); /* out of range, API error */ + CHECK(ecount == 12); + CHECK(secp256k1_aggsig_partial_sign(both, aggctx, &partials[2], msg, seckeys[2], 2)); + CHECK(secp256k1_aggsig_partial_sign(both, aggctx, &partials[3], msg, seckeys[3], 3)); + CHECK(secp256k1_aggsig_partial_sign(both, aggctx, &partials[4], msg, seckeys[4], 4)); + CHECK(ecount == 12); + + CHECK(secp256k1_aggsig_combine_signatures(none, aggctx, sig, partials, 5)); + CHECK(!secp256k1_aggsig_combine_signatures(none, aggctx, sig, partials, 4)); /* wrong sig count, not API error (should it be?) 
*/ + CHECK(!secp256k1_aggsig_combine_signatures(none, aggctx, sig, partials, 0)); + CHECK(ecount == 12); + CHECK(!secp256k1_aggsig_combine_signatures(none, NULL, sig, partials, 5)); + CHECK(ecount == 13); + CHECK(!secp256k1_aggsig_combine_signatures(none, aggctx, NULL, partials, 5)); + CHECK(ecount == 14); + CHECK(!secp256k1_aggsig_combine_signatures(none, aggctx, sig, NULL, 5)); + CHECK(ecount == 15); + + memset(sig, 0, sizeof(sig)); + CHECK(!secp256k1_aggsig_verify(vrfy, scratch, sig, msg, pubkeys, 5)); + CHECK(secp256k1_aggsig_combine_signatures(none, aggctx, sig, partials, 5)); + CHECK(!secp256k1_aggsig_verify(vrfy, scratch, sig, msg, pubkeys, 4)); + CHECK(!secp256k1_aggsig_verify(vrfy, scratch, sig, msg, pubkeys, 0)); + CHECK(secp256k1_aggsig_verify(vrfy, scratch, sig, msg, pubkeys, 5)); + CHECK(ecount == 15); + CHECK(!secp256k1_aggsig_verify(none, scratch, sig, msg, pubkeys, 5)); + CHECK(ecount == 16); + + CHECK(!secp256k1_aggsig_verify(vrfy, NULL, sig, msg, pubkeys, 5)); + CHECK(ecount == 17); + CHECK(!secp256k1_aggsig_verify(vrfy, scratch, NULL, msg, pubkeys, 5)); + CHECK(ecount == 18); + CHECK(!secp256k1_aggsig_verify(vrfy, scratch, sig, NULL, pubkeys, 5)); + CHECK(ecount == 19); + CHECK(!secp256k1_aggsig_verify(vrfy, scratch, sig, msg, NULL, 5)); + CHECK(ecount == 20); + + /* Test single api */ + memset(sig, 0, sizeof(sig)); + CHECK(secp256k1_aggsig_sign_single(sign, sig, msg, seckeys[0], NULL, NULL, NULL, NULL, NULL, seed)); + CHECK(ecount == 20); + CHECK(secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], NULL, NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[1], NULL, NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], &pubkeys[1], NULL, 0)); + orig_sig=sig[0]; + sig[0]=99; + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], NULL, NULL, 0)); + sig[0]=orig_sig; + CHECK(secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], NULL, NULL, 0)); 
+ orig_msg=msg[0]; + msg[0]=99; + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], NULL, NULL, 0)); + msg[0]=orig_msg; + + /* Test single api with pubkey in e */ + memset(sig, 0, sizeof(sig)); + CHECK(secp256k1_aggsig_sign_single(sign, sig, msg, seckeys[0], NULL, NULL, NULL, NULL, &pubkeys[2], seed)); + CHECK(secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], &pubkeys[2], NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], &pubkeys[3], NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], NULL, NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[1], &pubkeys[2], NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[1], NULL, NULL, 0)); + orig_sig=sig[0]; + sig[0]=99; + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], &pubkeys[2], NULL, 0)); + sig[0]=orig_sig; + CHECK(secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], &pubkeys[2], NULL, 0)); + msg[0]=99; + CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], &pubkeys[2], NULL, 0)); + + /* Overriding sec nonce */ + memset(sig, 0, sizeof(sig)); + CHECK(secp256k1_aggsig_sign_single(sign, sig, msg, seckeys[0], seckeys[1], NULL, NULL, NULL, NULL, seed)); + CHECK(secp256k1_aggsig_verify_single(vrfy, sig, msg, NULL, &pubkeys[0], NULL, NULL, 0)); + + /* Overriding sec nonce and pub nonce encoded in e */ + memset(sig, 0, sizeof(sig)); + CHECK(secp256k1_aggsig_sign_single(sign, sig, msg, seckeys[0], seckeys[1], NULL, &pubkeys[3], NULL, NULL, seed)); + CHECK(secp256k1_aggsig_verify_single(vrfy, sig, msg, &pubkeys[3], &pubkeys[0], NULL, NULL, 0)); + + /* Add extra key to the signature */ + memset(sig, 0, sizeof(sig)); + CHECK(secp256k1_aggsig_sign_single(sign, sig, msg, seckeys[0], seckeys[1], seckeys[2], &pubkeys[3], &pubkeys[3], &pubkeys[4], seed)); + /* Check that it doesn't verify without the extra key */ 
+ CHECK(!secp256k1_aggsig_verify_single(vrfy, sig, msg, &pubkeys[3], &pubkeys[0], &pubkeys[4], NULL, 1)); + /* And that it does with */ + CHECK(secp256k1_aggsig_verify_single(vrfy, sig, msg, &pubkeys[3], &pubkeys[0], &pubkeys[4], &pubkeys[2], 1)); + + /* Testing aggsig exchange algorithm for Grin */ + /* ****************************************** */ + + for (i=0;i<20;i++){ + memset(sig, 0, sizeof(sig)); + memset(sig2, 0, sizeof(sig2)); + memset(combined_sig, 0, sizeof(combined_sig)); + + /* Create a couple of nonces */ + /* Randomise seed to make it more interesting */ + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(seed, &tmp_s); + CHECK(secp256k1_aggsig_export_secnonce_single(sign, sec_nonces[0], seed)); + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(seed, &tmp_s); + CHECK(secp256k1_aggsig_export_secnonce_single(sign, sec_nonces[1], seed)); + + for (j = 0; j < 2; j++) { + CHECK(secp256k1_ec_pubkey_create(ctx, &pub_nonces[j], sec_nonces[j]) == 1); + } + + /* Randomize keys */ + for (j = 0; j < 2; j++) { + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(seckeys[j], &tmp_s); + CHECK(secp256k1_ec_pubkey_create(ctx, &pubkeys[j], seckeys[j]) == 1); + } + + /* Combine pubnonces */ + pubkey_combiner[0]=&pub_nonces[0]; + pubkey_combiner[1]=&pub_nonces[1]; + CHECK(secp256k1_ec_pubkey_combine(ctx, &combiner_sum, pubkey_combiner, 2) == 1); + + /* Combine pubkeys */ + pubkey_combiner[0]=&pubkeys[0]; + pubkey_combiner[1]=&pubkeys[1]; + CHECK(secp256k1_ec_pubkey_combine(ctx, &combiner_sum_2, pubkey_combiner, 2) == 1); + + /* Create 2 partial signatures (Sender, Receiver)*/ + CHECK(secp256k1_aggsig_sign_single(sign, sig, msg, seckeys[0], sec_nonces[0], NULL, &combiner_sum, &combiner_sum, &combiner_sum_2, seed)); + + /* Receiver verifies sender's Sig and signs */ + CHECK(secp256k1_aggsig_verify_single(vrfy, sig, msg, &combiner_sum, &pubkeys[0], &combiner_sum_2, NULL, 1)); + CHECK(secp256k1_aggsig_sign_single(sign, sig2, msg, seckeys[1], 
sec_nonces[1], NULL, &combiner_sum, &combiner_sum, &combiner_sum_2, seed)); + /* sender verifies receiver's Sig then creates final combined sig */ + CHECK(secp256k1_aggsig_verify_single(vrfy, sig2, msg, &combiner_sum, &pubkeys[1], &combiner_sum_2, NULL, 1)); + + sigs[0] = sig; + sigs[1] = sig2; + /* Add 2 sigs and nonces */ + CHECK(secp256k1_aggsig_add_signatures_single(sign, combined_sig, (const unsigned char **) sigs, 2, &combiner_sum)); + + /* Ensure added sigs verify properly (with and without providing nonce_sum) */ + CHECK(secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, &combiner_sum, &combiner_sum_2, &combiner_sum_2, NULL, 0)); + CHECK(secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, NULL, &combiner_sum_2, &combiner_sum_2, NULL, 0)); + + /* And anything else doesn't */ + CHECK(!secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, &combiner_sum, &combiner_sum_2, NULL, NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, NULL, &pub_nonces[1], NULL, NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, NULL, &pub_nonces[1], &combiner_sum_2, NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, &pub_nonces[0], &combiner_sum_2, NULL, NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, &pub_nonces[0], &combiner_sum_2, &combiner_sum_2, NULL, 0)); + msg[0]=1; + msg[1]=2; + msg[2]=3; + CHECK(!secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, NULL, &combiner_sum_2, NULL, NULL, 0)); + CHECK(!secp256k1_aggsig_verify_single(vrfy, combined_sig, msg, NULL, &combiner_sum_2, &combiner_sum_2, NULL, 0)); + } + /*** End aggsig for Grin exchange test ***/ + + /* cleanup */ + secp256k1_aggsig_context_destroy(aggctx); + secp256k1_scratch_space_destroy(vrfy, scratch); + secp256k1_context_destroy(none); + secp256k1_context_destroy(sign); + secp256k1_context_destroy(vrfy); + secp256k1_context_destroy(both); +} + +#define N_KEYS 200 +void test_aggsig_onesigner(void) { + 
secp256k1_pubkey pubkeys[N_KEYS]; + unsigned char seckeys[N_KEYS][32]; + secp256k1_aggsig_partial_signature partials[N_KEYS]; + const size_t n_pubkeys = sizeof(pubkeys) / sizeof(pubkeys[0]); + secp256k1_scalar tmp_s; + size_t i; + size_t n_signers[] = { 1, 2, N_KEYS / 5, N_KEYS - 1, N_KEYS }; + const size_t n_n_signers = sizeof(n_signers) / sizeof(n_signers[0]); + secp256k1_scratch_space *scratch = secp256k1_scratch_space_create(ctx, 1024*4096); + + unsigned char msg[32]; + + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(msg, &tmp_s); + + for (i = 0; i < n_pubkeys; i++) { + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(seckeys[i], &tmp_s); + CHECK(secp256k1_ec_pubkey_create(ctx, &pubkeys[i], seckeys[i]) == 1); + } + + for (i = 0; i < n_n_signers; i++) { + size_t j; + unsigned char seed[32]; + unsigned char sig[64]; + secp256k1_aggsig_context *aggctx; + + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(seed, &tmp_s); + aggctx = secp256k1_aggsig_context_create(ctx, pubkeys, n_signers[i], seed); + + /* all nonces must be generated before signing */ + for (j = 0; j < n_signers[i]; j++) { + CHECK(secp256k1_aggsig_generate_nonce(ctx, aggctx, j)); + } + for (j = 0; j < n_signers[i]; j++) { + CHECK(secp256k1_aggsig_partial_sign(ctx, aggctx, &partials[j], msg, seckeys[j], j)); + } + CHECK(secp256k1_aggsig_combine_signatures(ctx, aggctx, sig, partials, n_signers[i])); + CHECK(secp256k1_aggsig_verify(ctx, scratch, sig, msg, pubkeys, n_signers[i])); + /* Make sure verification with 0 pubkeys fails without Bad Things happenings */ + CHECK(!secp256k1_aggsig_verify(ctx, scratch, sig, msg, pubkeys, 0)); + + secp256k1_aggsig_context_destroy(aggctx); + } + + secp256k1_scratch_space_destroy(ctx, scratch); +} +#undef N_KEYS + +void run_aggsig_tests(void) { + test_aggsig_api(); + test_aggsig_onesigner(); +} + +#endif diff --git a/src/secp256k1/src/modules/bulletproofs/Makefile.am.include 
b/src/secp256k1/src/modules/bulletproofs/Makefile.am.include new file mode 100644 index 0000000000000..1bee88ef3411d --- /dev/null +++ b/src/secp256k1/src/modules/bulletproofs/Makefile.am.include @@ -0,0 +1,12 @@ +include_HEADERS += include/secp256k1_bulletproofs.h +noinst_HEADERS += src/modules/bulletproofs/inner_product_impl.h +noinst_HEADERS += src/modules/bulletproofs/rangeproof_impl.h +noinst_HEADERS += src/modules/bulletproofs/main_impl.h +noinst_HEADERS += src/modules/bulletproofs/tests_impl.h +noinst_HEADERS += src/modules/bulletproofs/util.h +if USE_BENCHMARK +noinst_PROGRAMS += bench_bulletproof +bench_bulletproof_SOURCES = src/bench_bulletproof.c +bench_bulletproof_LDADD = libsecp256k1.la $(SECP_LIBS) +bench_bulletproof_LDFLAGS = -static +endif diff --git a/src/secp256k1/src/modules/bulletproofs/inner_product_impl.h b/src/secp256k1/src/modules/bulletproofs/inner_product_impl.h new file mode 100644 index 0000000000000..b2ead610681b7 --- /dev/null +++ b/src/secp256k1/src/modules/bulletproofs/inner_product_impl.h @@ -0,0 +1,852 @@ +/********************************************************************** + * Copyright (c) 2018 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL +#define SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL + +#include "group.h" +#include "scalar.h" + +#include "modules/bulletproofs/main_impl.h" +#include "modules/bulletproofs/util.h" + +/* Number of scalars that should remain at the end of a recursive proof. The paper + * uses 2, by reducing the scalars as far as possible. We stop one recursive step + * early, trading two points (L, R) for two scalars, which reduces verification + * and prover cost. + * + * For the most part, all comments assume this value is at 4. 
+ */ +#define IP_AB_SCALARS 4 + +/* Bulletproof inner products consist of the four scalars and `2[log2(n) - 1]` points + * `a_1`, `a_2`, `b_1`, `b_2`, `L_i` and `R_i`, where `i` ranges from 0 to `log2(n)-1`. + * + * The prover takes as input a point `P` and scalar `c`. It proves that it knows + * scalars `a_i`, `b_i` for `i` ranging from 1 to `n`, such that + * `P = sum_i [a_i G_i + b_i H_i]` and `<{a_i}, {b_i}> = c`, + * where `G_i` and `H_i` are standard NUMS generators. + * + * Verification of the proof comes down to a single multiexponentiation of the form + * + * P + (c - a_1*b_1 - a_2*b_2)*x*G + * - sum_{i=1}^n [s'_i*G_i + s_i*H_i] + * + sum_{i=1}^log2(n) [x_i^-2 L_i + x_i^2 R_i] + * + * which will equal infinity if the inner product proof is correct. Here + * - `G` is the standard secp generator + * - `x` is a hash of `commit` and is used to rerandomize `c`. See Protocol 2 vs Protocol 1 in the paper. + * - `x_i = H(x_{i-1} || L_i || R_i)`, where `x_{-1}` is passed through the `commit` variable and + * must be a commitment to `P` and `c`. + * - `s_i` and `s'_i` are computed as follows. + * + * Letting `i_j` be defined as 1 if `i & 2^j == 1`, and -1 otherwise, + * - For `i` from `1` to `n/2`, `s'_i = a_1 * prod_{j=1}^log2(n) x_j^i_j` + * - For `i` from `n/2 + 1` to `n`, `s'_i = a_2 * prod_{j=1}^log2(n) x_j^i_j` + * - For `i` from `1` to `n/2`, `s_i = b_1 * prod_{j=1}^log2(n) x_j^-i_j` + * - For `i` from `n/2 + 1` to `n`, `s_i = b_2 * prod_{j=1}^log2(n) x_j^-i_j` + * + * Observe that these can be computed iteratively by labelling the coefficients `s_i` for `i` + * from `0` to `2n-1` rather than 1-indexing and distinguishing between `s_i'`s and `s_i`s: + * + * Start with `s_0 = a_1 * prod_{j=1}^log2(n) x_j^-1`, then for later `s_i`s, + * - For `i` from `1` to `n/2 - 1`, multiply some earlier `s'_j` by some `x_k^2` + * - For `i = n/2`, multiply `s_{i-1} by `a_2/a_1`. 
+ * - For `i` from `n/2 + 1` to `n - 1`, multiply some earlier `s'_j` by some `x_k^2` + * - For `i = n`, multiply `s'_{i-1}` by `b_1/a_2` to get `s_i`. + * - For `i` from `n + 1` to `3n/2 - 1`, multiply some earlier `s_j` by some `x_k^-2` + * - For `i = 3n/2`, multiply `s_{i-1}` by `b_2/b_1`. + * - For `i` from `3n/2 + 1` to `2n - 1`, multiply some earlier `s_j` by some `x_k^-2` + * where of course, the indices `j` and `k` must be chosen carefully. + * + * The bulk of `secp256k1_bulletproof_innerproduct_vfy_ecmult_callback` involves computing + * these indices, given `a_2/a_1`, `b_1/a_1`, `b_2/b_1`, and the `x_k^2`s as input. It + * computes `x_k^-2` as a side-effect of its other computation. + */ + +typedef int (secp256k1_bulletproof_vfy_callback)(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data); + +/* used by callers to wrap a proof with surrounding context */ +typedef struct { + const unsigned char *proof; + secp256k1_scalar p_offs; + secp256k1_scalar yinv; + unsigned char commit[32]; + secp256k1_bulletproof_vfy_callback *rangeproof_cb; + void *rangeproof_cb_data; + size_t n_extra_rangeproof_points; +} secp256k1_bulletproof_innerproduct_context; + +/* used internally */ +typedef struct { + const secp256k1_bulletproof_innerproduct_context *proof; + secp256k1_scalar abinv[IP_AB_SCALARS]; + secp256k1_scalar xsq[SECP256K1_BULLETPROOF_MAX_DEPTH + 1]; + secp256k1_scalar xsqinv[SECP256K1_BULLETPROOF_MAX_DEPTH + 1]; + secp256k1_scalar xsqinvy[SECP256K1_BULLETPROOF_MAX_DEPTH + 1]; + secp256k1_scalar xcache[SECP256K1_BULLETPROOF_MAX_DEPTH + 1]; + secp256k1_scalar xsqinv_mask; + const unsigned char *serialized_lr; +} secp256k1_bulletproof_innerproduct_vfy_data; + +/* used by callers to modify the multiexp */ +typedef struct { + size_t n_proofs; + secp256k1_scalar p_offs; + const secp256k1_ge *g; + const secp256k1_ge *geng; + const secp256k1_ge *genh; + size_t vec_len; + size_t lg_vec_len; + int shared_g; + secp256k1_scalar 
*randomizer; + secp256k1_bulletproof_innerproduct_vfy_data *proof; +} secp256k1_bulletproof_innerproduct_vfy_ecmult_context; + +size_t secp256k1_bulletproof_innerproduct_proof_length(size_t n) { + if (n < IP_AB_SCALARS / 2) { + return 32 * (1 + 2 * n); + } else { + size_t bit_count = secp256k1_popcountl(n); + size_t log = secp256k1_floor_lg(2 * n / IP_AB_SCALARS); + return 32 * (1 + 2 * (bit_count - 1 + log) + IP_AB_SCALARS) + (2*log + 7) / 8; + } +} + +/* Our ecmult_multi function takes `(c - a*b)*x` directly and multiplies this by `G`. For every other + * (scalar, point) pair it calls the following callback function, which takes an index and outputs a + * pair. The function therefore has three regimes: + * + * For the first `n` invocations, it returns `(s'_i, G_i)` for `i` from 1 to `n`. + * For the next `n` invocations, it returns `(s_i, H_i)` for `i` from 1 to `n`. + * For the next `2*log2(n)` invocations it returns `(x_i^-2, L_i)` and `(x_i^2, R_i)`, + * alternating between the two choices, for `i` from 1 to `log2(n)`. + * + * For the remaining invocations it passes through to another callback, `rangeproof_cb_data` which + * computes `P`. The reason for this is that in practice `P` is usually defined by another multiexp + * rather than being a known point, and it is more efficient to compute one exponentiation. + * + * Inline we refer to the first `2n` coefficients as `s_i` for `i` from 0 to `2n-1`, since that + * is the more convenient indexing. In particular we describe (a) how the indices `j` and `k`, + * from the big comment block above, are chosen; and (b) when/how each `x_k^-2` is computed. 
+ */ +static int secp256k1_bulletproof_innerproduct_vfy_ecmult_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_bulletproof_innerproduct_vfy_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_vfy_ecmult_context *) data; + + /* First 2N points use the standard Gi, Hi generators, and the scalars can be aggregated across proofs. + * Inside this if clause, `idx` corresponds to the index `i` in the big comment, and runs from 0 to `2n-1`. + * Also `ctx->vec_len` corresponds to `n`. */ + if (idx < 2 * ctx->vec_len) { + /* Number of `a` scalars in the proof (same as number of `b` scalars in the proof). Will + * be 2 except for very small proofs that have fewer than 2 scalars as input. */ + const size_t grouping = ctx->vec_len < IP_AB_SCALARS / 2 ? ctx->vec_len : IP_AB_SCALARS / 2; + const size_t lg_grouping = secp256k1_floor_lg(grouping); + size_t i; + VERIFY_CHECK(lg_grouping == 0 || lg_grouping == 1); /* TODO support higher IP_AB_SCALARS */ + + /* Determine whether we're multiplying by `G_i`s or `H_i`s. */ + if (idx < ctx->vec_len) { + *pt = ctx->geng[idx]; + } else { + *pt = ctx->genh[idx - ctx->vec_len]; + } + + secp256k1_scalar_clear(sc); + /* Loop over all the different inner product proofs we might be doing at once. Since they + * share generators `G_i` and `H_i`, we compute all of their scalars at once and add them. + * For each proof we start with the "seed value" `ctx->proof[i].xcache[0]` (see next comment + * for its meaning) from which every other scalar derived. We expect the caller to have + * randomized this to ensure that this wanton addition cannot enable cancellation attacks. + */ + for (i = 0; i < ctx->n_proofs; i++) { + /* To recall from the introductory comment: most `s_i` values are computed by taking an + * earlier `s_j` value and multiplying it by some `x_k^2`. + * + * We now explain the index `j`: it is the largest number with one fewer 1-bits than `i`. 
+ * Alternately, the most recently returned `s_j` where `j` has one fewer 1-bits than `i`. + * + * To ensure that `s_j` is available when we need it, on each iteration we define the + * variable `cache_idx` which simply counts the 1-bits in `i`; before returning `s_i` + * we store it in `ctx->proof[i].xcache[cache_idx]`. Then later, when we want "most + * recently returned `s_j` with one fewer 1-bits than `i`, it'll be sitting right + * there in `ctx->proof[i].xcache[cache_idx - 1]`. + * + * Note that `ctx->proof[i].xcache[0]` will always equal `-a_1 * prod_{i=1}^{n-1} x_i^-2`, + * and we expect the caller to have set this. + */ + const size_t cache_idx = secp256k1_popcountl(idx); + secp256k1_scalar term; + VERIFY_CHECK(cache_idx < SECP256K1_BULLETPROOF_MAX_DEPTH); + /* For the special case `cache_idx == 0` (which is true iff `idx == 0`) there is nothing to do. */ + if (cache_idx > 0) { + /* Otherwise, check if this is one of the special indices where we transition from `a_1` to `a_2`, + * from `a_2` to `b_1`, or from `b_1` to `b_2`. (For small proofs there is only one transition, + * from `a` to `b`.) */ + if (idx % (ctx->vec_len / grouping) == 0) { + const size_t abinv_idx = idx / (ctx->vec_len / grouping) - 1; + size_t prev_cache_idx; + /* Check if it's the even specialer index where we're transitioning from `a`s to `b`s, from + * `G`s to `H`s, and from `x_k^2`s to `x_k^-2`s. In rangeproof and circuit applications, + * the caller secretly has a variable `y` such that `H_i` is really `y^-i H_i` for `i` ranging + * from 0 to `n-1`. Rather than forcing the caller to tweak every `H_i` herself, which would + * be very slow and prevent precomputation, we instead multiply our cached `x_k^-2` values + * by `y^(-2^k)` respectively, which will ultimately result in every `s_i` we return having + * been multiplied by `y^-i`. 
+ * + * This is an underhanded trick but the result is that all `n` powers of `y^-i` show up + * in the right place, and we only need log-many scalar squarings and multiplications. + */ + if (idx == ctx->vec_len) { + secp256k1_scalar yinvn = ctx->proof[i].proof->yinv; + size_t j; + prev_cache_idx = secp256k1_popcountl(idx - 1); + for (j = 0; j < (size_t) secp256k1_ctzl(idx) - lg_grouping; j++) { + secp256k1_scalar_mul(&ctx->proof[i].xsqinvy[j], &ctx->proof[i].xsqinv[j], &yinvn); + secp256k1_scalar_sqr(&yinvn, &yinvn); + } + if (lg_grouping == 1) { + secp256k1_scalar_mul(&ctx->proof[i].abinv[2], &ctx->proof[i].abinv[2], &yinvn); + secp256k1_scalar_sqr(&yinvn, &yinvn); + } + } else { + prev_cache_idx = cache_idx - 1; + } + /* Regardless of specialness, we multiply by `a_2/a_1` or whatever the appropriate multiplier + * is. We expect the caller to have given these to us in the `ctx->proof[i].abinv` array. */ + secp256k1_scalar_mul( + &ctx->proof[i].xcache[cache_idx], + &ctx->proof[i].xcache[prev_cache_idx], + &ctx->proof[i].abinv[abinv_idx] + ); + /* If it's *not* a special index, just multiply by the appropriate `x_k^2`, or `x_k^-2` in case + * we're in the `H_i` half of the multiexp. At this point we can explain the index `k`, which + * is computed in the variable `xsq_idx` (`xsqinv_idx` respectively). In light of our discussion + * of `j`, we see that this should be "the least significant bit that's 1 in `i` but not `i-1`." + * In other words, it is the number of trailing 0 bits in the index `i`. 
*/ + } else if (idx < ctx->vec_len) { + const size_t xsq_idx = secp256k1_ctzl(idx); + secp256k1_scalar_mul(&ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xcache[cache_idx - 1], &ctx->proof[i].xsq[xsq_idx]); + } else { + const size_t xsqinv_idx = secp256k1_ctzl(idx); + secp256k1_scalar_mul(&ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xcache[cache_idx - 1], &ctx->proof[i].xsqinvy[xsqinv_idx]); + } + } + term = ctx->proof[i].xcache[cache_idx]; + + /* One last trick: compute `x_k^-2` while computing the `G_i` scalars, so that they'll be + * available when we need them for the `H_i` scalars. We can do this for every `i` value + * that has exactly one 0-bit, i.e. which is a product of all `x_i`s and one `x_k^-1`. By + * multiplying that by the special value `prod_{i=1}^n x_i^-1` we obtain simply `x_k^-2`. + * We expect the caller to give us this special value in `ctx->proof[i].xsqinv_mask`. */ + if (idx < ctx->vec_len / grouping && secp256k1_popcountl(idx) == ctx->lg_vec_len - 1) { + const size_t xsqinv_idx = secp256k1_ctzl(~idx); + secp256k1_scalar_mul(&ctx->proof[i].xsqinv[xsqinv_idx], &ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xsqinv_mask); + } + + /* Finally, if the caller, in its computation of `P`, wants to multiply `G_i` or `H_i` by some scalar, + * we add that to our sum as well. Again, we trust the randomization in `xcache[0]` to prevent any + * cancellation attacks here. 
*/ + if (ctx->proof[i].proof->rangeproof_cb != NULL) { + secp256k1_scalar rangeproof_offset; + if ((ctx->proof[i].proof->rangeproof_cb)(&rangeproof_offset, NULL, &ctx->randomizer[i], idx, ctx->proof[i].proof->rangeproof_cb_data) != 1) { + return 0; + } + secp256k1_scalar_add(&term, &term, &rangeproof_offset); + } + + secp256k1_scalar_add(sc, sc, &term); + } + /* Next 2lgN points are the L and R vectors */ + } else if (idx < 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs)) { + size_t real_idx = idx - 2 * ctx->vec_len; + const size_t proof_idx = real_idx / (2 * ctx->lg_vec_len); + real_idx = real_idx % (2 * ctx->lg_vec_len); + if (!secp256k1_bulletproof_deserialize_point( + pt, + ctx->proof[proof_idx].serialized_lr, + real_idx, + 2 * ctx->lg_vec_len + )) { + return 0; + } + if (idx % 2 == 0) { + *sc = ctx->proof[proof_idx].xsq[real_idx / 2]; + } else { + *sc = ctx->proof[proof_idx].xsqinv[real_idx / 2]; + } + secp256k1_scalar_mul(sc, sc, &ctx->randomizer[proof_idx]); + /* After the G's, H's, L's and R's, do the blinding_gen */ + } else if (idx == 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs)) { + *sc = ctx->p_offs; + *pt = *ctx->g; + /* Remaining points are whatever the rangeproof wants */ + } else if (ctx->shared_g && idx == 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs) + 1) { + /* Special case: the first extra point is independent of the proof, for both rangeproof and circuit */ + size_t i; + secp256k1_scalar_clear(sc); + for (i = 0; i < ctx->n_proofs; i++) { + secp256k1_scalar term; + if ((ctx->proof[i].proof->rangeproof_cb)(&term, pt, &ctx->randomizer[i], 2 * (ctx->vec_len + ctx->lg_vec_len), ctx->proof[i].proof->rangeproof_cb_data) != 1) { + return 0; + } + secp256k1_scalar_add(sc, sc, &term); + } + } else { + size_t proof_idx = 0; + size_t real_idx = idx - 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs) - 1 - !!ctx->shared_g; + while (real_idx >= ctx->proof[proof_idx].proof->n_extra_rangeproof_points - !!ctx->shared_g) { + real_idx -= 
ctx->proof[proof_idx].proof->n_extra_rangeproof_points - !!ctx->shared_g; + proof_idx++; + VERIFY_CHECK(proof_idx < ctx->n_proofs); + } + if ((ctx->proof[proof_idx].proof->rangeproof_cb)(sc, pt, &ctx->randomizer[proof_idx], 2 * (ctx->vec_len + ctx->lg_vec_len), ctx->proof[proof_idx].proof->rangeproof_cb_data) != 1) { + return 0; + } + } + + return 1; +} + +/* nb For security it is essential that `commit_inp` already commit to all data + * needed to compute `P`. We do not hash it in during verification since `P` + * may be specified indirectly as a bunch of scalar offsets. + */ +static int secp256k1_bulletproof_inner_product_verify_impl(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, const secp256k1_bulletproof_generators *gens, size_t vec_len, const secp256k1_bulletproof_innerproduct_context *proof, size_t n_proofs, size_t plen, int shared_g) { + secp256k1_sha256 sha256; + secp256k1_bulletproof_innerproduct_vfy_ecmult_context ecmult_data; + unsigned char commit[32]; + size_t total_n_points = 2 * vec_len + !!shared_g + 1; /* +1 for shared G (value_gen), +1 for H (blinding_gen) */ + secp256k1_gej r; + secp256k1_scalar zero; + size_t i; + const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch); + + if (plen != secp256k1_bulletproof_innerproduct_proof_length(vec_len)) { + return 0; + } + + if (n_proofs == 0) { + return 1; + } + + secp256k1_scalar_clear(&zero); + ecmult_data.n_proofs = n_proofs; + ecmult_data.g = gens->blinding_gen; + ecmult_data.geng = gens->gens; + ecmult_data.genh = gens->gens + gens->n / 2; + ecmult_data.vec_len = vec_len; + ecmult_data.lg_vec_len = secp256k1_floor_lg(2 * vec_len / IP_AB_SCALARS); + ecmult_data.shared_g = shared_g; + ecmult_data.randomizer = (secp256k1_scalar *)secp256k1_scratch_alloc(error_callback, scratch, n_proofs * sizeof(*ecmult_data.randomizer)); + ecmult_data.proof = (secp256k1_bulletproof_innerproduct_vfy_data *)secp256k1_scratch_alloc(error_callback, scratch, n_proofs * 
sizeof(*ecmult_data.proof)); + + if (ecmult_data.randomizer == NULL || ecmult_data.proof == NULL) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + /* Seed RNG for per-proof randomizers */ + secp256k1_sha256_initialize(&sha256); + for (i = 0; i < n_proofs; i++) { + secp256k1_sha256_write(&sha256, proof[i].proof, plen); + secp256k1_sha256_write(&sha256, proof[i].commit, 32); + secp256k1_scalar_get_b32(commit, &proof[i].p_offs); + secp256k1_sha256_write(&sha256, commit, 32); + } + secp256k1_sha256_finalize(&sha256, commit); + + secp256k1_scalar_clear(&ecmult_data.p_offs); + for (i = 0; i < n_proofs; i++) { + const unsigned char *serproof = proof[i].proof; + unsigned char proof_commit[32]; + secp256k1_scalar dot; + secp256k1_scalar ab[IP_AB_SCALARS]; + secp256k1_scalar negprod; + secp256k1_scalar x; + int overflow; + size_t j; + const size_t n_ab = 2 * vec_len < IP_AB_SCALARS ? 2 * vec_len : IP_AB_SCALARS; + + total_n_points += 2 * ecmult_data.lg_vec_len + proof[i].n_extra_rangeproof_points - !!shared_g; /* -1 for shared G */ + + /* Extract dot product, will always be the first 32 bytes */ + secp256k1_scalar_set_b32(&dot, serproof, &overflow); + if (overflow) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + /* Commit to dot product */ + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, proof[i].commit, 32); + secp256k1_sha256_write(&sha256, serproof, 32); + secp256k1_sha256_finalize(&sha256, proof_commit); + serproof += 32; + + /* Extract a, b */ + for (j = 0; j < n_ab; j++) { + secp256k1_scalar_set_b32(&ab[j], serproof, &overflow); + if (overflow) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + /* TODO our verifier currently bombs out with zeros because it uses + * scalar inverses gratuitously. Fix that. 
*/ + if (secp256k1_scalar_is_zero(&ab[j])) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + serproof += 32; + } + secp256k1_scalar_dot_product(&negprod, &ab[0], &ab[n_ab / 2], n_ab / 2); + + ecmult_data.proof[i].proof = &proof[i]; + /* set per-proof randomizer */ + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_finalize(&sha256, commit); + secp256k1_scalar_set_b32(&ecmult_data.randomizer[i], commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&ecmult_data.randomizer[i])) { + /* cryptographically unreachable */ + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + /* Compute x*(dot - a*b) for each proof; add it and p_offs to the p_offs accumulator */ + secp256k1_scalar_set_b32(&x, proof_commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&x)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + secp256k1_scalar_negate(&negprod, &negprod); + secp256k1_scalar_add(&negprod, &negprod, &dot); + secp256k1_scalar_mul(&x, &x, &negprod); + secp256k1_scalar_add(&x, &x, &proof[i].p_offs); + + secp256k1_scalar_mul(&x, &x, &ecmult_data.randomizer[i]); + secp256k1_scalar_add(&ecmult_data.p_offs, &ecmult_data.p_offs, &x); + + /* Special-case: trivial proofs are valid iff the explicitly revealed scalars + * dot to the explicitly revealed dot product. */ + if (2 * vec_len <= IP_AB_SCALARS) { + if (!secp256k1_scalar_is_zero(&negprod)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + /* remaining data does not (and cannot) be computed for proofs with no a's or b's. */ + if (vec_len == 0) { + continue; + } + } + + /* Compute the inverse product and the array of squares; the rest will be filled + * in by the callback during the multiexp. 
*/ + ecmult_data.proof[i].serialized_lr = serproof; /* bookmark L/R location in proof */ + negprod = ab[n_ab - 1]; + ab[n_ab - 1] = ecmult_data.randomizer[i]; /* build r * x1 * x2 * ... * xn in last slot of `ab` array */ + for (j = 0; j < ecmult_data.lg_vec_len; j++) { + secp256k1_scalar xi; + const size_t lidx = 2 * j; + const size_t ridx = 2 * j + 1; + const size_t bitveclen = (2 * ecmult_data.lg_vec_len + 7) / 8; + const unsigned char lrparity = 2 * !!(serproof[lidx / 8] & (1 << (lidx % 8))) + !!(serproof[ridx / 8] & (1 << (ridx % 8))); + /* Map commit -> H(commit || LR parity || Lx || Rx), compute xi from it */ + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, proof_commit, 32); + secp256k1_sha256_write(&sha256, &lrparity, 1); + secp256k1_sha256_write(&sha256, &serproof[32 * lidx + bitveclen], 32); + secp256k1_sha256_write(&sha256, &serproof[32 * ridx + bitveclen], 32); + secp256k1_sha256_finalize(&sha256, proof_commit); + + secp256k1_scalar_set_b32(&xi, proof_commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&xi)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + secp256k1_scalar_mul(&ab[n_ab - 1], &ab[n_ab - 1], &xi); + secp256k1_scalar_sqr(&ecmult_data.proof[i].xsq[j], &xi); + } + /* Compute inverse of all a's and b's, except the last b whose inverse is not needed. + * Also compute the inverse of (-r * x1 * ... * xn) which will be needed */ + secp256k1_scalar_inverse_all_var(ecmult_data.proof[i].abinv, ab, n_ab); + ab[n_ab - 1] = negprod; + + /* Compute (-a0 * r * x1 * ... 
* xn)^-1 which will be used to mask out individual x_i^-2's */ + secp256k1_scalar_negate(&ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].abinv[0]); + secp256k1_scalar_mul(&ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].abinv[n_ab - 1]); + + /* Compute each scalar times the previous' inverse, which is used to switch between a's and b's */ + for (j = n_ab - 1; j > 0; j--) { + size_t prev_idx; + if (j == n_ab / 2) { + prev_idx = j - 1; /* we go from a_{n-1} to b_0 */ + } else { + prev_idx = j & (j - 1); /* but from a_i' to a_i, where i' is i with its lowest set bit unset */ + } + secp256k1_scalar_mul( + &ecmult_data.proof[i].abinv[j - 1], + &ecmult_data.proof[i].abinv[prev_idx], + &ab[j] + ); + } + + /* Extract -a0 * r * (x1 * ... * xn)^-1 which is our first coefficient. Use negprod as a dummy */ + secp256k1_scalar_mul(&negprod, &ecmult_data.randomizer[i], &ab[0]); /* r*a */ + secp256k1_scalar_sqr(&negprod, &negprod); /* (r*a)^2 */ + secp256k1_scalar_mul(&ecmult_data.proof[i].xcache[0], &ecmult_data.proof[i].xsqinv_mask, &negprod); /* -a * r * (x1 * x2 * ... 
* xn)^-1 */ + } + + /* Do the multiexp */ + if (secp256k1_ecmult_multi_var(error_callback, scratch, &r, NULL, secp256k1_bulletproof_innerproduct_vfy_ecmult_callback, (void *) &ecmult_data, total_n_points) != 1) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return secp256k1_gej_is_infinity(&r); +} + +typedef struct { + secp256k1_scalar x[SECP256K1_BULLETPROOF_MAX_DEPTH]; + secp256k1_scalar xinv[SECP256K1_BULLETPROOF_MAX_DEPTH]; + secp256k1_scalar yinv; + secp256k1_scalar yinvn; + const secp256k1_ge *geng; + const secp256k1_ge *genh; + const secp256k1_ge *g; + const secp256k1_scalar *a; + const secp256k1_scalar *b; + secp256k1_scalar g_sc; + size_t grouping; + size_t n; +} secp256k1_bulletproof_innerproduct_pf_ecmult_context; + +/* At each level i of recursion (i from 0 upto lg(vector size) - 1) + * L = a_even . G_odd + b_odd . H_even (18) + * which, by expanding the generators into the original G's and H's + * and setting n = (1 << i), can be computed as follows: + * + * For j from 1 to [vector size], + * 1. Use H[j] or G[j] as generator, starting with H and switching + * every n. + * 2. Start with b1 with H and a0 with G, and increment by 2 each switch. + * 3. For k = 1, 2, 4, ..., n/2, use the same algorithm to choose + * between a and b to choose between x and x^-1, except using + * k in place of n. With H's choose x then x^-1, with G's choose + * x^-1 then x. + * + * For R everything is the same except swap G/H and a/b and x/x^-1. 
+ */ +static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_bulletproof_innerproduct_pf_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_pf_ecmult_context *) data; + const size_t ab_idx = (idx / ctx->grouping) ^ 1; + size_t i; + + /* Special-case the primary generator */ + if (idx == ctx->n) { + *pt = *ctx->g; + *sc = ctx->g_sc; + return 1; + } + + /* steps 1/2 */ + if ((idx / ctx->grouping) % 2 == 0) { + *pt = ctx->genh[idx]; + *sc = ctx->b[ab_idx]; + /* Map h -> h' (eqn 59) */ + secp256k1_scalar_mul(sc, sc, &ctx->yinvn); + } else { + *pt = ctx->geng[idx]; + *sc = ctx->a[ab_idx]; + } + + /* step 3 */ + for (i = 0; (1u << i) < ctx->grouping; i++) { + size_t grouping = (1u << i); + if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 0) { + secp256k1_scalar_mul(sc, sc, &ctx->x[i]); + } else { + secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]); + } + } + + secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv); + return 1; +} + +/* Identical code except `== 0` changed to `== 1` twice, and the + * `+ 1` from Step 1/2 was moved to the other if branch. 
*/ +static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_bulletproof_innerproduct_pf_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_pf_ecmult_context *) data; + const size_t ab_idx = (idx / ctx->grouping) ^ 1; + size_t i; + + /* Special-case the primary generator */ + if (idx == ctx->n) { + *pt = *ctx->g; + *sc = ctx->g_sc; + return 1; + } + + /* steps 1/2 */ + if ((idx / ctx->grouping) % 2 == 1) { + *pt = ctx->genh[idx]; + *sc = ctx->b[ab_idx]; + /* Map h -> h' (eqn 59) */ + secp256k1_scalar_mul(sc, sc, &ctx->yinvn); + } else { + *pt = ctx->geng[idx]; + *sc = ctx->a[ab_idx]; + } + + /* step 3 */ + for (i = 0; (1u << i) < ctx->grouping; i++) { + size_t grouping = (1u << i); + if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 1) { + secp256k1_scalar_mul(sc, sc, &ctx->x[i]); + } else { + secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]); + } + } + + secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv); + return 1; +} + +static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_bulletproof_innerproduct_pf_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_pf_ecmult_context *) data; + size_t i; + + *pt = ctx->geng[idx]; + secp256k1_scalar_set_int(sc, 1); + for (i = 0; (1u << i) <= ctx->grouping; i++) { + if (idx & (1u << i)) { + secp256k1_scalar_mul(sc, sc, &ctx->x[i]); + } else { + secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]); + } + } + return 1; +} + +static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_bulletproof_innerproduct_pf_ecmult_context *ctx = (secp256k1_bulletproof_innerproduct_pf_ecmult_context *) data; + size_t i; + + *pt = ctx->genh[idx]; + secp256k1_scalar_set_int(sc, 1); + for (i = 0; (1u << i) <= ctx->grouping; i++) { + if (idx & (1u << i)) { + 
secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]); + } else { + secp256k1_scalar_mul(sc, sc, &ctx->x[i]); + } + } + secp256k1_scalar_mul(sc, sc, &ctx->yinvn); + secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv); + return 1; +} + +/* These proofs are not zero-knowledge. There is no need to worry about constant timeness. + * `commit_inp` must contain 256 bits of randomness, it is used immediately as a randomizer. + */ +static int secp256k1_bulletproof_inner_product_real_prove_impl(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_ge *out_pt, size_t *pt_idx, const secp256k1_ge *g, secp256k1_ge *geng, secp256k1_ge *genh, secp256k1_scalar *a_arr, secp256k1_scalar *b_arr, const secp256k1_scalar *yinv, const secp256k1_scalar *ux, const size_t n, unsigned char *commit) { + size_t i; + size_t halfwidth; + + secp256k1_bulletproof_innerproduct_pf_ecmult_context pfdata; + pfdata.yinv = *yinv; + pfdata.g = g; + pfdata.geng = geng; + pfdata.genh = genh; + pfdata.a = a_arr; + pfdata.b = b_arr; + pfdata.n = n; + + /* Protocol 1: Iterate, halving vector size until it is 1 */ + for (halfwidth = n / 2, i = 0; halfwidth > IP_AB_SCALARS / 4; halfwidth /= 2, i++) { + secp256k1_gej tmplj, tmprj; + size_t j; + int overflow; + + pfdata.grouping = 1u << i; + + /* L */ + secp256k1_scalar_clear(&pfdata.g_sc); + for (j = 0; j < halfwidth; j++) { + secp256k1_scalar prod; + secp256k1_scalar_mul(&prod, &a_arr[2*j], &b_arr[2*j + 1]); + secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod); + } + secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux); + + secp256k1_scalar_set_int(&pfdata.yinvn, 1); + secp256k1_ecmult_multi_var(error_callback, scratch, &tmplj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l, (void *) &pfdata, n + 1); + secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmplj); + + /* R */ + secp256k1_scalar_clear(&pfdata.g_sc); + for (j = 0; j < halfwidth; j++) { + secp256k1_scalar prod; + secp256k1_scalar_mul(&prod, &a_arr[2*j + 1], 
&b_arr[2*j]); + secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod); + } + secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux); + + secp256k1_scalar_set_int(&pfdata.yinvn, 1); + secp256k1_ecmult_multi_var(error_callback, scratch, &tmprj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r, (void *) &pfdata, n + 1); + secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmprj); + + /* x, x^2, x^-1, x^-2 */ + secp256k1_bulletproof_update_commit(commit, &out_pt[*pt_idx - 2], &out_pt[*pt_idx] - 1); + secp256k1_scalar_set_b32(&pfdata.x[i], commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&pfdata.x[i])) { + return 0; + } + secp256k1_scalar_inverse_var(&pfdata.xinv[i], &pfdata.x[i]); + + /* update scalar array */ + for (j = 0; j < halfwidth; j++) { + secp256k1_scalar tmps; + secp256k1_scalar_mul(&a_arr[2*j], &a_arr[2*j], &pfdata.x[i]); + secp256k1_scalar_mul(&tmps, &a_arr[2*j + 1], &pfdata.xinv[i]); + secp256k1_scalar_add(&a_arr[j], &a_arr[2*j], &tmps); + + secp256k1_scalar_mul(&b_arr[2*j], &b_arr[2*j], &pfdata.xinv[i]); + secp256k1_scalar_mul(&tmps, &b_arr[2*j + 1], &pfdata.x[i]); + secp256k1_scalar_add(&b_arr[j], &b_arr[2*j], &tmps); + + } + + /* Combine G generators and recurse, if that would be more optimal */ + if ((n > 2048 && i == 3) || (n > 128 && i == 2) || (n > 32 && i == 1)) { + secp256k1_scalar yinv2; + + for (j = 0; j < halfwidth; j++) { + secp256k1_gej rj; + secp256k1_ecmult_multi_var(error_callback, scratch, &rj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g, (void *) &pfdata, 2u << i); + pfdata.geng += 2u << i; + secp256k1_ge_set_gej(&geng[j], &rj); + secp256k1_scalar_set_int(&pfdata.yinvn, 1); + secp256k1_ecmult_multi_var(error_callback, scratch, &rj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h, (void *) &pfdata, 2u << i); + pfdata.genh += 2u << i; + secp256k1_ge_set_gej(&genh[j], &rj); + } + + secp256k1_scalar_sqr(&yinv2, yinv); + for (j = 0; j < i; j++) { + secp256k1_scalar_sqr(&yinv2, &yinv2); + 
} + if (!secp256k1_bulletproof_inner_product_real_prove_impl(error_callback, scratch, out_pt, pt_idx, g, geng, genh, a_arr, b_arr, &yinv2, ux, halfwidth, commit)) { + return 0; + } + break; + } + } + return 1; +} + +static int secp256k1_bulletproof_inner_product_prove_impl(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, unsigned char *proof, size_t *proof_len, const secp256k1_bulletproof_generators *gens, const secp256k1_scalar *yinv, const size_t n, secp256k1_ecmult_multi_callback *cb, void *cb_data, const unsigned char *commit_inp) { + secp256k1_sha256 sha256; + size_t i; + unsigned char commit[32]; + secp256k1_scalar *a_arr; + secp256k1_scalar *b_arr; + secp256k1_ge *out_pt; + secp256k1_ge *geng; + secp256k1_ge *genh; + secp256k1_scalar ux; + int overflow; + size_t pt_idx = 0; + secp256k1_scalar dot; + size_t half_n_ab = n < IP_AB_SCALARS / 2 ? n : IP_AB_SCALARS / 2; + const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch); + + if (*proof_len < secp256k1_bulletproof_innerproduct_proof_length(n)) { + return 0; + } + *proof_len = secp256k1_bulletproof_innerproduct_proof_length(n); + + /* Special-case lengths 0 and 1 whose proofs are just explicit lists of scalars */ + if (n <= IP_AB_SCALARS / 2) { + secp256k1_scalar a[IP_AB_SCALARS / 2]; + secp256k1_scalar b[IP_AB_SCALARS / 2]; + + for (i = 0; i < n; i++) { + cb(&a[i], NULL, 2*i, cb_data); + cb(&b[i], NULL, 2*i+1, cb_data); + } + + secp256k1_scalar_dot_product(&dot, a, b, n); + secp256k1_scalar_get_b32(proof, &dot); + + for (i = 0; i < n; i++) { + secp256k1_scalar_get_b32(&proof[32 * (i + 1)], &a[i]); + secp256k1_scalar_get_b32(&proof[32 * (i + n + 1)], &b[i]); + } + VERIFY_CHECK(*proof_len == 32 * (2 * n + 1)); + return 1; + } + + a_arr = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n * sizeof(secp256k1_scalar)); + b_arr = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n * sizeof(secp256k1_scalar)); + geng = 
(secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n * sizeof(secp256k1_ge)); + genh = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n * sizeof(secp256k1_ge)); + out_pt = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, 2 * secp256k1_floor_lg(n) * sizeof(secp256k1_ge)); + + if (a_arr == NULL || b_arr == NULL || geng == NULL || genh == NULL || out_pt == NULL) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + VERIFY_CHECK(a_arr != NULL); + VERIFY_CHECK(b_arr != NULL); + VERIFY_CHECK(gens != NULL); + + for (i = 0; i < n; i++) { + cb(&a_arr[i], NULL, 2*i, cb_data); + cb(&b_arr[i], NULL, 2*i+1, cb_data); + geng[i] = gens->gens[i]; + genh[i] = gens->gens[i + gens->n/2]; + } + + /* Record final dot product */ + secp256k1_scalar_dot_product(&dot, a_arr, b_arr, n); + secp256k1_scalar_get_b32(proof, &dot); + + /* Protocol 2: hash dot product to obtain G-randomizer */ + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit_inp, 32); + secp256k1_sha256_write(&sha256, proof, 32); + secp256k1_sha256_finalize(&sha256, commit); + + proof += 32; + + secp256k1_scalar_set_b32(&ux, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&ux)) { + /* cryptographically unreachable */ + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + if (!secp256k1_bulletproof_inner_product_real_prove_impl(error_callback, scratch, out_pt, &pt_idx, gens->blinding_gen, geng, genh, a_arr, b_arr, yinv, &ux, n, commit)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + /* Final a/b values */ + for (i = 0; i < half_n_ab; i++) { + secp256k1_scalar_get_b32(&proof[32 * i], &a_arr[i]); + secp256k1_scalar_get_b32(&proof[32 * (i + half_n_ab)], &b_arr[i]); + } + proof += 64 * half_n_ab; + secp256k1_bulletproof_serialize_points(proof, out_pt, pt_idx); + + 
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 1; +} + +#undef IP_AB_SCALARS + +#endif diff --git a/src/secp256k1/src/modules/bulletproofs/main_impl.h b/src/secp256k1/src/modules/bulletproofs/main_impl.h new file mode 100644 index 0000000000000..6c492d401c6b5 --- /dev/null +++ b/src/secp256k1/src/modules/bulletproofs/main_impl.h @@ -0,0 +1,298 @@ +/********************************************************************** + * Copyright (c) 2018 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_BULLETPROOF_MAIN_IMPL +#define SECP256K1_MODULE_BULLETPROOF_MAIN_IMPL + +#include "group.h" +#include "scalar.h" + +#include "modules/commitment/main_impl.h" + +struct secp256k1_bulletproof_generators { + size_t n; + /* `G_i`, `H_i` generators, `n` each of them which are generated when creating this struct */ + secp256k1_ge *gens; + /* `H` "alternate" generator, used in Pedersen commitments. Passed in by caller to + * `secp256k1_bulletproof_generators_create`; stored in this structure to allow consistent + * generators between functions using `secp256k1_bulletproof_generators` and functions + * using the Pedersen commitment module. 
*/ + secp256k1_ge *blinding_gen; +}; + +#include "modules/bulletproofs/inner_product_impl.h" +#include "modules/bulletproofs/rangeproof_impl.h" +#include "modules/bulletproofs/util.h" + +secp256k1_bulletproof_generators *secp256k1_bulletproof_generators_create(const secp256k1_context *ctx, const secp256k1_generator *blinding_gen, size_t n) { + secp256k1_bulletproof_generators *ret; + secp256k1_rfc6979_hmac_sha256 rng; + unsigned char seed[64]; + secp256k1_gej precompj; + size_t i; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(blinding_gen != NULL); + + ret = (secp256k1_bulletproof_generators *)checked_malloc(&ctx->error_callback, sizeof(*ret)); + if (ret == NULL) { + return NULL; + } + ret->gens = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (n + 1) * sizeof(*ret->gens)); + if (ret->gens == NULL) { + free(ret); + return NULL; + } + ret->blinding_gen = &ret->gens[n]; + ret->n = n; + + secp256k1_fe_get_b32(&seed[0], &secp256k1_ge_const_g.x); + secp256k1_fe_get_b32(&seed[32], &secp256k1_ge_const_g.y); + + secp256k1_rfc6979_hmac_sha256_initialize(&rng, seed, 64); + for (i = 0; i < n; i++) { + unsigned char tmp[32] = { 0 }; + secp256k1_generator gen; + secp256k1_rfc6979_hmac_sha256_generate(&rng, tmp, 32); + CHECK(secp256k1_generator_generate(ctx, &gen, tmp)); + secp256k1_generator_load(&ret->gens[i], &gen); + + secp256k1_gej_set_ge(&precompj, &ret->gens[i]); + } + + secp256k1_generator_load(&ret->blinding_gen[0], blinding_gen); + secp256k1_gej_set_ge(&precompj, &ret->blinding_gen[0]); + + return ret; +} + +void secp256k1_bulletproof_generators_destroy(const secp256k1_context* ctx, secp256k1_bulletproof_generators *gens) { + (void) ctx; + if (gens != NULL) { + free(gens->gens); + free(gens); + } +} + +int secp256k1_bulletproof_rangeproof_verify(const secp256k1_context* ctx, secp256k1_scratch_space *scratch, const secp256k1_bulletproof_generators *gens, const unsigned char *proof, size_t plen, + const uint64_t *min_value, const secp256k1_pedersen_commitment* commit, 
size_t n_commits, size_t nbits, const secp256k1_generator *value_gen, const unsigned char *extra_commit, size_t extra_commit_len) { + int ret; + size_t i; + secp256k1_ge *commitp; + secp256k1_ge value_genp; + const secp256k1_ge *commitp_ptr; + const uint64_t *minvalue_ptr; + const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch); + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(scratch != NULL); + ARG_CHECK(gens != NULL); + ARG_CHECK(gens->n >= 2 * nbits * n_commits); + ARG_CHECK(proof != NULL); + ARG_CHECK(commit != NULL); + ARG_CHECK(n_commits > 0); + ARG_CHECK(nbits > 0); + ARG_CHECK(nbits <= 64); + ARG_CHECK(value_gen != NULL); + ARG_CHECK(extra_commit != NULL || extra_commit_len == 0); + + commitp = (secp256k1_ge *)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_commits * sizeof(secp256k1_ge)); + if (commitp == NULL) { + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return 0; + } + + for (i = 0; i < n_commits; i++) { + secp256k1_pedersen_commitment_load(&commitp[i], &commit[i]); + } + secp256k1_generator_load(&value_genp, value_gen); + + commitp_ptr = commitp; + minvalue_ptr = min_value; + ret = secp256k1_bulletproof_rangeproof_verify_impl(&ctx->error_callback, scratch, &proof, 1, plen, nbits, &minvalue_ptr, &commitp_ptr, n_commits, &value_genp, gens, &extra_commit, &extra_commit_len); + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return ret; +} + +int secp256k1_bulletproof_rangeproof_verify_multi(const secp256k1_context* ctx, secp256k1_scratch_space *scratch, const secp256k1_bulletproof_generators *gens, const unsigned char* const* proof, size_t n_proofs, size_t plen, const uint64_t* const* min_value, const secp256k1_pedersen_commitment* const* commit, size_t n_commits, size_t nbits, const secp256k1_generator *value_gen, const unsigned char* const* extra_commit, size_t *extra_commit_len) { + int ret; + secp256k1_ge **commitp; + 
secp256k1_ge *value_genp; + size_t i; + const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch); + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(scratch != NULL); + ARG_CHECK(gens != NULL); + ARG_CHECK(gens->n >= 2 * nbits * n_commits); + ARG_CHECK(commit != NULL); + ARG_CHECK(proof != NULL); + ARG_CHECK(n_proofs > 0); + ARG_CHECK(n_commits > 0); + ARG_CHECK(nbits > 0); + ARG_CHECK(nbits <= 64); + ARG_CHECK(value_gen != NULL); + ARG_CHECK((extra_commit_len == NULL) == (extra_commit == NULL)); + if (extra_commit != NULL) { + for (i = 0; i < n_proofs; i++) { + ARG_CHECK(extra_commit[i] != NULL || extra_commit_len[i] == 0); + } + } + + commitp = (secp256k1_ge **)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_proofs * sizeof(*commitp)); + value_genp = (secp256k1_ge *)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_proofs * sizeof(*value_genp)); + if (commitp == NULL || value_genp == NULL) { + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return 0; + } + + for (i = 0; i < n_proofs; i++) { + size_t j; + commitp[i] = (secp256k1_ge *)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_commits * sizeof(*commitp[i])); + if (commitp[i] == NULL) { + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return 0; + } + + for (j = 0; j < n_commits; j++) { + secp256k1_pedersen_commitment_load(&commitp[i][j], &commit[i][j]); + } + secp256k1_generator_load(&value_genp[i], &value_gen[i]); + } + + ret = secp256k1_bulletproof_rangeproof_verify_impl(&ctx->error_callback, scratch, proof, n_proofs, plen, nbits, min_value, (const secp256k1_ge **) commitp, n_commits, value_genp, gens, extra_commit, extra_commit_len); + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return ret; +} + +int secp256k1_bulletproof_rangeproof_rewind(const secp256k1_context* ctx, uint64_t *value, unsigned char *blind, const unsigned char 
*proof, size_t plen, uint64_t min_value, const secp256k1_pedersen_commitment* commit, const secp256k1_generator *value_gen, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len, unsigned char *message) { + secp256k1_scalar blinds; + int ret; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(value != NULL); + ARG_CHECK(blind != NULL); + ARG_CHECK(proof != NULL); + ARG_CHECK(commit != NULL); + ARG_CHECK(value_gen != NULL); + ARG_CHECK(nonce != NULL); + ARG_CHECK(extra_commit != NULL || extra_commit_len == 0); + + ret = secp256k1_bulletproof_rangeproof_rewind_impl(value, &blinds, proof, plen, min_value, commit, value_gen, nonce, extra_commit, extra_commit_len, message); + if (ret == 1) { + secp256k1_scalar_get_b32(blind, &blinds); + } + return ret; +} + +int secp256k1_bulletproof_rangeproof_prove( + const secp256k1_context* ctx, secp256k1_scratch_space* scratch, const secp256k1_bulletproof_generators* gens, + unsigned char* proof, size_t* plen, + unsigned char* tau_x, secp256k1_pubkey* t_one, secp256k1_pubkey* t_two, + const uint64_t* value, const uint64_t* min_value, + const unsigned char* const* blind, const secp256k1_pedersen_commitment* const* commits, size_t n_commits, + const secp256k1_generator* value_gen, size_t nbits, + const unsigned char* nonce, const unsigned char* private_nonce, + const unsigned char* extra_commit, size_t extra_commit_len, const unsigned char* message +) { + int ret; + secp256k1_ge *commitp; + secp256k1_scalar *blinds; + secp256k1_ge value_genp; + size_t i; + const unsigned char *secondary_nonce; + secp256k1_ge *tge = NULL; + const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch); + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(scratch != NULL); + ARG_CHECK(gens != NULL); + ARG_CHECK(gens->n >= 2 * nbits * n_commits); + ARG_CHECK( + (proof != NULL && plen != NULL && tau_x == NULL && t_one == NULL && t_two == NULL && commits == NULL) || + (proof == NULL && plen == NULL && tau_x 
== NULL && t_one != NULL && t_two != NULL && commits != NULL && private_nonce != NULL) || + (proof == NULL && plen == NULL && tau_x != NULL && t_one != NULL && t_two != NULL && commits != NULL && private_nonce != NULL) || + (proof != NULL && plen != NULL && tau_x != NULL && t_one != NULL && t_two != NULL && commits != NULL && private_nonce != NULL) + ); /* 1) normal BP, 2) multi-party BP step 1, 3) multi-party BP step 2, 4) multi-party BP step 3 */ + ARG_CHECK(value != NULL); + ARG_CHECK(blind != NULL); + ARG_CHECK(value_gen != NULL); + ARG_CHECK(nonce != NULL); + ARG_CHECK(n_commits > 0 && n_commits); + ARG_CHECK(nbits <= 64); + if (nbits < 64) { + for (i = 0; i < n_commits; i++) { + ARG_CHECK(value[i] < (1ull << nbits)); + ARG_CHECK(blind[i] != NULL); + } + } + ARG_CHECK(extra_commit != NULL || extra_commit_len == 0); + ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + + commitp = (secp256k1_ge *)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_commits * sizeof(*commitp)); + blinds = (secp256k1_scalar *)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_commits * sizeof(*blinds)); + if (commitp == NULL || blinds == NULL) { + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return 0; + } + + secp256k1_generator_load(&value_genp, value_gen); + for (i = 0; i < n_commits; i++) { + int overflow; + secp256k1_scalar_set_b32(&blinds[i], blind[i], &overflow); + if (overflow || secp256k1_scalar_is_zero(&blinds[i])) { + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return 0; + } + + if (commits == NULL) { + /* Calculate commitment from blinding factor */ + secp256k1_gej commitj; + secp256k1_pedersen_ecmult(&commitj, &blinds[i], value[i], &value_genp, &gens->blinding_gen[0]); + secp256k1_ge_set_gej(&commitp[i], &commitj); + } + else { + /* Multi-party bulletproof: total blinding factor unknown. 
Input commitment(s) */ + secp256k1_pedersen_commitment_load(&commitp[i], commits[i]); + } + } + + if (private_nonce == NULL) { + secondary_nonce = nonce; + } + else { + secondary_nonce = private_nonce; + } + + if (t_one != NULL) { + tge = malloc(2*sizeof(secp256k1_ge)); + if (tge == NULL){ + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return 0; + } + if (tau_x != NULL) { + if (!secp256k1_pubkey_load(ctx, &tge[0], t_one)) { + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return 0; + } + if (!secp256k1_pubkey_load(ctx, &tge[1], t_two)) { + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return 0; + } + } + } + + ret = secp256k1_bulletproof_rangeproof_prove_impl(&ctx->error_callback, scratch, proof, plen, tau_x, tge, nbits, value, min_value, blinds, commitp, n_commits, &value_genp, gens, nonce, secondary_nonce, extra_commit, extra_commit_len, message); + + if (t_one != NULL && tau_x == NULL) { + secp256k1_pubkey_save(t_one, &tge[0]); + secp256k1_pubkey_save(t_two, &tge[1]); + } + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint); + return ret; +} + +#endif diff --git a/src/secp256k1/src/modules/bulletproofs/rangeproof_impl.h b/src/secp256k1/src/modules/bulletproofs/rangeproof_impl.h new file mode 100644 index 0000000000000..39dbcab5c480a --- /dev/null +++ b/src/secp256k1/src/modules/bulletproofs/rangeproof_impl.h @@ -0,0 +1,855 @@ +/********************************************************************** + * Copyright (c) 2018 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_BULLETPROOF_RANGEPROOF_IMPL +#define SECP256K1_MODULE_BULLETPROOF_RANGEPROOF_IMPL + +#include 
"modules/bulletproofs/inner_product_impl.h" +#include "modules/bulletproofs/util.h" +#include "group.h" + +#define MAX_NBITS 64 + +typedef struct { + secp256k1_scalar yinv; + secp256k1_scalar yinvn; + secp256k1_scalar z; + secp256k1_scalar z_randomized; + secp256k1_scalar zsq; + secp256k1_scalar g_exponent; + secp256k1_scalar negz; + secp256k1_scalar x; + secp256k1_ge a; + secp256k1_ge s; + size_t n; + /* eq (61) stuff */ + size_t count; + secp256k1_scalar randomizer61; + secp256k1_scalar y; + secp256k1_scalar t; + const secp256k1_ge *asset; + const secp256k1_ge *commit; + const uint64_t *min_value; + size_t n_commits; + secp256k1_ge t1; + secp256k1_ge t2; +} secp256k1_bulletproof_vfy_ecmult_context; + +static int secp256k1_bulletproof_rangeproof_vfy_callback(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data) { + secp256k1_bulletproof_vfy_ecmult_context *ctx = (secp256k1_bulletproof_vfy_ecmult_context *) data; + + if (idx == 0) { + secp256k1_scalar_mul(&ctx->g_exponent, &ctx->negz, randomizer); + secp256k1_scalar_mul(&ctx->z_randomized, &ctx->z, randomizer); + } + + if (idx < ctx->n) { + *sc = ctx->g_exponent; + } else if (idx < 2 * ctx->n) { + const size_t nbits = ctx->n / ctx->n_commits; + const size_t commit_idx = (idx - ctx->n) / nbits; + const size_t bit_idx = (idx - ctx->n) % nbits; + + if (bit_idx == 0) { + size_t i; + secp256k1_scalar tmp; + secp256k1_scalar_mul(&tmp, &ctx->z, &ctx->yinvn); + secp256k1_scalar_sqr(&ctx->zsq, &ctx->z); + for (i = 0; i < commit_idx; i++) { + secp256k1_scalar_mul(&ctx->zsq, &ctx->zsq, &tmp); + } + secp256k1_scalar_mul(&ctx->zsq, &ctx->zsq, randomizer); + } + secp256k1_scalar_add(sc, &ctx->zsq, &ctx->z_randomized); + + secp256k1_scalar_mul(&ctx->zsq, &ctx->zsq, &ctx->yinv); + secp256k1_scalar_add(&ctx->zsq, &ctx->zsq, &ctx->zsq); + } else { + switch(ctx->count) { + /* S^x in eq (62) */ + case 2: + *sc = ctx->x; + *pt = ctx->s; + break; + /* A in eq (62) */ + case 1: + *pt = ctx->a; + 
secp256k1_scalar_set_int(sc, 1); + break; + /* G^[k(y, z) + sum_i y^i - t] from eq (61) */ + case 0: { + size_t i; + secp256k1_scalar yn; + secp256k1_scalar twosum; + secp256k1_scalar tmp; + + secp256k1_scalar_clear(&twosum); + secp256k1_scalar_clear(&yn); + secp256k1_scalar_set_int(&tmp, 1); + + secp256k1_scalar_sqr(&ctx->zsq, &ctx->z); /* need to re-set this */ + secp256k1_scalar_negate(sc, &ctx->zsq); /* -z^2 */ + secp256k1_scalar_add(sc, sc, &ctx->z); /* z - z^2 */ + + for (i = 0; i < ctx->n_commits; i++) { + const size_t nbits = ctx->n / ctx->n_commits; + secp256k1_scalar negzn; + secp256k1_scalar twon; + size_t j; + + secp256k1_scalar_clear(&twon); + for (j = 0; j < nbits; j++) { + secp256k1_scalar_mul(&yn, &yn, &ctx->y); + secp256k1_scalar_add(&twon, &twon, &twon); + + secp256k1_scalar_add(&yn, &yn, &tmp); + secp256k1_scalar_add(&twon, &twon, &tmp); + } + + secp256k1_scalar_mul(&negzn, &ctx->zsq, &ctx->negz); + for (j = 0; j < i; j++) { + secp256k1_scalar_mul(&negzn, &negzn, &ctx->z); + } + if (ctx->min_value != NULL) { + secp256k1_scalar mv; + secp256k1_scalar_set_int(&mv, ctx->min_value[i]); + secp256k1_scalar_mul(&mv, &mv, &ctx->negz); + secp256k1_scalar_mul(&mv, &mv, &ctx->z); + for (j = 0; j < i; j++) { + secp256k1_scalar_mul(&negzn, &negzn, &ctx->z); + } + secp256k1_scalar_add(&twosum, &twosum, &mv); + } + secp256k1_scalar_mul(&twon, &twon, &negzn); + secp256k1_scalar_add(&twosum, &twosum, &twon); + } /* yn = 1 + y + ... + y^(n-1); twosum = (z^3 + ... + z^{2 + n_commits})(1 + 2 + ... + 2^(n-1)) */ + + + secp256k1_scalar_mul(sc, sc, &yn); /* (z - z^2)(1 + ... + y^(n-1)) */ + secp256k1_scalar_add(sc, sc, &twosum); /* (z - z^2)(1 + ... + y^(n-1)) - z^3(1 + ... + 2^(n-1)) */ + secp256k1_scalar_negate(&tmp, &ctx->t); + secp256k1_scalar_add(sc, sc, &tmp); /* (z - z^2)(1 + ... + y^n) - z^3(1 + ... 
+ 2^n) - t */ + secp256k1_scalar_mul(sc, sc, &ctx->randomizer61); + *pt = *ctx->asset; + break; + } + /* T1^x in eq (61) */ + case 3: + secp256k1_scalar_mul(sc, &ctx->x, &ctx->randomizer61); + *pt = ctx->t1; + break; + /* T2^x^2 in eq (61) */ + case 4: + secp256k1_scalar_sqr(sc, &ctx->x); + secp256k1_scalar_mul(sc, sc, &ctx->randomizer61); + *pt = ctx->t2; + break; + /* V^z^2 in eq (61) */ + default: + VERIFY_CHECK(ctx->count < 5 + ctx->n_commits); + + secp256k1_scalar_mul(sc, &ctx->zsq, &ctx->randomizer61); + secp256k1_scalar_mul(&ctx->zsq, &ctx->zsq, &ctx->z); + *pt = ctx->commit[ctx->count - 5]; + break; + } + secp256k1_scalar_mul(sc, sc, randomizer); + ctx->count++; + } + return 1; +} + +static int secp256k1_bulletproof_rangeproof_verify_impl(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, const unsigned char* const* proof, const size_t n_proofs, const size_t plen, size_t nbits, const uint64_t* const* min_value, const secp256k1_ge* const* commitp, size_t n_commits, const secp256k1_ge *value_gen, const secp256k1_bulletproof_generators *gens, const unsigned char* const* extra_commit, size_t *extra_commit_len) { + secp256k1_bulletproof_vfy_ecmult_context *ecmult_data; + secp256k1_bulletproof_innerproduct_context *innp_ctx; + int ret; + size_t i; + int same_generators = 1; + const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch); + + /* sanity-check input */ + if (secp256k1_popcountl(nbits) != 1 || nbits > MAX_NBITS) { + return 0; + } + if (plen < 64 + 128 + 1 + 32) { /* inner product argument will do a more precise check */ + return 0; + } + if (plen > SECP256K1_BULLETPROOF_MAX_PROOF) { + return 0; + } + + ecmult_data = (secp256k1_bulletproof_vfy_ecmult_context *)secp256k1_scratch_alloc(error_callback, scratch, n_proofs * sizeof(*ecmult_data)); + innp_ctx = (secp256k1_bulletproof_innerproduct_context *)secp256k1_scratch_alloc(error_callback, scratch, n_proofs * sizeof(*innp_ctx)); + if (ecmult_data == NULL || 
innp_ctx == NULL) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + /* Check if all generators are equal. If so, we can amortize all their scalar multiplications + * by them and save one scalar-ge multiplication per proof. */ + VERIFY_CHECK(!secp256k1_ge_is_infinity(&value_gen[0])); + for (i = 1; i < n_proofs; i++) { + VERIFY_CHECK(!secp256k1_ge_is_infinity(&value_gen[i])); + if (!secp256k1_fe_equal_var(&value_gen[i].x, &value_gen[i - 1].x) || + !secp256k1_fe_equal_var(&value_gen[i].y, &value_gen[i - 1].y)) { + same_generators = 0; + } + } + + for (i = 0; i < n_proofs; i++) { + secp256k1_sha256 sha256; + unsigned char commit[32] = {0}; + unsigned char randomizer61[32] = {0}; /* randomizer for eq (61) so we can add it to eq (62) to save a separate multiexp */ + secp256k1_scalar taux, mu; + secp256k1_ge age, sge; + int overflow; + size_t j; + + /* Commit to all input data: min value, pedersen commit, asset generator, extra_commit */ + if (min_value != NULL && min_value[i] != NULL) { + unsigned char len[4]; + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + len[0] = n_commits; + len[1] = n_commits >> 8; + len[2] = n_commits >> 16; + len[3] = n_commits >> 24; + secp256k1_sha256_write(&sha256, len, 4); + for (j = 0; j < n_commits; j++) { + unsigned char vbuf[8]; + vbuf[0] = min_value[i][j]; + vbuf[1] = min_value[i][j] >> 8; + vbuf[2] = min_value[i][j] >> 16; + vbuf[3] = min_value[i][j] >> 24; + vbuf[4] = min_value[i][j] >> 32; + vbuf[5] = min_value[i][j] >> 40; + vbuf[6] = min_value[i][j] >> 48; + vbuf[7] = min_value[i][j] >> 56; + secp256k1_sha256_write(&sha256, vbuf, 8); + } + secp256k1_sha256_finalize(&sha256, commit); + } + for (j = 0; j < n_commits; j++) { + secp256k1_bulletproof_update_commit(commit, &commitp[i][j], &value_gen[i]); + } + if (extra_commit != NULL && extra_commit[i] != NULL) { + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 
32); + secp256k1_sha256_write(&sha256, extra_commit[i], extra_commit_len[i]); + secp256k1_sha256_finalize(&sha256, commit); + } + + /* Compute y, z, x */ + if (!secp256k1_bulletproof_deserialize_point(&age, &proof[i][64], 0, 4) || + !secp256k1_bulletproof_deserialize_point(&sge, &proof[i][64], 1, 4)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + secp256k1_bulletproof_update_commit(commit, &age, &sge); + secp256k1_scalar_set_b32(&ecmult_data[i].y, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].y)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + secp256k1_bulletproof_update_commit(commit, &age, &sge); + secp256k1_scalar_set_b32(&ecmult_data[i].z, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].z)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + if (!secp256k1_bulletproof_deserialize_point(&ecmult_data[i].t1, &proof[i][64], 2, 4) || + !secp256k1_bulletproof_deserialize_point(&ecmult_data[i].t2, &proof[i][64], 3, 4)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + secp256k1_bulletproof_update_commit(commit, &ecmult_data[i].t1, &ecmult_data[i].t2); + secp256k1_scalar_set_b32(&ecmult_data[i].x, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].x)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + /* compute exponent offsets */ + secp256k1_scalar_inverse_var(&ecmult_data[i].yinv, &ecmult_data[i].y); /* TODO somehow batch this w the inner-product argument inverse */ + ecmult_data[i].yinvn = ecmult_data[i].yinv; + for (j = 0; j < secp256k1_floor_lg(nbits); j++) { + secp256k1_scalar_sqr(&ecmult_data[i].yinvn, &ecmult_data[i].yinvn); + } + secp256k1_scalar_sqr(&ecmult_data[i].zsq, &ecmult_data[i].z); + 
secp256k1_scalar_negate(&ecmult_data[i].negz, &ecmult_data[i].z); + + /* Update commit with remaining data for the inner product proof */ + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, &proof[i][0], 64); + secp256k1_sha256_finalize(&sha256, commit); + + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_finalize(&sha256, randomizer61); + secp256k1_scalar_set_b32(&ecmult_data[i].randomizer61, randomizer61, &overflow); + if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].randomizer61)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + /* Deserialize everything else */ + secp256k1_scalar_set_b32(&taux, &proof[i][0], &overflow); + if (overflow || secp256k1_scalar_is_zero(&taux)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + secp256k1_scalar_set_b32(&mu, &proof[i][32], &overflow); + if (overflow || secp256k1_scalar_is_zero(&mu)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + /* A little sketchy, we read t (l(x) . r(x)) off the front of the inner product proof, + * which we otherwise treat as a black box */ + secp256k1_scalar_set_b32(&ecmult_data[i].t, &proof[i][64 + 128 + 1], &overflow); + if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].t)) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } + + /* Verify inner product proof */ + ecmult_data[i].a = age; + ecmult_data[i].s = sge; + ecmult_data[i].n = nbits * n_commits; + ecmult_data[i].count = 0; + ecmult_data[i].asset = &value_gen[i]; + ecmult_data[i].min_value = min_value == NULL ? 
NULL : min_value[i]; + ecmult_data[i].commit = commitp[i]; + ecmult_data[i].n_commits = n_commits; + secp256k1_scalar_mul(&taux, &taux, &ecmult_data[i].randomizer61); + secp256k1_scalar_add(&mu, &mu, &taux); + + innp_ctx[i].proof = &proof[i][64 + 128 + 1]; + innp_ctx[i].p_offs = mu; + memcpy(innp_ctx[i].commit, commit, 32); + innp_ctx[i].yinv = ecmult_data[i].yinv; + innp_ctx[i].rangeproof_cb = secp256k1_bulletproof_rangeproof_vfy_callback; + innp_ctx[i].rangeproof_cb_data = (void *) &ecmult_data[i]; + innp_ctx[i].n_extra_rangeproof_points = 5 + n_commits; + } + + ret = secp256k1_bulletproof_inner_product_verify_impl(error_callback, scratch, gens, nbits * n_commits, innp_ctx, n_proofs, plen - (64 + 128 + 1), same_generators); + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return ret; +} + +typedef struct { + const unsigned char *nonce; + secp256k1_scalar y; + secp256k1_scalar z; + secp256k1_scalar yn; + secp256k1_scalar z22n; + const uint64_t *val; + const uint64_t *min_val; + size_t n_vals; + size_t nbits; + size_t count; +} secp256k1_bulletproof_lr_generator; + +static void secp256k1_lr_generator_init(secp256k1_bulletproof_lr_generator *generator, const unsigned char *nonce, const secp256k1_scalar *y, const secp256k1_scalar *z, size_t nbits, const uint64_t *val, const uint64_t *min_val, size_t n_vals) { + generator->nonce = nonce; + generator->y = *y; + generator->z = *z; + secp256k1_scalar_set_int(&generator->yn, 1); + generator->nbits = nbits; + generator->val = val; + generator->min_val = min_val; + generator->n_vals = n_vals; + generator->count = 0; +} + +static void secp256k1_lr_generate(secp256k1_bulletproof_lr_generator *generator, secp256k1_scalar *lout, secp256k1_scalar *rout, const secp256k1_scalar *x) { + const size_t commit_idx = generator->count / generator->nbits; + const size_t bit_idx = generator->count % generator->nbits; + const uint64_t mv = generator->min_val == NULL ? 
0 : generator->min_val[commit_idx]; + const int bit = ((generator->val[commit_idx] - mv) >> bit_idx) & 1; + secp256k1_scalar sl, sr; + secp256k1_scalar negz; + + if (bit_idx == 0) { + size_t i; + secp256k1_scalar_sqr(&generator->z22n, &generator->z); + for (i = 0; i < commit_idx; i++) { + secp256k1_scalar_mul(&generator->z22n, &generator->z22n, &generator->z); + } + } + + secp256k1_scalar_chacha20(&sl, &sr, generator->nonce, generator->count + 2); + secp256k1_scalar_mul(&sl, &sl, x); + secp256k1_scalar_mul(&sr, &sr, x); + + secp256k1_scalar_set_int(lout, bit); + secp256k1_scalar_negate(&negz, &generator->z); + secp256k1_scalar_add(lout, lout, &negz); + secp256k1_scalar_add(lout, lout, &sl); + + secp256k1_scalar_set_int(rout, 1 - bit); + secp256k1_scalar_negate(rout, rout); + secp256k1_scalar_add(rout, rout, &generator->z); + secp256k1_scalar_add(rout, rout, &sr); + secp256k1_scalar_mul(rout, rout, &generator->yn); + secp256k1_scalar_add(rout, rout, &generator->z22n); + + generator->count++; + secp256k1_scalar_mul(&generator->yn, &generator->yn, &generator->y); + secp256k1_scalar_add(&generator->z22n, &generator->z22n, &generator->z22n); +} + +typedef struct { + secp256k1_scalar x; + secp256k1_scalar cache; + secp256k1_bulletproof_lr_generator lr_gen; +} secp256k1_bulletproof_abgh_data; + +static int secp256k1_bulletproof_abgh_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_bulletproof_abgh_data *ctx = (secp256k1_bulletproof_abgh_data *) data; + const int is_g = idx % 2 == 0; + + (void) pt; + if (is_g) { + secp256k1_lr_generate(&ctx->lr_gen, sc, &ctx->cache, &ctx->x); + } else { + *sc = ctx->cache; + } + + return 1; +} + +/* Proof format: t, tau_x, mu, a, b, A, S, T_1, T_2, {L_i}, {R_i} + * 5 scalar + [4 + 2log(n)] ge + * + * The non-bold `h` in the Bulletproofs paper corresponds to our gens->blinding_gen + * while the non-bold `g` corresponds to the asset type `value_gen`. 
+ */ +static int secp256k1_bulletproof_rangeproof_prove_impl( + const secp256k1_callback *error_callback, secp256k1_scratch *scratch, + unsigned char *proof, size_t *plen, + unsigned char *tauxc, secp256k1_ge *tge, + const size_t nbits, const uint64_t *value, const uint64_t *min_value, + const secp256k1_scalar *blind, const secp256k1_ge *commitp, size_t n_commits, + const secp256k1_ge *value_gen, const secp256k1_bulletproof_generators *gens, + const unsigned char *nonce, const unsigned char *private_nonce, + const unsigned char *extra_commit, size_t extra_commit_len, const unsigned char *message) { + secp256k1_bulletproof_lr_generator lr_gen; + secp256k1_bulletproof_abgh_data abgh_data; + secp256k1_scalar zero; + secp256k1_sha256 sha256; + unsigned char commit[32] = {0}; + secp256k1_scalar alpha, rho; + secp256k1_scalar t0, t1, t2; + secp256k1_scalar tau1, tau2, taux, mu; + secp256k1_scalar y; + secp256k1_scalar z, zsq; + secp256k1_scalar x, xsq; + secp256k1_scalar tmps; + secp256k1_gej aj, sj; + secp256k1_gej tmpj; + size_t i, j; + int overflow; + unsigned char vals_bytes[32] = {0}; + /* inner product proof variables */ + secp256k1_ge out_pt[4]; + + if (secp256k1_popcountl(nbits) != 1 || nbits > MAX_NBITS) { + return 0; + } + for (i = 0; i < n_commits; i++) { + uint64_t mv = min_value == NULL ? 
0 : min_value[i]; + if (mv > value[i]) { + return 0; + } + if (nbits < 64 && (value[i] - mv) >= (1ull << nbits)) { + return 0; + } + } + if (plen != NULL && *plen < 128 + 64 + 1) { /* inner product argument will check and assign plen */ + return 0; + } + + secp256k1_scalar_clear(&zero); + + /* Commit to all input data: min value, pedersen commit, asset generator, extra_commit */ + if (min_value != NULL) { + unsigned char len[4]; + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + len[0] = n_commits; + len[1] = n_commits >> 8; + len[2] = n_commits >> 16; + len[3] = n_commits >> 24; + secp256k1_sha256_write(&sha256, len, 4); + for (i = 0; i < n_commits; i++) { + unsigned char vbuf[8]; + vbuf[0] = min_value[i]; + vbuf[1] = min_value[i] >> 8; + vbuf[2] = min_value[i] >> 16; + vbuf[3] = min_value[i] >> 24; + vbuf[4] = min_value[i] >> 32; + vbuf[5] = min_value[i] >> 40; + vbuf[6] = min_value[i] >> 48; + vbuf[7] = min_value[i] >> 56; + secp256k1_sha256_write(&sha256, vbuf, 8); + } + secp256k1_sha256_finalize(&sha256, commit); + } + for (i = 0; i < n_commits; i++) { + secp256k1_bulletproof_update_commit(commit, &commitp[i], value_gen); /* TODO be less stupid about this */ + } + if (extra_commit != NULL) { + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, extra_commit, extra_commit_len); + secp256k1_sha256_finalize(&sha256, commit); + } + + secp256k1_scalar_chacha20(&alpha, &rho, nonce, 0); + secp256k1_scalar_chacha20(&tau1, &tau2, private_nonce, 1); + + if (proof == NULL && tauxc == NULL && tge != NULL) { + /* Multi-party bulletproof: export tau1j*G and tau2j*G */ + secp256k1_ecmult_const(&tmpj, &gens->blinding_gen[0], &tau1, 256); + secp256k1_ge_set_gej(&tge[0], &tmpj); + + secp256k1_ecmult_const(&tmpj, &gens->blinding_gen[0], &tau2, 256); + secp256k1_ge_set_gej(&tge[1], &tmpj); + + return 1; + } + + /* Encrypt value into alpha, so it will be recoverable from -mu by 
someone who knows `nonce` */ + if (n_commits == 1) { + secp256k1_scalar vals; + secp256k1_scalar_set_u64(&vals, value[0]); + if (message != NULL) { + /* Combine value with 20 bytes of optional message */ + secp256k1_scalar_get_b32(vals_bytes, &vals); + for (i=0; i<20; i++) { + vals_bytes[i+4] = message[i]; + } + secp256k1_scalar_set_b32(&vals, vals_bytes, &overflow); + } + secp256k1_scalar_negate(&vals, &vals); /* Negate so it'll be positive in -mu */ + secp256k1_scalar_add(&alpha, &alpha, &vals); + } + + /* Compute A and S */ + secp256k1_ecmult_const(&aj, &gens->blinding_gen[0], &alpha, 256); + secp256k1_ecmult_const(&sj, &gens->blinding_gen[0], &rho, 256); + for (i = 0; i < n_commits; i++) { + for (j = 0; j < nbits; j++) { + secp256k1_scalar sl, sr; + uint64_t mv = min_value == NULL ? 0 : min_value[i]; + size_t al = !!((value[i] - mv) & (1ull << j)); + secp256k1_ge aterm = gens->gens[i * nbits + j + gens->n/2]; + secp256k1_ge sterm; + secp256k1_gej stermj; + + secp256k1_scalar_chacha20(&sl, &sr, nonce, i * nbits + j + 2); + + secp256k1_ge_neg(&aterm, &aterm); + secp256k1_fe_cmov(&aterm.x, &gens->gens[i * nbits + j].x, al); + secp256k1_fe_cmov(&aterm.y, &gens->gens[i * nbits + j].y, al); + + secp256k1_gej_add_ge(&aj, &aj, &aterm); + + secp256k1_ecmult_const(&stermj, &gens->gens[i * nbits + j], &sl, 256); + secp256k1_ge_set_gej(&sterm, &stermj); + secp256k1_gej_add_ge(&sj, &sj, &sterm); + secp256k1_ecmult_const(&stermj, &gens->gens[i * nbits + j + gens->n/2], &sr, 256); + secp256k1_ge_set_gej(&sterm, &stermj); + secp256k1_gej_add_ge(&sj, &sj, &sterm); + } + } + + /* get challenges y and z */ + secp256k1_ge_set_gej(&out_pt[0], &aj); + secp256k1_ge_set_gej(&out_pt[1], &sj); + + secp256k1_bulletproof_update_commit(commit, &out_pt[0], &out_pt[1]); + secp256k1_scalar_set_b32(&y, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&y)) { + return 0; + } + secp256k1_bulletproof_update_commit(commit, &out_pt[0], &out_pt[1]); /* TODO rehashing A and S to get a 
second challenge is overkill */ + secp256k1_scalar_set_b32(&z, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&z)) { + return 0; + } + secp256k1_scalar_sqr(&zsq, &z); + + /* Compute coefficients t0, t1, t2 of the polynomial */ + /* t0 = l(0) dot r(0) */ + secp256k1_lr_generator_init(&lr_gen, nonce, &y, &z, nbits, value, min_value, n_commits); + secp256k1_scalar_clear(&t0); + for (i = 0; i < nbits * n_commits; i++) { + secp256k1_scalar l, r; + secp256k1_lr_generate(&lr_gen, &l, &r, &zero); + secp256k1_scalar_mul(&l, &l, &r); + secp256k1_scalar_add(&t0, &t0, &l); + } + + /* A = t0 + t1 + t2 = l(1) dot r(1) */ + secp256k1_lr_generator_init(&lr_gen, nonce, &y, &z, nbits, value, min_value, n_commits); + secp256k1_scalar_clear(&t1); + for (i = 0; i < nbits * n_commits; i++) { + secp256k1_scalar one; + secp256k1_scalar l, r; + secp256k1_scalar_set_int(&one, 1); + secp256k1_lr_generate(&lr_gen, &l, &r, &one); + secp256k1_scalar_mul(&l, &l, &r); + secp256k1_scalar_add(&t1, &t1, &l); + } + + /* B = t0 - t1 + t2 = l(-1) dot r(-1) */ + secp256k1_lr_generator_init(&lr_gen, nonce, &y, &z, nbits, value, min_value, n_commits); + secp256k1_scalar_clear(&t2); + for (i = 0; i < nbits * n_commits; i++) { + secp256k1_scalar negone; + secp256k1_scalar l, r; + secp256k1_scalar_set_int(&negone, 1); + secp256k1_scalar_negate(&negone, &negone); + secp256k1_lr_generate(&lr_gen, &l, &r, &negone); + secp256k1_scalar_mul(&l, &l, &r); + secp256k1_scalar_add(&t2, &t2, &l); + } + + /* t1 = (A - B)/2 */ + secp256k1_scalar_set_int(&tmps, 2); + secp256k1_scalar_inverse_var(&tmps, &tmps); + secp256k1_scalar_negate(&t2, &t2); + secp256k1_scalar_add(&t1, &t1, &t2); + secp256k1_scalar_mul(&t1, &t1, &tmps); + + /* t2 = -(-B + t0) + t1 */ + secp256k1_scalar_add(&t2, &t2, &t0); + secp256k1_scalar_negate(&t2, &t2); + secp256k1_scalar_add(&t2, &t2, &t1); + + /* Compute Ti = t_i*A + tau_i*G for i = 1,2 */ + secp256k1_ecmult_const(&tmpj, value_gen, &t1, 256); + if (tge == NULL) { + /* Normal 
bulletproof: T1=t1*A + tau1*G */ + secp256k1_ge_set_gej(&out_pt[2], &tmpj); + secp256k1_ecmult_const(&tmpj, &gens->blinding_gen[0], &tau1, 256); + secp256k1_gej_add_ge(&tmpj, &tmpj, &out_pt[2]); + } else { + /* Multi-party bulletproof: T1=t1*A + sumj tau1j*G */ + secp256k1_gej_add_ge(&tmpj, &tmpj, &tge[0]); + } + secp256k1_ge_set_gej(&out_pt[2], &tmpj); + + secp256k1_ecmult_const(&tmpj, value_gen, &t2, 256); + if (tge == NULL) { + /* Normal bulletproof: T1=t1*A + tau1*G */ + secp256k1_ge_set_gej(&out_pt[3], &tmpj); + secp256k1_ecmult_const(&tmpj, &gens->blinding_gen[0], &tau2, 256); + secp256k1_gej_add_ge(&tmpj, &tmpj, &out_pt[3]); + } else { + /* Multi-party bulletproof: T2=t2*A + sumj tau2j*G */ + secp256k1_gej_add_ge(&tmpj, &tmpj, &tge[1]); + } + secp256k1_ge_set_gej(&out_pt[3], &tmpj); + + secp256k1_bulletproof_update_commit(commit, &out_pt[2], &out_pt[3]); + secp256k1_scalar_set_b32(&x, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&x)) { + return 0; + } + secp256k1_scalar_sqr(&xsq, &x); + + if (proof == NULL || tauxc == NULL) { + /* compute tau_x and mu */ + secp256k1_scalar_mul(&taux, &tau1, &x); + secp256k1_scalar_mul(&tmps, &tau2, &xsq); + secp256k1_scalar_add(&taux, &taux, &tmps); + for (i = 0; i < n_commits; i++) { + secp256k1_scalar_mul(&tmps, &zsq, &blind[i]); + secp256k1_scalar_add(&taux, &taux, &tmps); + secp256k1_scalar_mul(&zsq, &zsq, &z); + } + } + + if (proof == NULL) { + /* Multi-party bulletproof: export tauxj */ + secp256k1_scalar_get_b32(tauxc, &taux); + return 1; + } + + if (tauxc != NULL) { + /* Multi-party bulletproof: taux = sumj tauxj */ + secp256k1_scalar_set_b32(&taux, tauxc, &overflow); + if (overflow || secp256k1_scalar_is_zero(&tmps)) { + return 0; + } + } + + secp256k1_scalar_mul(&mu, &rho, &x); + secp256k1_scalar_add(&mu, &mu, &alpha); + + /* Negate taux and mu so the verifier doesn't have to */ + secp256k1_scalar_negate(&taux, &taux); + secp256k1_scalar_negate(&mu, &mu); + + /* Encode rangeproof stuff */ + 
secp256k1_scalar_get_b32(&proof[0], &taux); + secp256k1_scalar_get_b32(&proof[32], &mu); + secp256k1_bulletproof_serialize_points(&proof[64], out_pt, 4); + + /* Mix this into the hash so the input to the inner product proof is fixed */ + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, proof, 64); + secp256k1_sha256_finalize(&sha256, commit); + + /* Compute l and r, do inner product proof */ + abgh_data.x = x; + secp256k1_lr_generator_init(&abgh_data.lr_gen, nonce, &y, &z, nbits, value, min_value, n_commits); + *plen -= 64 + 128 + 1; + secp256k1_scalar_inverse_var(&y, &y); + if (secp256k1_bulletproof_inner_product_prove_impl(error_callback, scratch, &proof[64 + 128 + 1], plen, gens, &y, nbits * n_commits, secp256k1_bulletproof_abgh_callback, (void *) &abgh_data, commit) == 0) { + return 0; + } + *plen += 64 + 128 + 1; + + return 1; +} + +static int secp256k1_bulletproof_rangeproof_rewind_impl(uint64_t *value, secp256k1_scalar *blind, const unsigned char *proof, const size_t plen, uint64_t min_value, const secp256k1_pedersen_commitment *pcommit, const secp256k1_generator *value_gen, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len, unsigned char *message) { + secp256k1_sha256 sha256; + static const unsigned char zero4[4] = { 0 }; + unsigned char commit[32] = { 0 }; + unsigned char lrparity; + secp256k1_scalar taux, mu; + secp256k1_scalar alpha, rho, tau1, tau2; + secp256k1_scalar x, z; + secp256k1_ge commitp, value_genp; + int overflow, i; + + if (plen < 64 + 128 + 1 || plen > SECP256K1_BULLETPROOF_MAX_PROOF) { + return 0; + } + + /* Extract data from beginning of proof */ + secp256k1_scalar_set_b32(&taux, &proof[0], &overflow); + if (overflow || secp256k1_scalar_is_zero(&taux)) { + return 0; + } + secp256k1_scalar_set_b32(&mu, &proof[32], &overflow); + if (overflow || secp256k1_scalar_is_zero(&mu)) { + return 0; + } + + secp256k1_scalar_chacha20(&alpha, &rho, 
nonce, 0); + secp256k1_scalar_chacha20(&tau1, &tau2, nonce, 1); + + if (min_value > 0) { + unsigned char vbuf[8]; + const unsigned char len[4] = { 1, 0, 0, 0 }; + vbuf[0] = min_value; + vbuf[1] = min_value >> 8; + vbuf[2] = min_value >> 16; + vbuf[3] = min_value >> 24; + vbuf[4] = min_value >> 32; + vbuf[5] = min_value >> 40; + vbuf[6] = min_value >> 48; + vbuf[7] = min_value >> 56; + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, len, 4); + secp256k1_sha256_write(&sha256, vbuf, 8); + secp256k1_sha256_finalize(&sha256, commit); + } + + secp256k1_pedersen_commitment_load(&commitp, pcommit); + secp256k1_generator_load(&value_genp, value_gen); + secp256k1_bulletproof_update_commit(commit, &commitp, &value_genp); + + if (extra_commit != NULL) { + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, extra_commit, extra_commit_len); + secp256k1_sha256_finalize(&sha256, commit); + } + + /* Extract A and S to compute y and z */ + lrparity = 2 * !!(proof[64] & 1) + !!(proof[64] & 2); + /* y */ + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, &lrparity, 1); + secp256k1_sha256_write(&sha256, &proof[65], 64); + secp256k1_sha256_finalize(&sha256, commit); + + /* z */ + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, &lrparity, 1); + secp256k1_sha256_write(&sha256, &proof[65], 64); + secp256k1_sha256_finalize(&sha256, commit); + + secp256k1_scalar_set_b32(&z, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&z)) { + return 0; + } + + /* x */ + lrparity = 2 * !!(proof[64] & 4) + !!(proof[64] & 8); + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, &lrparity, 1); + secp256k1_sha256_write(&sha256, &proof[129], 64); + 
secp256k1_sha256_finalize(&sha256, commit); + + secp256k1_scalar_set_b32(&x, commit, &overflow); + if (overflow || secp256k1_scalar_is_zero(&x)) { + return 0; + } + + /* Compute candidate mu and add to (negated) mu from proof to get value */ + secp256k1_scalar_mul(&rho, &rho, &x); + secp256k1_scalar_add(&mu, &mu, &rho); + secp256k1_scalar_add(&mu, &mu, &alpha); + + secp256k1_scalar_get_b32(commit, &mu); + if (memcmp(commit, zero4, 4) != 0) { + return 0; + } + *value = commit[31] + ((uint64_t) commit[30] << 8) + + ((uint64_t) commit[29] << 16) + ((uint64_t) commit[28] << 24) + + ((uint64_t) commit[27] << 32) + ((uint64_t) commit[26] << 40) + + ((uint64_t) commit[25] << 48) + ((uint64_t) commit[24] << 56); + + if (message != NULL) { + for (i=23; i >= 4; i--) { + message[i-4] = commit[i]; + } + } + + /* Derive blinding factor */ + secp256k1_scalar_mul(&tau1, &tau1, &x); + secp256k1_scalar_mul(&tau2, &tau2, &x); + secp256k1_scalar_mul(&tau2, &tau2, &x); + + secp256k1_scalar_add(&taux, &taux, &tau1); + secp256k1_scalar_add(&taux, &taux, &tau2); + + secp256k1_scalar_sqr(&z, &z); + secp256k1_scalar_inverse_var(&z, &z); + secp256k1_scalar_mul(blind, &taux, &z); + secp256k1_scalar_negate(blind, blind); + + return 1; +} + +#endif diff --git a/src/secp256k1/src/modules/bulletproofs/tests_impl.h b/src/secp256k1/src/modules/bulletproofs/tests_impl.h new file mode 100644 index 0000000000000..b53feb0eb1c53 --- /dev/null +++ b/src/secp256k1/src/modules/bulletproofs/tests_impl.h @@ -0,0 +1,679 @@ +/********************************************************************** + * Copyright (c) 2018 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_BULLETPROOF_TESTS +#define SECP256K1_MODULE_BULLETPROOF_TESTS + +#include + +#include "group.h" +#include "scalar.h" +#include 
"testrand.h" +#include "util.h" + +#include "include/secp256k1_bulletproofs.h" + +static void test_bulletproof_api(void) { + secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); + secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); + secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); + secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + secp256k1_scratch *scratch = secp256k1_scratch_space_create(ctx, 1024 * 1024); + secp256k1_generator value_gen; + secp256k1_bulletproof_generators *gens; + secp256k1_pedersen_commitment pcommit[4]; + const secp256k1_pedersen_commitment *pcommit_arr[1]; + unsigned char proof[2000]; + const unsigned char *proof_ptr = proof; + const unsigned char blind[32] = " i am not a blinding factor "; + const unsigned char *blind_ptr[4]; + size_t blindlen = sizeof(blind); + size_t plen = sizeof(proof); + uint64_t value[4] = { 1234, 4567, 8910, 1112 } ; + uint64_t min_value[4] = { 1000, 4567, 0, 5000 } ; + const uint64_t *mv_ptr = min_value; + unsigned char rewind_blind[32]; + size_t rewind_v; + + int32_t ecount = 0; + + blind_ptr[0] = blind; + blind_ptr[1] = blind; + blind_ptr[2] = blind; + blind_ptr[3] = blind; + pcommit_arr[0] = pcommit; + + secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); + + 
CHECK(secp256k1_generator_generate(both, &value_gen, blind) != 0); + CHECK(secp256k1_pedersen_commit(both, &pcommit[0], blind, value[0], &value_gen, &secp256k1_generator_const_h) != 0); + CHECK(secp256k1_pedersen_commit(both, &pcommit[1], blind, value[1], &value_gen, &secp256k1_generator_const_h) != 0); + CHECK(secp256k1_pedersen_commit(both, &pcommit[2], blind, value[2], &value_gen, &secp256k1_generator_const_h) != 0); + CHECK(secp256k1_pedersen_commit(both, &pcommit[3], blind, value[3], &value_gen, &secp256k1_generator_const_h) != 0); + + /* generators */ + gens = secp256k1_bulletproof_generators_create(none, NULL, 256); + CHECK(gens == NULL && ecount == 1); + gens = secp256k1_bulletproof_generators_create(none, &secp256k1_generator_const_h, 256); + CHECK(gens != NULL && ecount == 1); + + /* rangeproof_prove */ + ecount = 0; + CHECK(secp256k1_bulletproof_rangeproof_prove(none, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_bulletproof_rangeproof_prove(sign, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_prove(vrfy, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 1); + CHECK(ecount == 3); + plen = 2000; + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 2, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 1); + CHECK(ecount == 3); + plen = 2000; + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, 
value, NULL, blind_ptr, NULL, 4, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); /* too few gens */ + CHECK(ecount == 4); + + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, min_value, blind_ptr, NULL, 2, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 1); /* mv = v, ok */ + CHECK(ecount == 4); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, &value[1], &min_value[1], blind_ptr, NULL, 2, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 1); /* mv = 0, ok */ + CHECK(ecount == 4); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, &value[2], &min_value[2], blind_ptr, NULL, 2, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); /* mv > v, !ok */ + CHECK(ecount == 4); + + CHECK(secp256k1_bulletproof_rangeproof_prove(both, NULL, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, NULL, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 6); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, NULL, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 7); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, NULL, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 8); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, NULL, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 9); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, NULL, NULL, 1, &value_gen, 64, 
blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 10); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 0, &value_gen, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 11); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, NULL, 64, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 12); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 0, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 13); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 65, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 14); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, -1, blind, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 15); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, NULL, NULL, NULL, 0, NULL) == 0); + CHECK(ecount == 16); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, NULL, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, blind, 0, NULL) == 1); + CHECK(ecount == 16); + CHECK(secp256k1_bulletproof_rangeproof_prove(both, scratch, gens, proof, &plen, NULL, NULL, NULL, value, min_value, blind_ptr, NULL, 1, &value_gen, 64, blind, NULL, blind, 32, NULL) == 1); + CHECK(ecount == 16); + + /* rangeproof_verify */ + ecount = 0; + CHECK(secp256k1_bulletproof_rangeproof_verify(none, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_bulletproof_rangeproof_verify(sign, scratch, gens, proof, plen, 
min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify(vrfy, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 1); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 1); + CHECK(ecount == 2); + + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 63, &value_gen, blind, 32) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen - 1, min_value, pcommit, 1, 63, &value_gen, blind, 32) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, 0, min_value, pcommit, 1, 63, &value_gen, blind, 32) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 31) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, NULL, 0) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 2, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 4, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 3); + + CHECK(secp256k1_bulletproof_rangeproof_verify(both, NULL, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, NULL, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, NULL, plen, min_value, pcommit, 1, 64, &value_gen, blind, 32) == 0); + 
CHECK(ecount == 6); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, NULL, pcommit, 1, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 6); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, NULL, 1, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 7); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 0, 64, &value_gen, blind, 32) == 0); + CHECK(ecount == 8); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 65, &value_gen, blind, 32) == 0); + CHECK(ecount == 9); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 0, &value_gen, blind, 32) == 0); + CHECK(ecount == 10); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, NULL, blind, 32) == 0); + CHECK(ecount == 11); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, NULL, 32) == 0); + CHECK(ecount == 12); + CHECK(secp256k1_bulletproof_rangeproof_verify(both, scratch, gens, proof, plen, min_value, pcommit, 1, 64, &value_gen, blind, 0) == 0); + CHECK(ecount == 12); + + /* verify_multi */ + ecount = 0; + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(none, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(sign, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(vrfy, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 1); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, 
&mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 1); + CHECK(ecount == 2); + + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, NULL, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, NULL, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, NULL, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 0, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 6); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, NULL, pcommit_arr, 1, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 6); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, NULL, 1, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 7); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, NULL, blind_ptr, &blindlen) == 0); + CHECK(ecount == 8); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, NULL, &blindlen) == 0); + CHECK(ecount == 9); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, blind_ptr, NULL) == 0); + CHECK(ecount == 10); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 64, &value_gen, NULL, NULL) == 0); + CHECK(ecount == 10); + + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, 
scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 0, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 11); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 65, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 12); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 63, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 12); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 1, 0, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 13); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 2, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 13); + CHECK(secp256k1_bulletproof_rangeproof_verify_multi(both, scratch, gens, &proof_ptr, 1, plen, &mv_ptr, pcommit_arr, 4, 64, &value_gen, blind_ptr, &blindlen) == 0); + CHECK(ecount == 14); + + /* Rewind */ + ecount = 0; + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 32, NULL) == 1); + CHECK(ecount == 0); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, NULL, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 32, NULL) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, NULL, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 32, NULL) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, NULL, plen, min_value[0], pcommit, &value_gen, blind, blind, 32, NULL) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, 0, min_value[0], pcommit, &value_gen, blind, blind, 32, NULL) == 0); + CHECK(ecount == 3); + 
CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, plen, 0, pcommit, &value_gen, blind, blind, 32, NULL) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, plen, min_value[0], NULL, &value_gen, blind, blind, 32, NULL) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, NULL, blind, blind, 32, NULL) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, NULL, blind, 32, NULL) == 0); + CHECK(ecount == 6); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, NULL, 32, NULL) == 0); + CHECK(ecount == 7); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, blind, 0, NULL) == 0); + CHECK(ecount == 7); + CHECK(secp256k1_bulletproof_rangeproof_rewind(none, &rewind_v, rewind_blind, proof, plen, min_value[0], pcommit, &value_gen, blind, NULL, 0, NULL) == 0); + CHECK(ecount == 7); + + secp256k1_bulletproof_generators_destroy(none, gens); + secp256k1_bulletproof_generators_destroy(none, NULL); + secp256k1_scratch_destroy(scratch); + secp256k1_context_destroy(none); + secp256k1_context_destroy(sign); + secp256k1_context_destroy(vrfy); + secp256k1_context_destroy(both); +} + +#define MAX_WIDTH (1ul << 20) +typedef struct { + const secp256k1_scalar *a; + const secp256k1_scalar *b; + const secp256k1_ge *g; + const secp256k1_ge *h; + size_t n; +} test_bulletproof_ecmult_context; + +static int test_bulletproof_ecmult_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + test_bulletproof_ecmult_context *ecctx = (test_bulletproof_ecmult_context *) data; + if (idx < ecctx->n) { + *sc = ecctx->a[idx]; + *pt = 
ecctx->g[idx]; + } else { + VERIFY_CHECK(idx < 2*ecctx->n); + *sc = ecctx->b[idx - ecctx->n]; + *pt = ecctx->h[idx - ecctx->n]; + } + return 1; +} + +typedef struct { + secp256k1_scalar offs; + secp256k1_scalar ext_sc; + secp256k1_scalar skew_sc; + secp256k1_ge ext_pt; + secp256k1_ge p; + size_t n; + int parity; +} test_bulletproof_offset_context; + +static int test_bulletproof_offset_vfy_callback(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data) { + test_bulletproof_offset_context *ecctx = (test_bulletproof_offset_context *) data; + secp256k1_scalar_set_int(&ecctx->offs, 1); + if (idx < 2 * ecctx->n) { + secp256k1_scalar idxsc; + secp256k1_scalar_set_int(&idxsc, idx); + secp256k1_scalar_mul(sc, &ecctx->skew_sc, &idxsc); + } else { + if (ecctx->parity) { + *sc = ecctx->ext_sc; + *pt = ecctx->ext_pt; + } else { + secp256k1_scalar_set_int(sc, 1); + *pt = ecctx->p; + } + } + secp256k1_scalar_mul(sc, sc, randomizer); + ecctx->parity = !ecctx->parity; + return 1; +} + +typedef struct { + const secp256k1_scalar *a_arr; + const secp256k1_scalar *b_arr; +} secp256k1_bulletproof_ip_test_abgh_data; + + +static int secp256k1_bulletproof_ip_test_abgh_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_bulletproof_ip_test_abgh_data *cbctx = (secp256k1_bulletproof_ip_test_abgh_data *) data; + const int is_g = idx % 2 == 0; + + (void) pt; + if (is_g) { + *sc = cbctx->a_arr[idx / 2]; + } else { + *sc = cbctx->b_arr[idx / 2]; + } + return 1; +} + +void test_bulletproof_inner_product(size_t n, const secp256k1_bulletproof_generators *gens) { + const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0,0,0,0,0,0,0,0); + secp256k1_gej pj; + secp256k1_gej tmpj, tmpj2; + secp256k1_scalar *a_arr = (secp256k1_scalar *)checked_malloc(&ctx->error_callback, n * sizeof(*a_arr)); + secp256k1_scalar *b_arr = (secp256k1_scalar *)checked_malloc(&ctx->error_callback, n * sizeof(*b_arr)); + unsigned char commit[32] = "hash of 
P, c, etc. all that jazz"; + secp256k1_scalar one; + size_t j; + test_bulletproof_offset_context offs_ctx; + secp256k1_bulletproof_ip_test_abgh_data abgh_data; + secp256k1_bulletproof_innerproduct_context innp_ctx; + unsigned char proof[2000]; + size_t plen = sizeof(proof); + + secp256k1_scratch *scratch = secp256k1_scratch_space_create(ctx, 100000 + 256 * (2 * n + 2)); + + for (j = 0; j < n; j++) { + random_scalar_order(&a_arr[j]); + random_scalar_order(&b_arr[j]); + } + + abgh_data.a_arr = a_arr; + abgh_data.b_arr = b_arr; + + random_group_element_test(&offs_ctx.ext_pt); + random_scalar_order(&offs_ctx.ext_sc); + secp256k1_scalar_clear(&offs_ctx.skew_sc); + offs_ctx.n = n; + + secp256k1_scalar_set_int(&one, 1); + CHECK(secp256k1_bulletproof_inner_product_prove_impl(&ctx->error_callback, scratch, proof, &plen, gens, &one, n, secp256k1_bulletproof_ip_test_abgh_callback, (void *) &abgh_data, commit) == 1); + + innp_ctx.proof = proof; + memcpy(innp_ctx.commit, commit, 32); + secp256k1_scalar_set_int(&innp_ctx.yinv, 1); + innp_ctx.n_extra_rangeproof_points = 1; + innp_ctx.rangeproof_cb = test_bulletproof_offset_vfy_callback; + innp_ctx.rangeproof_cb_data = (void *) &offs_ctx; + + /* Manually do the multiexp to obtain the point P which commits to the inner product. + * The prover never computes this because it is implicit in the range/circuit proofs. 
*/ + { + test_bulletproof_ecmult_context ecmult_data; + ecmult_data.n = n; + ecmult_data.a = a_arr; + ecmult_data.b = b_arr; + ecmult_data.g = gens->gens; + ecmult_data.h = gens->gens + gens->n/2; + CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &pj, &zero, test_bulletproof_ecmult_callback, (void*) &ecmult_data, 2 * n)); + secp256k1_ge_set_gej(&offs_ctx.p, &pj); + } + + /* Check proof with no offsets or other baubles */ + offs_ctx.parity = 0; + secp256k1_scalar_clear(&innp_ctx.p_offs); + CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1); + + /* skew P by a random amount and instruct the verifier to offset it */ + random_scalar_order(&innp_ctx.p_offs); + secp256k1_gej_set_ge(&tmpj2, &gens->blinding_gen[0]); + secp256k1_ecmult(&ctx->error_callback, &tmpj, &tmpj2, &innp_ctx.p_offs, &zero); + secp256k1_gej_add_var(&pj, &pj, &tmpj, NULL); + secp256k1_ge_set_gej(&offs_ctx.p, &pj); + + /* wrong p_offs should fail */ + offs_ctx.parity = 0; + CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, &innp_ctx, 1, plen, 1) == 0); + + secp256k1_scalar_negate(&innp_ctx.p_offs, &innp_ctx.p_offs); + + offs_ctx.parity = 0; + CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1); + /* check that verification did not trash anything */ + offs_ctx.parity = 0; + CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1); + /* check that adding a no-op rangeproof skew function doesn't break anything */ + offs_ctx.parity = 0; + CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1); + + /* Offset P by some random point and then try to undo this in the verification */ + secp256k1_gej_set_ge(&tmpj2, &offs_ctx.ext_pt); + secp256k1_ecmult(&ctx->error_callback, &tmpj, 
&tmpj2, &offs_ctx.ext_sc, &zero); + secp256k1_gej_neg(&tmpj, &tmpj); + secp256k1_gej_add_ge_var(&tmpj, &tmpj, &offs_ctx.p, NULL); + secp256k1_ge_set_gej(&offs_ctx.p, &tmpj); + offs_ctx.parity = 0; + innp_ctx.n_extra_rangeproof_points = 2; + CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1); + + /* Offset each basis by some random point and try to undo this in the verification */ + secp256k1_gej_set_infinity(&tmpj2); + for (j = 0; j < n; j++) { + size_t k; + /* Offset by k-times the kth G basis and (k+n)-times the kth H basis */ + for (k = 0; k < j; k++) { + secp256k1_gej_add_ge_var(&tmpj2, &tmpj2, &gens->gens[j], NULL); + secp256k1_gej_add_ge_var(&tmpj2, &tmpj2, &gens->gens[j + gens->n/2], NULL); + } + for (k = 0; k < n; k++) { + secp256k1_gej_add_ge_var(&tmpj2, &tmpj2, &gens->gens[j + gens->n/2], NULL); + } + } + random_scalar_order(&offs_ctx.skew_sc); + secp256k1_ecmult(&ctx->error_callback, &tmpj, &tmpj2, &offs_ctx.skew_sc, &zero); + secp256k1_gej_add_ge_var(&tmpj, &tmpj, &offs_ctx.p, NULL); + secp256k1_ge_set_gej(&offs_ctx.p, &tmpj); + secp256k1_scalar_negate(&offs_ctx.skew_sc, &offs_ctx.skew_sc); + + offs_ctx.parity = 0; + CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, &innp_ctx, 1, plen, 1) == 1); + + /* Try to validate the same proof twice */ +{ + test_bulletproof_offset_context offs_ctxs[2]; + secp256k1_bulletproof_innerproduct_context innp_ctxs[2]; + offs_ctx.parity = 1; /* set parity to 1 so the common point will be returned first, as required by the multi-proof verifier */ + memcpy(&innp_ctxs[0], &innp_ctx, sizeof(innp_ctx)); + memcpy(&innp_ctxs[1], &innp_ctx, sizeof(innp_ctx)); + memcpy(&offs_ctxs[0], &offs_ctx, sizeof(offs_ctx)); + memcpy(&offs_ctxs[1], &offs_ctx, sizeof(offs_ctx)); + innp_ctxs[0].rangeproof_cb_data = (void *)&offs_ctxs[0]; + innp_ctxs[1].rangeproof_cb_data = (void *)&offs_ctxs[1]; + 
CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, innp_ctxs, 2, plen, 1) == 1); + CHECK(secp256k1_bulletproof_inner_product_verify_impl(&ctx->error_callback, scratch, gens, n, innp_ctxs, 2, plen, 0) == 1); +} + + free(a_arr); + free(b_arr); + secp256k1_scratch_destroy(scratch); +} + +void test_bulletproof_rangeproof(size_t nbits, size_t expected_size, const secp256k1_bulletproof_generators *gens) { + secp256k1_scalar blind; + secp256k1_scalar blind_recovered; + unsigned char proof[1024]; + unsigned char proof2[1024]; + unsigned char proof3[1024]; + const unsigned char *proof_ptr[3]; + size_t plen = sizeof(proof); + uint64_t v = 123456; + uint64_t v_recovered; + secp256k1_gej commitj; + secp256k1_ge commitp; + secp256k1_ge commitp2; + secp256k1_pedersen_commitment pcommit; + const secp256k1_ge *commitp_ptr[3]; + secp256k1_ge value_gen[3]; + unsigned char nonce[32] = "my kingdom for some randomness!!"; + + secp256k1_scratch *scratch = secp256k1_scratch_space_create(ctx, 10000000); + + if (v >> nbits > 0) { + v = 0; + } + + proof_ptr[0] = proof; + proof_ptr[1] = proof2; + proof_ptr[2] = proof3; + + secp256k1_generator_load(&value_gen[0], &secp256k1_generator_const_g); + secp256k1_generator_load(&value_gen[1], &secp256k1_generator_const_g); + secp256k1_generator_load(&value_gen[2], &secp256k1_generator_const_h); + random_scalar_order(&blind); + + secp256k1_pedersen_ecmult(&commitj, &blind, v, &value_gen[0], &gens->blinding_gen[0]); + secp256k1_ge_set_gej(&commitp, &commitj); + secp256k1_pedersen_ecmult(&commitj, &blind, v, &value_gen[2], &gens->blinding_gen[0]); + secp256k1_ge_set_gej(&commitp2, &commitj); + commitp_ptr[0] = commitp_ptr[1] = &commitp; + commitp_ptr[2] = &commitp2; + secp256k1_pedersen_commitment_save(&pcommit, &commitp); + + CHECK(secp256k1_bulletproof_rangeproof_prove_impl(&ctx->error_callback, scratch, proof, &plen, NULL, NULL, nbits, &v, NULL, &blind, &commitp, 1, &value_gen[0], gens, nonce, nonce, NULL, 
0, NULL) == 1); + CHECK(plen == expected_size); + nonce[0] ^= 1; + CHECK(secp256k1_bulletproof_rangeproof_prove_impl(&ctx->error_callback, scratch, proof2, &plen, NULL, NULL, nbits, &v, NULL, &blind, &commitp, 1, &value_gen[1], gens, nonce, nonce, NULL, 0, NULL) == 1); + CHECK(plen == expected_size); + nonce[0] ^= 2; + CHECK(secp256k1_bulletproof_rangeproof_prove_impl(&ctx->error_callback, scratch, proof3, &plen, NULL, NULL, nbits, &v, NULL, &blind, &commitp2, 1, &value_gen[2], gens, nonce, nonce, NULL, 0, NULL) == 1); + CHECK(plen == expected_size); + nonce[0] ^= 3; + /* Verify once */ + CHECK(secp256k1_bulletproof_rangeproof_verify_impl(&ctx->error_callback, scratch, proof_ptr, 1, plen, nbits, NULL, commitp_ptr, 1, value_gen, gens, NULL, 0) == 1); + /* Verify twice at once to test batch validation */ + CHECK(secp256k1_bulletproof_rangeproof_verify_impl(&ctx->error_callback, scratch, proof_ptr, 2, plen, nbits, NULL, commitp_ptr, 1, value_gen, gens, NULL, 0) == 1); + /* Verify thrice at once where one has a different asset type */ + CHECK(secp256k1_bulletproof_rangeproof_verify_impl(&ctx->error_callback, scratch, proof_ptr, 3, plen, nbits, NULL, commitp_ptr, 1, value_gen, gens, NULL, 0) == 1); + + /* Rewind */ + CHECK(secp256k1_bulletproof_rangeproof_rewind_impl(&v_recovered, &blind_recovered, proof, plen, 0, &pcommit, &secp256k1_generator_const_g, nonce, NULL, 0, NULL) == 1); + CHECK(v_recovered == v); + CHECK(secp256k1_scalar_eq(&blind_recovered, &blind) == 1); + + nonce[0] ^= 111; + CHECK(secp256k1_bulletproof_rangeproof_rewind_impl(&v_recovered, &blind_recovered, proof, plen, 0, &pcommit, &secp256k1_generator_const_g, nonce, NULL, 0, NULL) == 0); + + secp256k1_scratch_destroy(scratch); +} + +void test_bulletproof_rangeproof_aggregate(size_t nbits, size_t n_commits, size_t expected_size, const secp256k1_bulletproof_generators *gens) { + unsigned char proof[1024]; + const unsigned char *proof_ptr = proof; + size_t plen = sizeof(proof); + secp256k1_scalar *blind = 
(secp256k1_scalar *)checked_malloc(&ctx->error_callback, n_commits * sizeof(*blind)); + uint64_t *v = (uint64_t *)checked_malloc(&ctx->error_callback, n_commits * sizeof(*v)); + secp256k1_ge *commitp = (secp256k1_ge *)checked_malloc(&ctx->error_callback, n_commits * sizeof(*commitp)); + const secp256k1_ge *constptr = commitp; + secp256k1_ge value_gen; + unsigned char commit[32] = {0}; + unsigned char nonce[32] = "mary, mary quite contrary how do"; + size_t i; + + secp256k1_scratch *scratch = secp256k1_scratch_space_create(ctx, 10000000); + + secp256k1_generator_load(&value_gen, &secp256k1_generator_const_g); + for (i = 0; i < n_commits; i++) { + secp256k1_scalar vs; + secp256k1_gej commitj; + + v[i] = 223 * i; /* dice-roll random # */ + if (v[i] >> nbits > 0) { + v[i] = 0; + } + secp256k1_scalar_set_u64(&vs, v[i]); + random_scalar_order(&blind[i]); + secp256k1_pedersen_ecmult(&commitj, &blind[i], v[i], &value_gen, &gens->blinding_gen[0]); + secp256k1_ge_set_gej(&commitp[i], &commitj); + + secp256k1_bulletproof_update_commit(commit, &commitp[i], &value_gen); + } + + CHECK(secp256k1_bulletproof_rangeproof_prove_impl(&ctx->error_callback, scratch, proof, &plen, NULL, NULL, nbits, v, NULL, blind, commitp, n_commits, &value_gen, gens, nonce, nonce, NULL, 0, NULL) == 1); + CHECK(plen == expected_size); + CHECK(secp256k1_bulletproof_rangeproof_verify_impl(&ctx->error_callback, scratch, &proof_ptr, 1, plen, nbits, NULL, &constptr, n_commits, &value_gen, gens, NULL, 0) == 1); + + secp256k1_scratch_destroy(scratch); + free(commitp); + free(v); + free(blind); +} + +void test_multi_party_bulletproof(size_t n_parties, secp256k1_scratch_space* scratch, const secp256k1_bulletproof_generators *gens) { + size_t j; + secp256k1_scalar tmp_s; + unsigned char tmp_c[32]; + unsigned char common_nonce[32]; + unsigned char nonces[10][32]; + unsigned char blinds[10][32]; + const unsigned char* blind_ptr[1]; + secp256k1_pedersen_commitment* partial_commits[10]; + uint64_t value[1] = 
{11223344}; + secp256k1_pedersen_commitment commit[1]; + const secp256k1_pedersen_commitment *commit_ptr[1]; + secp256k1_pubkey* t_1s[10]; + secp256k1_pubkey* t_2s[10]; + secp256k1_pubkey t_1_sum; + secp256k1_pubkey t_2_sum; + unsigned char tau_x_sum[32]; + unsigned char proof[675]; + size_t plen = 675; + + if (n_parties < 2 || n_parties > 10) { + return; + } + + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(common_nonce, &tmp_s); + + for (j=0;j 1) { + n /= 2; + i++; + } + return i; + } + } +} + +SECP256K1_INLINE static size_t secp256k1_popcountl(unsigned long x) { +#ifdef HAVE_BUILTIN_POPCOUNTL + return __builtin_popcountl(x); +#else + size_t ret = 0; + size_t i; + for (i = 0; i < 64; i++) { + ret += x & 1; + x >>= 1; + } + return ret; +#endif +} + +SECP256K1_INLINE static size_t secp256k1_ctzl(unsigned long x) { +#ifdef HAVE_BUILTIN_CTZL + return __builtin_ctzl(x); +#else + size_t i; + for (i = 0; i < 64; i++) { + if (x & (1ull << i)) { + return i; + } + } + /* If no bits are set, the result is __builtin_ctzl is undefined, + * so we can return whatever we want here. 
*/ + return 0; +#endif +} + +static void secp256k1_scalar_dot_product(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, size_t n) { + secp256k1_scalar_clear(r); + while(n--) { + secp256k1_scalar term; + secp256k1_scalar_mul(&term, &a[n], &b[n]); + secp256k1_scalar_add(r, r, &term); + } +} + +static void secp256k1_scalar_inverse_all_var(secp256k1_scalar *r, const secp256k1_scalar *a, size_t len) { + secp256k1_scalar u; + size_t i; + if (len < 1) { + return; + } + + VERIFY_CHECK((r + len <= a) || (a + len <= r)); + + r[0] = a[0]; + + i = 0; + while (++i < len) { + secp256k1_scalar_mul(&r[i], &r[i - 1], &a[i]); + } + + secp256k1_scalar_inverse_var(&u, &r[--i]); + + while (i > 0) { + size_t j = i--; + secp256k1_scalar_mul(&r[j], &r[i], &u); + secp256k1_scalar_mul(&u, &u, &a[j]); + } + + r[0] = u; +} + +SECP256K1_INLINE static void secp256k1_bulletproof_serialize_points(unsigned char *out, secp256k1_ge *pt, size_t n) { + const size_t bitveclen = (n + 7) / 8; + size_t i; + + memset(out, 0, bitveclen); + for (i = 0; i < n; i++) { + secp256k1_fe pointx; + pointx = pt[i].x; + secp256k1_fe_normalize(&pointx); + secp256k1_fe_get_b32(&out[bitveclen + i*32], &pointx); + if (!secp256k1_fe_is_quad_var(&pt[i].y)) { + out[i/8] |= (1ull << (i % 8)); + } + } +} + +SECP256K1_INLINE static int secp256k1_bulletproof_deserialize_point(secp256k1_ge *pt, const unsigned char *data, size_t i, size_t n) { + const size_t bitveclen = (n + 7) / 8; + const size_t offset = bitveclen + i*32; + secp256k1_fe fe; + + secp256k1_fe_set_b32(&fe, &data[offset]); + if (secp256k1_ge_set_xquad(pt, &fe)) { + if (data[i / 8] & (1 << (i % 8))) { + secp256k1_ge_neg(pt, pt); + } + return 1; + } else { + return 0; + } +} + +static void secp256k1_bulletproof_update_commit(unsigned char *commit, const secp256k1_ge *lpt, const secp256k1_ge *rpt) { + secp256k1_fe pointx; + secp256k1_sha256 sha256; + unsigned char lrparity; + lrparity = (!secp256k1_fe_is_quad_var(&lpt->y) << 1) + 
!secp256k1_fe_is_quad_var(&rpt->y); + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_write(&sha256, &lrparity, 1); + pointx = lpt->x; + secp256k1_fe_normalize(&pointx); + secp256k1_fe_get_b32(commit, &pointx); + secp256k1_sha256_write(&sha256, commit, 32); + pointx = rpt->x; + secp256k1_fe_normalize(&pointx); + secp256k1_fe_get_b32(commit, &pointx); + secp256k1_sha256_write(&sha256, commit, 32); + secp256k1_sha256_finalize(&sha256, commit); +} + +#endif diff --git a/src/secp256k1/src/modules/commitment/Makefile.am.include b/src/secp256k1/src/modules/commitment/Makefile.am.include new file mode 100644 index 0000000000000..132d6fe661153 --- /dev/null +++ b/src/secp256k1/src/modules/commitment/Makefile.am.include @@ -0,0 +1,4 @@ +include_HEADERS += include/secp256k1_commitment.h +noinst_HEADERS += src/modules/commitment/main_impl.h +noinst_HEADERS += src/modules/commitment/pedersen_impl.h +noinst_HEADERS += src/modules/commitment/tests_impl.h diff --git a/src/secp256k1/src/modules/commitment/main_impl.h b/src/secp256k1/src/modules/commitment/main_impl.h new file mode 100644 index 0000000000000..829e4dd6baa8b --- /dev/null +++ b/src/secp256k1/src/modules/commitment/main_impl.h @@ -0,0 +1,357 @@ +/********************************************************************** + * Copyright (c) 2014-2015 Gregory Maxwell * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_COMMITMENT_MAIN +#define SECP256K1_MODULE_COMMITMENT_MAIN + +#include "group.h" + +#include "modules/commitment/pedersen_impl.h" + +/** Alternative generator for secp256k1. + * This is the sha256 of 'g' after DER encoding (without compression), + * which happens to be a point on the curve. 
+ * sage: G2 = EllipticCurve ([F (0), F (7)]).lift_x(F(int(hashlib.sha256('0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'.decode('hex')).hexdigest(),16))) + * sage: '%x %x' % G2.xy() + */ +static const secp256k1_generator secp256k1_generator_h_internal = {{ + 0x50, 0x92, 0x9b, 0x74, 0xc1, 0xa0, 0x49, 0x54, 0xb7, 0x8b, 0x4b, 0x60, 0x35, 0xe9, 0x7a, 0x5e, + 0x07, 0x8a, 0x5a, 0x0f, 0x28, 0xec, 0x96, 0xd5, 0x47, 0xbf, 0xee, 0x9a, 0xce, 0x80, 0x3a, 0xc0, + 0x31, 0xd3, 0xc6, 0x86, 0x39, 0x73, 0x92, 0x6e, 0x04, 0x9e, 0x63, 0x7c, 0xb1, 0xb5, 0xf4, 0x0a, + 0x36, 0xda, 0xc2, 0x8a, 0xf1, 0x76, 0x69, 0x68, 0xc3, 0x0c, 0x23, 0x13, 0xf3, 0xa3, 0x89, 0x04 +}}; + +const secp256k1_generator *secp256k1_generator_h = &secp256k1_generator_h_internal; + +static void secp256k1_pedersen_commitment_load(secp256k1_ge* ge, const secp256k1_pedersen_commitment* commit) { + secp256k1_fe fe; + secp256k1_fe_set_b32(&fe, &commit->data[1]); + secp256k1_ge_set_xquad(ge, &fe); + if (commit->data[0] & 1) { + secp256k1_ge_neg(ge, ge); + } +} + +static void secp256k1_pedersen_commitment_save(secp256k1_pedersen_commitment* commit, secp256k1_ge* ge) { + secp256k1_fe_normalize(&ge->x); + secp256k1_fe_get_b32(&commit->data[1], &ge->x); + commit->data[0] = 9 ^ secp256k1_fe_is_quad_var(&ge->y); +} + +int secp256k1_pedersen_commitment_parse(const secp256k1_context* ctx, secp256k1_pedersen_commitment* commit, const unsigned char *input) { + secp256k1_fe x; + secp256k1_ge ge; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(commit != NULL); + ARG_CHECK(input != NULL); + (void) ctx; + + if ((input[0] & 0xFE) != 8 || + !secp256k1_fe_set_b32(&x, &input[1]) || + !secp256k1_ge_set_xquad(&ge, &x)) { + return 0; + } + if (input[0] & 1) { + secp256k1_ge_neg(&ge, &ge); + } + secp256k1_pedersen_commitment_save(commit, &ge); + return 1; +} + +int secp256k1_pedersen_commitment_serialize(const secp256k1_context* ctx, unsigned char *output, const 
secp256k1_pedersen_commitment* commit) { + secp256k1_ge ge; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(output != NULL); + ARG_CHECK(commit != NULL); + + secp256k1_pedersen_commitment_load(&ge, commit); + + output[0] = 9 ^ secp256k1_fe_is_quad_var(&ge.y); + secp256k1_fe_normalize_var(&ge.x); + secp256k1_fe_get_b32(&output[1], &ge.x); + return 1; +} + +/* Generates a pedersen commitment: *commit = blind * G + value * G2. The blinding factor is 32 bytes.*/ +int secp256k1_pedersen_commit(const secp256k1_context* ctx, secp256k1_pedersen_commitment *commit, const unsigned char *blind, uint64_t value, const secp256k1_generator* value_gen, const secp256k1_generator* blind_gen) { + secp256k1_ge value_genp; + secp256k1_ge blind_genp; + secp256k1_gej rj; + secp256k1_ge r; + secp256k1_scalar sec; + int overflow; + int ret = 0; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(commit != NULL); + ARG_CHECK(blind != NULL); + ARG_CHECK(value_gen != NULL); + ARG_CHECK(blind_gen != NULL); + secp256k1_generator_load(&value_genp, value_gen); + secp256k1_generator_load(&blind_genp, blind_gen); + secp256k1_scalar_set_b32(&sec, blind, &overflow); + if (!overflow) { + secp256k1_pedersen_ecmult(&rj, &sec, value, &value_genp, &blind_genp); + if (!secp256k1_gej_is_infinity(&rj)) { + secp256k1_ge_set_gej(&r, &rj); + secp256k1_pedersen_commitment_save(commit, &r); + ret = 1; + } + secp256k1_gej_clear(&rj); + secp256k1_ge_clear(&r); + } + secp256k1_scalar_clear(&sec); + return ret; +} + +/* Generates a pedersen commitment: *commit = blind * G + value * G2. 
The blinding factor is 32 bytes.*/ +int secp256k1_pedersen_blind_commit(const secp256k1_context* ctx, secp256k1_pedersen_commitment *commit, const unsigned char *blind, const unsigned char *value, const secp256k1_generator* value_gen, const secp256k1_generator* blind_gen) { + secp256k1_ge value_genp; + secp256k1_ge blind_genp; + secp256k1_gej rj; + secp256k1_ge r; + secp256k1_scalar sec; + secp256k1_scalar sec2; + int overflow; + int overflow2; + int ret = 0; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(commit != NULL); + ARG_CHECK(blind != NULL); + ARG_CHECK(value != NULL); + ARG_CHECK(value_gen != NULL); + ARG_CHECK(blind_gen != NULL); + secp256k1_generator_load(&value_genp, value_gen); + secp256k1_generator_load(&blind_genp, blind_gen); + secp256k1_scalar_set_b32(&sec, blind, &overflow); + secp256k1_scalar_set_b32(&sec2, value, &overflow2); + if (!overflow && !overflow2) { + secp256k1_pedersen_blind_ecmult(&rj, &sec, &sec2, &value_genp, &blind_genp); + if (!secp256k1_gej_is_infinity(&rj)) { + secp256k1_ge_set_gej(&r, &rj); + secp256k1_pedersen_commitment_save(commit, &r); + ret = 1; + } + secp256k1_gej_clear(&rj); + secp256k1_ge_clear(&r); + } + secp256k1_scalar_clear(&sec); + return ret; +} + +int secp256k1_pedersen_commitment_to_pubkey(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const secp256k1_pedersen_commitment* commit) { + secp256k1_ge Q; + secp256k1_fe fe; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(pubkey != NULL); + memset(pubkey, 0, sizeof(*pubkey)); + ARG_CHECK(commit != NULL); + + secp256k1_fe_set_b32(&fe, &commit->data[1]); + secp256k1_ge_set_xquad(&Q, &fe); + if (commit->data[0] & 1) { + secp256k1_ge_neg(&Q, &Q); + } + secp256k1_pubkey_save(pubkey, &Q); + secp256k1_ge_clear(&Q); + return 1; +} + +/** Takes a list of n pointers to 32 byte blinding values, the first negs of which are treated with positive sign and the rest + * negative, then calculates an additional blinding value that adds to zero. 
+ */ +int secp256k1_pedersen_blind_sum(const secp256k1_context* ctx, unsigned char *blind_out, const unsigned char * const *blinds, size_t n, size_t npositive) { + secp256k1_scalar acc; + secp256k1_scalar x; + size_t i; + int overflow; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(blind_out != NULL); + ARG_CHECK(blinds != NULL); + ARG_CHECK(npositive <= n); + (void) ctx; + secp256k1_scalar_set_int(&acc, 0); + for (i = 0; i < n; i++) { + secp256k1_scalar_set_b32(&x, blinds[i], &overflow); + if (overflow) { + return 0; + } + if (i >= npositive) { + secp256k1_scalar_negate(&x, &x); + } + secp256k1_scalar_add(&acc, &acc, &x); + } + secp256k1_scalar_get_b32(blind_out, &acc); + secp256k1_scalar_clear(&acc); + secp256k1_scalar_clear(&x); + return 1; +} + +/* Takes two list of 33-byte commitments and sums the first set, subtracts the second and returns the resulting commitment. */ +int secp256k1_pedersen_commit_sum(const secp256k1_context* ctx, secp256k1_pedersen_commitment *commit_out, + const secp256k1_pedersen_commitment * const* commits, size_t pcnt, const secp256k1_pedersen_commitment * const* ncommits, size_t ncnt) { + secp256k1_gej accj; + secp256k1_ge add; + size_t i; + int ret = 0; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(!pcnt || (commits != NULL)); + ARG_CHECK(!ncnt || (ncommits != NULL)); + ARG_CHECK(commit_out != NULL); + (void) ctx; + secp256k1_gej_set_infinity(&accj); + for (i = 0; i < ncnt; i++) { + secp256k1_pedersen_commitment_load(&add, ncommits[i]); + secp256k1_gej_add_ge_var(&accj, &accj, &add, NULL); + } + secp256k1_gej_neg(&accj, &accj); + for (i = 0; i < pcnt; i++) { + secp256k1_pedersen_commitment_load(&add, commits[i]); + secp256k1_gej_add_ge_var(&accj, &accj, &add, NULL); + } + if (!secp256k1_gej_is_infinity(&accj)) { + secp256k1_ge acc; + secp256k1_ge_set_gej(&acc, &accj); + secp256k1_pedersen_commitment_save(commit_out, &acc); + ret = 1; + } + return ret; +} + +/* Takes two lists of commitments and sums the first set and subtracts the second and 
verifies that they sum to excess. */ +int secp256k1_pedersen_verify_tally(const secp256k1_context* ctx, const secp256k1_pedersen_commitment * const* pos, size_t n_pos, const secp256k1_pedersen_commitment * const* neg, size_t n_neg) { + secp256k1_gej accj; + secp256k1_ge add; + size_t i; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(!n_pos || (pos != NULL)); + ARG_CHECK(!n_neg || (neg != NULL)); + (void) ctx; + secp256k1_gej_set_infinity(&accj); + for (i = 0; i < n_neg; i++) { + secp256k1_pedersen_commitment_load(&add, neg[i]); + secp256k1_gej_add_ge_var(&accj, &accj, &add, NULL); + } + secp256k1_gej_neg(&accj, &accj); + for (i = 0; i < n_pos; i++) { + secp256k1_pedersen_commitment_load(&add, pos[i]); + secp256k1_gej_add_ge_var(&accj, &accj, &add, NULL); + } + return secp256k1_gej_is_infinity(&accj); +} + +int secp256k1_pedersen_blind_generator_blind_sum(const secp256k1_context* ctx, const uint64_t *value, const unsigned char* const* generator_blind, unsigned char* const* blinding_factor, size_t n_total, size_t n_inputs) { + secp256k1_scalar sum; + secp256k1_scalar tmp; + size_t i; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(n_total == 0 || value != NULL); + ARG_CHECK(n_total == 0 || generator_blind != NULL); + ARG_CHECK(n_total == 0 || blinding_factor != NULL); + ARG_CHECK(n_total > n_inputs); + (void) ctx; + + if (n_total == 0) { + return 1; + } + + secp256k1_scalar_set_int(&sum, 0); + for (i = 0; i < n_total; i++) { + int overflow = 0; + secp256k1_scalar addend; + secp256k1_scalar_set_u64(&addend, value[i]); /* s = v */ + + secp256k1_scalar_set_b32(&tmp, generator_blind[i], &overflow); + if (overflow == 1) { + secp256k1_scalar_clear(&tmp); + secp256k1_scalar_clear(&addend); + secp256k1_scalar_clear(&sum); + return 0; + } + secp256k1_scalar_mul(&addend, &addend, &tmp); /* s = vr */ + + secp256k1_scalar_set_b32(&tmp, blinding_factor[i], &overflow); + if (overflow == 1) { + secp256k1_scalar_clear(&tmp); + secp256k1_scalar_clear(&addend); + secp256k1_scalar_clear(&sum); + 
return 0; + } + secp256k1_scalar_add(&addend, &addend, &tmp); /* s = vr + r' */ + secp256k1_scalar_cond_negate(&addend, i < n_inputs); /* s is negated if it's an input */ + secp256k1_scalar_add(&sum, &sum, &addend); /* sum += s */ + secp256k1_scalar_clear(&addend); + } + + /* Right now tmp has the last pedersen blinding factor. Subtract the sum from it. */ + secp256k1_scalar_negate(&sum, &sum); + secp256k1_scalar_add(&tmp, &tmp, &sum); + secp256k1_scalar_get_b32(blinding_factor[n_total - 1], &tmp); + + secp256k1_scalar_clear(&tmp); + secp256k1_scalar_clear(&sum); + return 1; +} + +/* Generates a blinding key that contains a hashed switch commitment. */ +int secp256k1_blind_switch(const secp256k1_context* ctx, unsigned char* blind_switch, const unsigned char* blind, uint64_t value, const secp256k1_generator* value_gen, const secp256k1_generator* blind_gen, const secp256k1_pubkey* switch_pubkey) { + secp256k1_sha256 hasher; + secp256k1_pedersen_commitment commit; + unsigned char buf[33]; + size_t buflen = sizeof(buf); + unsigned char hashed[32]; + int overflow; + secp256k1_scalar blind_switch_scalar; + secp256k1_pubkey tmp_pubkey; + secp256k1_scalar tmp_scalar; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(blind_switch != NULL); + ARG_CHECK(blind != NULL); + ARG_CHECK(value_gen != NULL); + ARG_CHECK(blind_gen != NULL); + ARG_CHECK(switch_pubkey != NULL); + + secp256k1_sha256_initialize(&hasher); + /* xG + vH */ + if (secp256k1_pedersen_commit(ctx, &commit, blind, value, value_gen, blind_gen) != 1) { + return 0; + } + if (secp256k1_pedersen_commitment_serialize(ctx, buf, &commit) != 1) { + return 0; + } + secp256k1_sha256_write(&hasher, buf, buflen); + + /* xJ */ + tmp_pubkey = *switch_pubkey; + if (secp256k1_ec_pubkey_tweak_mul(ctx, &tmp_pubkey, blind) != 1) { + return 0; + } + if (secp256k1_ec_pubkey_serialize(ctx, buf, &buflen, &tmp_pubkey, SECP256K1_EC_COMPRESSED) != 1) { + return 0; + } + secp256k1_sha256_write(&hasher, buf, buflen); + 
secp256k1_sha256_finalize(&hasher, hashed); + secp256k1_scalar_set_b32(&blind_switch_scalar, hashed, &overflow); /* hash(xG+vH||xJ) */ + if (overflow) { + secp256k1_scalar_clear(&blind_switch_scalar); + return 0; + } + secp256k1_scalar_set_b32(&tmp_scalar, blind, &overflow); + if (overflow) { + secp256k1_scalar_clear(&blind_switch_scalar); + secp256k1_scalar_clear(&tmp_scalar); + return 0; + } + secp256k1_scalar_add(&blind_switch_scalar, &blind_switch_scalar, &tmp_scalar); /* x + hash(xG+vH||xJ) */ + secp256k1_scalar_get_b32(blind_switch, &blind_switch_scalar); + secp256k1_scalar_clear(&blind_switch_scalar); + secp256k1_scalar_clear(&tmp_scalar); + return 1; +} + +#endif diff --git a/src/secp256k1/src/modules/commitment/pedersen_impl.h b/src/secp256k1/src/modules/commitment/pedersen_impl.h new file mode 100644 index 0000000000000..d671dae6ef02d --- /dev/null +++ b/src/secp256k1/src/modules/commitment/pedersen_impl.h @@ -0,0 +1,56 @@ +/*********************************************************************** + * Copyright (c) 2015 Gregory Maxwell * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php. * + ***********************************************************************/ + +#ifndef SECP256K1_MODULE_COMMITMENT_PEDERSEN +#define SECP256K1_MODULE_COMMITMENT_PEDERSEN + +#include + +#include "ecmult_const.h" +#include "group.h" +#include "scalar.h" + +/* sec * G + value * G2. 
*/ +SECP256K1_INLINE static void secp256k1_pedersen_ecmult(secp256k1_gej *rj, const secp256k1_scalar *sec, uint64_t value, const secp256k1_ge* value_gen, const secp256k1_ge* blind_gen) { + secp256k1_scalar vs; + secp256k1_gej bj; + secp256k1_ge bp; + + secp256k1_scalar_set_u64(&vs, value); + secp256k1_ecmult_const(rj, value_gen, &vs, 64); + secp256k1_ecmult_const(&bj, blind_gen, sec, 256); + + /* zero blinding factor indicates that we are not trying to be zero-knowledge, + * so not being constant-time in this case is OK. */ + if (!secp256k1_gej_is_infinity(&bj)) { + secp256k1_ge_set_gej(&bp, &bj); + secp256k1_gej_add_ge(rj, rj, &bp); + } + + secp256k1_gej_clear(&bj); + secp256k1_ge_clear(&bp); + secp256k1_scalar_clear(&vs); +} + +SECP256K1_INLINE static void secp256k1_pedersen_blind_ecmult(secp256k1_gej *rj, const secp256k1_scalar *sec, const secp256k1_scalar *value, const secp256k1_ge* value_gen, const secp256k1_ge* blind_gen) { + secp256k1_gej bj; + secp256k1_ge bp; + + secp256k1_ecmult_const(rj, value_gen, value, 256); + secp256k1_ecmult_const(&bj, blind_gen, sec, 256); + + /* zero blinding factor indicates that we are not trying to be zero-knowledge, + * so not being constant-time in this case is OK. 
*/ + if (!secp256k1_gej_is_infinity(&bj)) { + secp256k1_ge_set_gej(&bp, &bj); + secp256k1_gej_add_ge(rj, rj, &bp); + } + + secp256k1_gej_clear(&bj); + secp256k1_ge_clear(&bp); +} + +#endif diff --git a/src/secp256k1/src/modules/commitment/tests_impl.h b/src/secp256k1/src/modules/commitment/tests_impl.h new file mode 100644 index 0000000000000..06a46d5f5228d --- /dev/null +++ b/src/secp256k1/src/modules/commitment/tests_impl.h @@ -0,0 +1,342 @@ +/********************************************************************** + * Copyright (c) 2015 Gregory Maxwell * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_COMMITMENT_TESTS +#define SECP256K1_MODULE_COMMITMENT_TESTS + +#include + +#include "group.h" +#include "scalar.h" +#include "testrand.h" +#include "util.h" + +#include "include/secp256k1_commitment.h" + +static void test_commitment_api(void) { + secp256k1_pedersen_commitment commit; + secp256k1_pedersen_commitment commit2; + const secp256k1_pedersen_commitment *commit_ptr = &commit; + unsigned char blind[32]; + unsigned char blind_out[32]; + const unsigned char *blind_ptr = blind; + unsigned char *blind_out_ptr = blind_out; + uint64_t val = secp256k1_rand32(); + + secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); + secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); + secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); + secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + int32_t ecount = 0; + + secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); + 
secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); + + secp256k1_rand256(blind); + CHECK(secp256k1_pedersen_commit(none, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(ecount == 0); + CHECK(secp256k1_pedersen_commit(vrfy, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(ecount == 0); + CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(ecount == 0); + ecount = 2; + + CHECK(secp256k1_pedersen_commit(sign, NULL, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_pedersen_commit(sign, &commit, NULL, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, NULL, &secp256k1_generator_const_g) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, NULL) == 0); + CHECK(ecount == 6); + + CHECK(secp256k1_pedersen_blind_sum(none, blind_out, &blind_ptr, 1, 1) != 0); + CHECK(ecount == 6); + CHECK(secp256k1_pedersen_blind_sum(none, NULL, &blind_ptr, 1, 1) == 0); + CHECK(ecount == 7); + CHECK(secp256k1_pedersen_blind_sum(none, blind_out, NULL, 1, 1) == 0); + CHECK(ecount == 8); + CHECK(secp256k1_pedersen_blind_sum(none, blind_out, &blind_ptr, 0, 1) == 0); + CHECK(ecount == 9); + CHECK(secp256k1_pedersen_blind_sum(none, blind_out, &blind_ptr, 0, 0) != 0); + CHECK(ecount == 9); + + 
CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) != 0); + CHECK(secp256k1_pedersen_verify_tally(none, &commit_ptr, 1, &commit_ptr, 1) != 0); + CHECK(secp256k1_pedersen_verify_tally(none, NULL, 0, &commit_ptr, 1) == 0); + CHECK(secp256k1_pedersen_verify_tally(none, &commit_ptr, 1, NULL, 0) == 0); + CHECK(secp256k1_pedersen_verify_tally(none, NULL, 0, NULL, 0) != 0); + CHECK(ecount == 9); + CHECK(secp256k1_pedersen_verify_tally(none, NULL, 1, &commit_ptr, 1) == 0); + CHECK(ecount == 10); + CHECK(secp256k1_pedersen_verify_tally(none, &commit_ptr, 1, NULL, 1) == 0); + CHECK(ecount == 11); + + CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, &blind_ptr, &blind_out_ptr, 1, 0) != 0); + CHECK(ecount == 11); + CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, &blind_ptr, &blind_out_ptr, 1, 1) == 0); + CHECK(ecount == 12); + CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, &blind_ptr, &blind_out_ptr, 0, 0) == 0); + CHECK(ecount == 13); + CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, NULL, &blind_ptr, &blind_out_ptr, 1, 0) == 0); + CHECK(ecount == 14); + CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, NULL, &blind_out_ptr, 1, 0) == 0); + CHECK(ecount == 15); + CHECK(secp256k1_pedersen_blind_generator_blind_sum(none, &val, &blind_ptr, NULL, 1, 0) == 0); + CHECK(ecount == 16); + + /* Test commit with integer and blinding factor */ + /* Value: 1*/ + secp256k1_scalar tmp_s; + unsigned char out[33]; + unsigned char out2[33]; + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(blind, &tmp_s); + memset(blind_out, 0, 32); + blind_out[31] = 1; + val = 1; + CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(secp256k1_pedersen_commitment_serialize(sign, out, &commit) == 1); + CHECK(secp256k1_pedersen_blind_commit(sign, &commit2, blind, blind_out, 
&secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(secp256k1_pedersen_commitment_serialize(sign, out2, &commit2) == 1); + CHECK(memcmp(out, out2, 33) == 0); + /* Value: 1 and 2*/ + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(blind, &tmp_s); + memset(blind_out, 0, 32); + blind_out[31] = 1; + val = 2; + CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(secp256k1_pedersen_commitment_serialize(sign, out, &commit) == 1); + CHECK(secp256k1_pedersen_blind_commit(sign, &commit2, blind, blind_out, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(secp256k1_pedersen_commitment_serialize(sign, out2, &commit2) == 1); + CHECK(memcmp(out, out2, 33) != 0); + /* Value: random*/ + random_scalar_order_test(&tmp_s); + secp256k1_scalar_get_b32(blind, &tmp_s); + memset(blind_out, 0, 32); + blind_out[30] = secp256k1_rand32()%256; + blind_out[31] = secp256k1_rand32()%256; + val = blind_out[30]*256 + blind_out[31]; + CHECK(secp256k1_pedersen_commit(sign, &commit, blind, val, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(secp256k1_pedersen_commitment_serialize(sign, out, &commit) == 1); + CHECK(secp256k1_pedersen_blind_commit(sign, &commit2, blind, blind_out, &secp256k1_generator_const_h, &secp256k1_generator_const_g) == 1); + CHECK(secp256k1_pedersen_commitment_serialize(sign, out2, &commit2) == 1); + CHECK(memcmp(out, out2, 33) == 0); + + secp256k1_context_destroy(none); + secp256k1_context_destroy(sign); + secp256k1_context_destroy(vrfy); + secp256k1_context_destroy(both); +} + +static void test_pedersen(void) { + secp256k1_pedersen_commitment commits[19]; + const secp256k1_pedersen_commitment *cptr[19]; + unsigned char blinds[32*19]; + const unsigned char *bptr[19]; + secp256k1_scalar s; + uint64_t values[19]; + int64_t totalv; + int i; + int inputs; + int outputs; + int total; + inputs = (secp256k1_rand32() & 
7) + 1; + outputs = (secp256k1_rand32() & 7) + 2; + total = inputs + outputs; + for (i = 0; i < 19; i++) { + cptr[i] = &commits[i]; + bptr[i] = &blinds[i * 32]; + } + totalv = 0; + for (i = 0; i < inputs; i++) { + values[i] = secp256k1_rands64(0, INT64_MAX - totalv); + totalv += values[i]; + } + for (i = 0; i < outputs - 1; i++) { + values[i + inputs] = secp256k1_rands64(0, totalv); + totalv -= values[i + inputs]; + } + values[total - 1] = totalv; + + for (i = 0; i < total - 1; i++) { + random_scalar_order(&s); + secp256k1_scalar_get_b32(&blinds[i * 32], &s); + } + CHECK(secp256k1_pedersen_blind_sum(ctx, &blinds[(total - 1) * 32], bptr, total - 1, inputs)); + for (i = 0; i < total; i++) { + CHECK(secp256k1_pedersen_commit(ctx, &commits[i], &blinds[i * 32], values[i], &secp256k1_generator_const_h, &secp256k1_generator_const_g)); + } + CHECK(secp256k1_pedersen_verify_tally(ctx, cptr, inputs, &cptr[inputs], outputs)); + CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[inputs], outputs, cptr, inputs)); + if (inputs > 0 && values[0] > 0) { + CHECK(!secp256k1_pedersen_verify_tally(ctx, cptr, inputs - 1, &cptr[inputs], outputs)); + } + random_scalar_order(&s); + for (i = 0; i < 4; i++) { + secp256k1_scalar_get_b32(&blinds[i * 32], &s); + } + values[0] = INT64_MAX; + values[1] = 0; + values[2] = 1; + for (i = 0; i < 3; i++) { + CHECK(secp256k1_pedersen_commit(ctx, &commits[i], &blinds[i * 32], values[i], &secp256k1_generator_const_h, &secp256k1_generator_const_g)); + } + CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[0], 1, &cptr[0], 1)); + CHECK(secp256k1_pedersen_verify_tally(ctx, &cptr[1], 1, &cptr[1], 1)); +} + +#define MAX_N_GENS 30 +void test_multiple_generators(void) { + const size_t n_inputs = (secp256k1_rand32() % (MAX_N_GENS / 2)) + 1; + const size_t n_outputs = (secp256k1_rand32() % (MAX_N_GENS / 2)) + 1; + const size_t n_generators = n_inputs + n_outputs; + unsigned char *generator_blind[MAX_N_GENS]; + unsigned char *pedersen_blind[MAX_N_GENS]; + 
secp256k1_generator generator[MAX_N_GENS]; + secp256k1_pedersen_commitment commit[MAX_N_GENS]; + const secp256k1_pedersen_commitment *commit_ptr[MAX_N_GENS]; + size_t i; + int64_t total_value; + uint64_t value[MAX_N_GENS]; + + secp256k1_scalar s; + + unsigned char generator_seed[32]; + random_scalar_order(&s); + secp256k1_scalar_get_b32(generator_seed, &s); + /* Create all the needed generators */ + for (i = 0; i < n_generators; i++) { + generator_blind[i] = (unsigned char*) malloc(32); + pedersen_blind[i] = (unsigned char*) malloc(32); + + random_scalar_order(&s); + secp256k1_scalar_get_b32(generator_blind[i], &s); + random_scalar_order(&s); + secp256k1_scalar_get_b32(pedersen_blind[i], &s); + + CHECK(secp256k1_generator_generate_blinded(ctx, &generator[i], generator_seed, generator_blind[i])); + + commit_ptr[i] = &commit[i]; + } + + /* Compute all the values -- can be positive or negative */ + total_value = 0; + for (i = 0; i < n_outputs; i++) { + value[n_inputs + i] = secp256k1_rands64(0, INT64_MAX - total_value); + total_value += value[n_inputs + i]; + } + for (i = 0; i < n_inputs - 1; i++) { + value[i] = secp256k1_rands64(0, total_value); + total_value -= value[i]; + } + value[i] = total_value; + + /* check total */ + total_value = 0; + for (i = 0; i < n_outputs; i++) { + total_value += value[n_inputs + i]; + } + for (i = 0; i < n_inputs; i++) { + total_value -= value[i]; + } + CHECK(total_value == 0); + + /* Correct for blinding factors and do the commitments */ + CHECK(secp256k1_pedersen_blind_generator_blind_sum(ctx, value, (const unsigned char * const *) generator_blind, pedersen_blind, n_generators, n_inputs)); + for (i = 0; i < n_generators; i++) { + CHECK(secp256k1_pedersen_commit(ctx, &commit[i], pedersen_blind[i], value[i], &generator[i], &secp256k1_generator_const_g)); + } + + /* Verify */ + CHECK(secp256k1_pedersen_verify_tally(ctx, &commit_ptr[0], n_inputs, &commit_ptr[n_inputs], n_outputs)); + + /* Cleanup */ + for (i = 0; i < n_generators; i++) { 
+ free(generator_blind[i]); + free(pedersen_blind[i]); + } +} +#undef MAX_N_GENS + +void test_switch(void) { + secp256k1_context *local_ctx; + secp256k1_pubkey tmp_pubkey; + unsigned char blind[32]; + unsigned char blind_switch[32]; + unsigned char blind_switch_2[32]; + uint64_t val = secp256k1_rand32(); + + secp256k1_generator GENERATOR_G = {{ + 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, + 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07, + 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, + 0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, + 0x48, 0x3a, 0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, + 0x5d, 0xa4, 0xfb, 0xfc, 0x0e, 0x11, 0x08, 0xa8, + 0xfd, 0x17, 0xb4, 0x48, 0xa6, 0x85, 0x54, 0x19, + 0x9c, 0x47, 0xd0, 0x8f, 0xfb, 0x10, 0xd4, 0xb8 + }}; + + secp256k1_generator GENERATOR_H = {{ + 0x50, 0x92, 0x9b, 0x74, 0xc1, 0xa0, 0x49, 0x54, + 0xb7, 0x8b, 0x4b, 0x60, 0x35, 0xe9, 0x7a, 0x5e, + 0x07, 0x8a, 0x5a, 0x0f, 0x28, 0xec, 0x96, 0xd5, + 0x47, 0xbf, 0xee, 0x9a, 0xce, 0x80, 0x3a, 0xc0, + 0x31, 0xd3, 0xc6, 0x86, 0x39, 0x73, 0x92, 0x6e, + 0x04, 0x9e, 0x63, 0x7c, 0xb1, 0xb5, 0xf4, 0x0a, + 0x36, 0xda, 0xc2, 0x8a, 0xf1, 0x76, 0x69, 0x68, + 0xc3, 0x0c, 0x23, 0x13, 0xf3, 0xa3, 0x89, 0x04 + }}; + + secp256k1_pubkey GENERATOR_J_PUB = {{ + 0x5f, 0x15, 0x21, 0x36, 0x93, 0x93, 0x01, 0x2a, + 0x8d, 0x8b, 0x39, 0x7e, 0x9b, 0xf4, 0x54, 0x29, + 0x2f, 0x5a, 0x1b, 0x3d, 0x38, 0x85, 0x16, 0xc2, + 0xf3, 0x03, 0xfc, 0x95, 0x67, 0xf5, 0x60, 0xb8, + 0x3a, 0xc4, 0xc5, 0xa6, 0xdc, 0xa2, 0x01, 0x59, + 0xfc, 0x56, 0xcf, 0x74, 0x9a, 0xa6, 0xa5, 0x65, + 0x31, 0x6a, 0xa5, 0x03, 0x74, 0x42, 0x3f, 0x42, + 0x53, 0x8f, 0xaa, 0x2c, 0xd3, 0x09, 0x3f, 0xa4 + }}; + + unsigned char GENERATOR_J_COMPR[33] = { + 0x02, + 0xb8, 0x60, 0xf5, 0x67, 0x95, 0xfc, 0x03, 0xf3, + 0xc2, 0x16, 0x85, 0x38, 0x3d, 0x1b, 0x5a, 0x2f, + 0x29, 0x54, 0xf4, 0x9b, 0x7e, 0x39, 0x8b, 0x8d, + 0x2a, 0x01, 0x93, 0x93, 0x36, 0x21, 0x15, 0x5f + }; + + local_ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + + 
CHECK(secp256k1_ec_pubkey_parse(local_ctx, &tmp_pubkey, GENERATOR_J_COMPR, 33)); + CHECK(memcmp(GENERATOR_J_PUB.data, tmp_pubkey.data, 64) == 0); + secp256k1_rand256(blind); + CHECK(secp256k1_blind_switch(local_ctx, blind_switch, blind, val, &GENERATOR_H, &GENERATOR_G, &GENERATOR_J_PUB)); + CHECK(memcmp(blind_switch, blind, 32) != 0); + CHECK(secp256k1_blind_switch(local_ctx, blind_switch_2, blind, val, &GENERATOR_H, &GENERATOR_G, &GENERATOR_J_PUB)); + CHECK(memcmp(blind_switch_2, blind_switch, 32) == 0); +} + +void run_commitment_tests(void) { + int i; + test_commitment_api(); + for (i = 0; i < 10*count; i++) { + test_pedersen(); + } + test_multiple_generators(); + test_switch(); +} + +#endif diff --git a/src/secp256k1/src/modules/generator/Makefile.am.include b/src/secp256k1/src/modules/generator/Makefile.am.include new file mode 100644 index 0000000000000..69933e999ca3b --- /dev/null +++ b/src/secp256k1/src/modules/generator/Makefile.am.include @@ -0,0 +1,9 @@ +include_HEADERS += include/secp256k1_generator.h +noinst_HEADERS += src/modules/generator/main_impl.h +noinst_HEADERS += src/modules/generator/tests_impl.h +if USE_BENCHMARK +noinst_PROGRAMS += bench_generator +bench_generator_SOURCES = src/bench_generator.c +bench_generator_LDADD = libsecp256k1.la $(SECP_LIBS) +bench_generator_LDFLAGS = -static +endif diff --git a/src/secp256k1/src/modules/generator/main_impl.h b/src/secp256k1/src/modules/generator/main_impl.h new file mode 100644 index 0000000000000..8edaa7a72e850 --- /dev/null +++ b/src/secp256k1/src/modules/generator/main_impl.h @@ -0,0 +1,247 @@ +/********************************************************************** + * Copyright (c) 2016 Andrew Poelstra & Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_GENERATOR_MAIN +#define 
SECP256K1_MODULE_GENERATOR_MAIN + +#include + +#include "field.h" +#include "group.h" +#include "hash.h" +#include "scalar.h" + +/** Standard secp256k1 generator */ +const secp256k1_generator secp256k1_generator_const_g = {{ + 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07, + 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, + 0x48, 0x3a, 0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, 0x5d, 0xa4, 0xfb, 0xfc, 0x0e, 0x11, 0x08, 0xa8, + 0xfd, 0x17, 0xb4, 0x48, 0xa6, 0x85, 0x54, 0x19, 0x9c, 0x47, 0xd0, 0x8f, 0xfb, 0x10, 0xd4, 0xb8 +}}; + +/** Alternate secp256k1 generator, used in Elements Alpha. + * Computed as the hash of the above G, DER-encoded with 0x04 (uncompressed pubkey) as its flag byte. + * import hashlib + * C = EllipticCurve ([F (0), F (7)]) + * G_bytes = '0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'.decode('hex') + * H = C.lift_x(int(hashlib.sha256(G_bytes).hexdigest(),16)) + */ +const secp256k1_generator secp256k1_generator_const_h = {{ + 0x50, 0x92, 0x9b, 0x74, 0xc1, 0xa0, 0x49, 0x54, 0xb7, 0x8b, 0x4b, 0x60, 0x35, 0xe9, 0x7a, 0x5e, + 0x07, 0x8a, 0x5a, 0x0f, 0x28, 0xec, 0x96, 0xd5, 0x47, 0xbf, 0xee, 0x9a, 0xce, 0x80, 0x3a, 0xc0, + 0x31, 0xd3, 0xc6, 0x86, 0x39, 0x73, 0x92, 0x6e, 0x04, 0x9e, 0x63, 0x7c, 0xb1, 0xb5, 0xf4, 0x0a, + 0x36, 0xda, 0xc2, 0x8a, 0xf1, 0x76, 0x69, 0x68, 0xc3, 0x0c, 0x23, 0x13, 0xf3, 0xa3, 0x89, 0x04 +}}; + +static void secp256k1_generator_load(secp256k1_ge* ge, const secp256k1_generator* gen) { + int succeed; + succeed = secp256k1_fe_set_b32(&ge->x, &gen->data[0]); + VERIFY_CHECK(succeed != 0); + succeed = secp256k1_fe_set_b32(&ge->y, &gen->data[32]); + VERIFY_CHECK(succeed != 0); + ge->infinity = 0; + (void) succeed; +} + +static void secp256k1_generator_save(secp256k1_generator *gen, secp256k1_ge* ge) { + VERIFY_CHECK(!secp256k1_ge_is_infinity(ge)); + 
secp256k1_fe_normalize_var(&ge->x); + secp256k1_fe_normalize_var(&ge->y); + secp256k1_fe_get_b32(&gen->data[0], &ge->x); + secp256k1_fe_get_b32(&gen->data[32], &ge->y); +} + +int secp256k1_generator_parse(const secp256k1_context* ctx, secp256k1_generator* gen, const unsigned char *input) { + secp256k1_fe x; + secp256k1_ge ge; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(gen != NULL); + ARG_CHECK(input != NULL); + + if ((input[0] & 0xFE) != 10 || + !secp256k1_fe_set_b32(&x, &input[1]) || + !secp256k1_ge_set_xquad(&ge, &x)) { + return 0; + } + if (input[0] & 1) { + secp256k1_ge_neg(&ge, &ge); + } + secp256k1_generator_save(gen, &ge); + return 1; +} + +int secp256k1_generator_serialize(const secp256k1_context* ctx, unsigned char *output, const secp256k1_generator* gen) { + secp256k1_ge ge; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(output != NULL); + ARG_CHECK(gen != NULL); + + secp256k1_generator_load(&ge, gen); + + output[0] = 11 ^ secp256k1_fe_is_quad_var(&ge.y); + secp256k1_fe_normalize_var(&ge.x); + secp256k1_fe_get_b32(&output[1], &ge.x); + return 1; +} + +static void shallue_van_de_woestijne(secp256k1_ge* ge, const secp256k1_fe* t) { + /* Implements the algorithm from: + * Indifferentiable Hashing to Barreto-Naehrig Curves + * Pierre-Alain Fouque and Mehdi Tibouchi + * Latincrypt 2012 + */ + + /* Basic algorithm: + + c = sqrt(-3) + d = (c - 1)/2 + + w = c * t / (1 + b + t^2) [with b = 7] + x1 = d - t*w + x2 = -(x1 + 1) + x3 = 1 + 1/w^2 + + To avoid the 2 divisions, compute the above in numerator/denominator form: + wn = c * t + wd = 1 + 7 + t^2 + x1n = d*wd - t*wn + x1d = wd + x2n = -(x1n + wd) + x2d = wd + x3n = wd^2 + c^2 + t^2 + x3d = (c * t)^2 + + The joint denominator j = wd * c^2 * t^2, and + 1 / x1d = 1/j * c^2 * t^2 + 1 / x2d = x3d = 1/j * wd + */ + + static const secp256k1_fe c = SECP256K1_FE_CONST(0x0a2d2ba9, 0x3507f1df, 0x233770c2, 0xa797962c, 0xc61f6d15, 0xda14ecd4, 0x7d8d27ae, 0x1cd5f852); + static const secp256k1_fe d = 
SECP256K1_FE_CONST(0x851695d4, 0x9a83f8ef, 0x919bb861, 0x53cbcb16, 0x630fb68a, 0xed0a766a, 0x3ec693d6, 0x8e6afa40); + static const secp256k1_fe b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7); + static const secp256k1_fe b_plus_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 8); + + secp256k1_fe wn, wd, x1n, x2n, x3n, x3d, jinv, tmp, x1, x2, x3, alphain, betain, gammain, y1, y2, y3; + int alphaquad, betaquad; + + secp256k1_fe_mul(&wn, &c, t); /* mag 1 */ + secp256k1_fe_sqr(&wd, t); /* mag 1 */ + secp256k1_fe_add(&wd, &b_plus_one); /* mag 2 */ + secp256k1_fe_mul(&tmp, t, &wn); /* mag 1 */ + secp256k1_fe_negate(&tmp, &tmp, 1); /* mag 2 */ + secp256k1_fe_mul(&x1n, &d, &wd); /* mag 1 */ + secp256k1_fe_add(&x1n, &tmp); /* mag 3 */ + x2n = x1n; /* mag 3 */ + secp256k1_fe_add(&x2n, &wd); /* mag 5 */ + secp256k1_fe_negate(&x2n, &x2n, 5); /* mag 6 */ + secp256k1_fe_mul(&x3d, &c, t); /* mag 1 */ + secp256k1_fe_sqr(&x3d, &x3d); /* mag 1 */ + secp256k1_fe_sqr(&x3n, &wd); /* mag 1 */ + secp256k1_fe_add(&x3n, &x3d); /* mag 2 */ + secp256k1_fe_mul(&jinv, &x3d, &wd); /* mag 1 */ + secp256k1_fe_inv(&jinv, &jinv); /* mag 1 */ + secp256k1_fe_mul(&x1, &x1n, &x3d); /* mag 1 */ + secp256k1_fe_mul(&x1, &x1, &jinv); /* mag 1 */ + secp256k1_fe_mul(&x2, &x2n, &x3d); /* mag 1 */ + secp256k1_fe_mul(&x2, &x2, &jinv); /* mag 1 */ + secp256k1_fe_mul(&x3, &x3n, &wd); /* mag 1 */ + secp256k1_fe_mul(&x3, &x3, &jinv); /* mag 1 */ + + secp256k1_fe_sqr(&alphain, &x1); /* mag 1 */ + secp256k1_fe_mul(&alphain, &alphain, &x1); /* mag 1 */ + secp256k1_fe_add(&alphain, &b); /* mag 2 */ + secp256k1_fe_sqr(&betain, &x2); /* mag 1 */ + secp256k1_fe_mul(&betain, &betain, &x2); /* mag 1 */ + secp256k1_fe_add(&betain, &b); /* mag 2 */ + secp256k1_fe_sqr(&gammain, &x3); /* mag 1 */ + secp256k1_fe_mul(&gammain, &gammain, &x3); /* mag 1 */ + secp256k1_fe_add(&gammain, &b); /* mag 2 */ + + alphaquad = secp256k1_fe_sqrt(&y1, &alphain); + betaquad = secp256k1_fe_sqrt(&y2, &betain); + secp256k1_fe_sqrt(&y3, &gammain); + + 
secp256k1_fe_cmov(&x1, &x2, (!alphaquad) & betaquad); + secp256k1_fe_cmov(&y1, &y2, (!alphaquad) & betaquad); + secp256k1_fe_cmov(&x1, &x3, (!alphaquad) & !betaquad); + secp256k1_fe_cmov(&y1, &y3, (!alphaquad) & !betaquad); + + secp256k1_ge_set_xy(ge, &x1, &y1); + + /* The linked algorithm from the paper uses the Jacobi symbol of t to + * determine the Jacobi symbol of the produced y coordinate. Since the + * rest of the algorithm only uses t^2, we can safely use another criterion + * as long as negation of t results in negation of the y coordinate. Here + * we choose to use t's oddness, as it is faster to determine. */ + secp256k1_fe_negate(&tmp, &ge->y, 1); + secp256k1_fe_cmov(&ge->y, &tmp, secp256k1_fe_is_odd(t)); +} + +static int secp256k1_generator_generate_internal(const secp256k1_context* ctx, secp256k1_generator* gen, const unsigned char *key32, const unsigned char *blind32) { + static const unsigned char prefix1[17] = "1st generation: "; + static const unsigned char prefix2[17] = "2nd generation: "; + secp256k1_fe t = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 4); + secp256k1_ge add; + secp256k1_gej accum; + int overflow; + secp256k1_sha256 sha256; + unsigned char b32[32]; + int ret = 1; + + if (blind32) { + secp256k1_scalar blind; + secp256k1_scalar_set_b32(&blind, blind32, &overflow); + ret = !overflow; + CHECK(ret); + secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &accum, &blind); + } + + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, prefix1, 16); + secp256k1_sha256_write(&sha256, key32, 32); + secp256k1_sha256_finalize(&sha256, b32); + ret &= secp256k1_fe_set_b32(&t, b32); + CHECK(ret); + shallue_van_de_woestijne(&add, &t); + if (blind32) { + secp256k1_gej_add_ge(&accum, &accum, &add); + } else { + secp256k1_gej_set_ge(&accum, &add); + } + + secp256k1_sha256_initialize(&sha256); + secp256k1_sha256_write(&sha256, prefix2, 16); + secp256k1_sha256_write(&sha256, key32, 32); + secp256k1_sha256_finalize(&sha256, b32); + ret &= 
secp256k1_fe_set_b32(&t, b32); + CHECK(ret); + shallue_van_de_woestijne(&add, &t); + secp256k1_gej_add_ge(&accum, &accum, &add); + + secp256k1_ge_set_gej(&add, &accum); + secp256k1_generator_save(gen, &add); + return ret; +} + +int secp256k1_generator_generate(const secp256k1_context* ctx, secp256k1_generator* gen, const unsigned char *key32) { + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(gen != NULL); + ARG_CHECK(key32 != NULL); + return secp256k1_generator_generate_internal(ctx, gen, key32, NULL); +} + +int secp256k1_generator_generate_blinded(const secp256k1_context* ctx, secp256k1_generator* gen, const unsigned char *key32, const unsigned char *blind32) { + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(gen != NULL); + ARG_CHECK(key32 != NULL); + ARG_CHECK(blind32 != NULL); + ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + return secp256k1_generator_generate_internal(ctx, gen, key32, blind32); +} + +#endif diff --git a/src/secp256k1/src/modules/generator/tests_impl.h b/src/secp256k1/src/modules/generator/tests_impl.h new file mode 100644 index 0000000000000..20acf2e7da1bb --- /dev/null +++ b/src/secp256k1/src/modules/generator/tests_impl.h @@ -0,0 +1,219 @@ +/********************************************************************** + * Copyright (c) 2016 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_GENERATOR_TESTS +#define SECP256K1_MODULE_GENERATOR_TESTS + +#include +#include + +#include "group.h" +#include "scalar.h" +#include "testrand.h" +#include "util.h" + +#include "include/secp256k1_generator.h" + +void test_generator_api(void) { + unsigned char key[32]; + unsigned char blind[32]; + unsigned char sergen[33]; + secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); + secp256k1_context *sign = 
secp256k1_context_create(SECP256K1_CONTEXT_SIGN); + secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); + secp256k1_generator gen; + int32_t ecount = 0; + + secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_rand256(key); + secp256k1_rand256(blind); + + CHECK(secp256k1_generator_generate(none, &gen, key) == 1); + CHECK(ecount == 0); + CHECK(secp256k1_generator_generate(none, NULL, key) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_generator_generate(none, &gen, NULL) == 0); + CHECK(ecount == 2); + + CHECK(secp256k1_generator_generate_blinded(sign, &gen, key, blind) == 1); + CHECK(ecount == 2); + CHECK(secp256k1_generator_generate_blinded(vrfy, &gen, key, blind) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_generator_generate_blinded(none, &gen, key, blind) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_generator_generate_blinded(vrfy, NULL, key, blind) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_generator_generate_blinded(vrfy, &gen, NULL, blind) == 0); + CHECK(ecount == 6); + CHECK(secp256k1_generator_generate_blinded(vrfy, &gen, key, NULL) == 0); + CHECK(ecount == 7); + + CHECK(secp256k1_generator_serialize(none, sergen, &gen) == 1); + CHECK(ecount == 7); + CHECK(secp256k1_generator_serialize(none, NULL, &gen) == 0); + CHECK(ecount == 8); + CHECK(secp256k1_generator_serialize(none, sergen, NULL) == 0); + CHECK(ecount == 9); + + CHECK(secp256k1_generator_serialize(none, sergen, &gen) == 1); + CHECK(secp256k1_generator_parse(none, &gen, sergen) == 1); + CHECK(ecount == 
9); + CHECK(secp256k1_generator_parse(none, NULL, sergen) == 0); + CHECK(ecount == 10); + CHECK(secp256k1_generator_parse(none, &gen, NULL) == 0); + CHECK(ecount == 11); + + secp256k1_context_destroy(none); + secp256k1_context_destroy(sign); + secp256k1_context_destroy(vrfy); +} + +void test_shallue_van_de_woestijne(void) { + /* Matches with the output of the shallue_van_de_woestijne.sage SAGE program */ + static const secp256k1_ge_storage results[32] = { + SECP256K1_GE_STORAGE_CONST(0xedd1fd3e, 0x327ce90c, 0xc7a35426, 0x14289aee, 0x9682003e, 0x9cf7dcc9, 0xcf2ca974, 0x3be5aa0c, 0x0225f529, 0xee75acaf, 0xccfc4560, 0x26c5e46b, 0xf80237a3, 0x3924655a, 0x16f90e88, 0x085ed52a), + SECP256K1_GE_STORAGE_CONST(0xedd1fd3e, 0x327ce90c, 0xc7a35426, 0x14289aee, 0x9682003e, 0x9cf7dcc9, 0xcf2ca974, 0x3be5aa0c, 0xfdda0ad6, 0x118a5350, 0x3303ba9f, 0xd93a1b94, 0x07fdc85c, 0xc6db9aa5, 0xe906f176, 0xf7a12705), + SECP256K1_GE_STORAGE_CONST(0x2c5cdc9c, 0x338152fa, 0x85de92cb, 0x1bee9907, 0x765a922e, 0x4f037cce, 0x14ecdbf2, 0x2f78fe15, 0x56716069, 0x6818286b, 0x72f01a3e, 0x5e8caca7, 0x36249160, 0xc7ded69d, 0xd51913c3, 0x03a2fa97), + SECP256K1_GE_STORAGE_CONST(0x2c5cdc9c, 0x338152fa, 0x85de92cb, 0x1bee9907, 0x765a922e, 0x4f037cce, 0x14ecdbf2, 0x2f78fe15, 0xa98e9f96, 0x97e7d794, 0x8d0fe5c1, 0xa1735358, 0xc9db6e9f, 0x38212962, 0x2ae6ec3b, 0xfc5d0198), + SECP256K1_GE_STORAGE_CONST(0x531f7239, 0xaebc780e, 0x179fbf8d, 0x412a1b01, 0x511f0abc, 0xe0c46151, 0x8b38db84, 0xcc2467f3, 0x82387d45, 0xec7bd5cc, 0x61fcb9df, 0x41cddd7b, 0x217d8114, 0x3577dc8f, 0x23de356a, 0x7e97704e), + SECP256K1_GE_STORAGE_CONST(0x531f7239, 0xaebc780e, 0x179fbf8d, 0x412a1b01, 0x511f0abc, 0xe0c46151, 0x8b38db84, 0xcc2467f3, 0x7dc782ba, 0x13842a33, 0x9e034620, 0xbe322284, 0xde827eeb, 0xca882370, 0xdc21ca94, 0x81688be1), + SECP256K1_GE_STORAGE_CONST(0x2c5cdc9c, 0x338152fa, 0x85de92cb, 0x1bee9907, 0x765a922e, 0x4f037cce, 0x14ecdbf2, 0x2f78fe15, 0x56716069, 0x6818286b, 0x72f01a3e, 0x5e8caca7, 0x36249160, 0xc7ded69d, 
0xd51913c3, 0x03a2fa97), + SECP256K1_GE_STORAGE_CONST(0x2c5cdc9c, 0x338152fa, 0x85de92cb, 0x1bee9907, 0x765a922e, 0x4f037cce, 0x14ecdbf2, 0x2f78fe15, 0xa98e9f96, 0x97e7d794, 0x8d0fe5c1, 0xa1735358, 0xc9db6e9f, 0x38212962, 0x2ae6ec3b, 0xfc5d0198), + SECP256K1_GE_STORAGE_CONST(0x5e5936b1, 0x81db0b65, 0x8e33a8c6, 0x1aa687dd, 0x31d11e15, 0x85e35664, 0x6b4c2071, 0xcde7e942, 0x88bb5332, 0xa8e05654, 0x78d4f60c, 0x0cd979ec, 0x938558f2, 0xcac11216, 0x7c387a56, 0xe3a6d5f3), + SECP256K1_GE_STORAGE_CONST(0x5e5936b1, 0x81db0b65, 0x8e33a8c6, 0x1aa687dd, 0x31d11e15, 0x85e35664, 0x6b4c2071, 0xcde7e942, 0x7744accd, 0x571fa9ab, 0x872b09f3, 0xf3268613, 0x6c7aa70d, 0x353eede9, 0x83c785a8, 0x1c59263c), + SECP256K1_GE_STORAGE_CONST(0x657d438f, 0xfac34a50, 0x463fd07c, 0x3f09f320, 0x4c98e8ed, 0x6927e330, 0xc0c7735f, 0x76d32f6d, 0x577c2b11, 0xcaca2f6f, 0xd60bcaf0, 0x3e7cebe9, 0x5da6e1f4, 0xbb557f12, 0x2a397331, 0x81df897f), + SECP256K1_GE_STORAGE_CONST(0x657d438f, 0xfac34a50, 0x463fd07c, 0x3f09f320, 0x4c98e8ed, 0x6927e330, 0xc0c7735f, 0x76d32f6d, 0xa883d4ee, 0x3535d090, 0x29f4350f, 0xc1831416, 0xa2591e0b, 0x44aa80ed, 0xd5c68ccd, 0x7e2072b0), + SECP256K1_GE_STORAGE_CONST(0xbe0bc11b, 0x2bc639cb, 0xc28f72a8, 0xd07c21cc, 0xbc06cfa7, 0x4c2ff25e, 0x630c9740, 0x23128eab, 0x6f062fc8, 0x75148197, 0xd10375c3, 0xcc3fadb6, 0x20277e9c, 0x00579c55, 0xeddd7f95, 0xe95604db), + SECP256K1_GE_STORAGE_CONST(0xbe0bc11b, 0x2bc639cb, 0xc28f72a8, 0xd07c21cc, 0xbc06cfa7, 0x4c2ff25e, 0x630c9740, 0x23128eab, 0x90f9d037, 0x8aeb7e68, 0x2efc8a3c, 0x33c05249, 0xdfd88163, 0xffa863aa, 0x12228069, 0x16a9f754), + SECP256K1_GE_STORAGE_CONST(0xedd1fd3e, 0x327ce90c, 0xc7a35426, 0x14289aee, 0x9682003e, 0x9cf7dcc9, 0xcf2ca974, 0x3be5aa0c, 0xfdda0ad6, 0x118a5350, 0x3303ba9f, 0xd93a1b94, 0x07fdc85c, 0xc6db9aa5, 0xe906f176, 0xf7a12705), + SECP256K1_GE_STORAGE_CONST(0xedd1fd3e, 0x327ce90c, 0xc7a35426, 0x14289aee, 0x9682003e, 0x9cf7dcc9, 0xcf2ca974, 0x3be5aa0c, 0x0225f529, 0xee75acaf, 0xccfc4560, 0x26c5e46b, 0xf80237a3, 0x3924655a, 
0x16f90e88, 0x085ed52a), + SECP256K1_GE_STORAGE_CONST(0xaee172d4, 0xce7c5010, 0xdb20a88f, 0x469598c1, 0xd7f7926f, 0xabb85cb5, 0x339f1403, 0x87e6b494, 0x38065980, 0x4de81b35, 0x098c7190, 0xe3380f9d, 0x95b2ed6c, 0x6c869e85, 0xc772bc5a, 0x7bc3d9d5), + SECP256K1_GE_STORAGE_CONST(0xaee172d4, 0xce7c5010, 0xdb20a88f, 0x469598c1, 0xd7f7926f, 0xabb85cb5, 0x339f1403, 0x87e6b494, 0xc7f9a67f, 0xb217e4ca, 0xf6738e6f, 0x1cc7f062, 0x6a4d1293, 0x9379617a, 0x388d43a4, 0x843c225a), + SECP256K1_GE_STORAGE_CONST(0xc28f5c28, 0xf5c28f5c, 0x28f5c28f, 0x5c28f5c2, 0x8f5c28f5, 0xc28f5c28, 0xf5c28f5b, 0x6666635a, 0x0c4da840, 0x1b2cf5be, 0x4604e6ec, 0xf92b2780, 0x063a5351, 0xe294bf65, 0xbb2f8b61, 0x00902db7), + SECP256K1_GE_STORAGE_CONST(0xc28f5c28, 0xf5c28f5c, 0x28f5c28f, 0x5c28f5c2, 0x8f5c28f5, 0xc28f5c28, 0xf5c28f5b, 0x6666635a, 0xf3b257bf, 0xe4d30a41, 0xb9fb1913, 0x06d4d87f, 0xf9c5acae, 0x1d6b409a, 0x44d0749d, 0xff6fce78), + SECP256K1_GE_STORAGE_CONST(0xecf56be6, 0x9c8fde26, 0x152832c6, 0xe043b3d5, 0xaf9a723f, 0x789854a0, 0xcb1b810d, 0xe2614ece, 0x66127ae4, 0xe4c17a75, 0x60a727e6, 0xffd2ea7f, 0xaed99088, 0xbec465c6, 0xbde56791, 0x37ed5572), + SECP256K1_GE_STORAGE_CONST(0xecf56be6, 0x9c8fde26, 0x152832c6, 0xe043b3d5, 0xaf9a723f, 0x789854a0, 0xcb1b810d, 0xe2614ece, 0x99ed851b, 0x1b3e858a, 0x9f58d819, 0x002d1580, 0x51266f77, 0x413b9a39, 0x421a986d, 0xc812a6bd), + SECP256K1_GE_STORAGE_CONST(0xba72860f, 0x10fcd142, 0x23f71e3c, 0x228deb9a, 0xc46c5ff5, 0x90b884e5, 0xcc60d51e, 0x0629d16e, 0x67999f31, 0x5a74ada3, 0x526832cf, 0x76b9fec3, 0xa348cc97, 0x33c3aa67, 0x02bd2516, 0x7814f635), + SECP256K1_GE_STORAGE_CONST(0xba72860f, 0x10fcd142, 0x23f71e3c, 0x228deb9a, 0xc46c5ff5, 0x90b884e5, 0xcc60d51e, 0x0629d16e, 0x986660ce, 0xa58b525c, 0xad97cd30, 0x8946013c, 0x5cb73368, 0xcc3c5598, 0xfd42dae8, 0x87eb05fa), + SECP256K1_GE_STORAGE_CONST(0x92ef5657, 0xdba51cc7, 0xf3e1b442, 0xa6a0916b, 0x8ce03079, 0x2ef5657d, 0xba51cc7e, 0xab2beb65, 0x782c65d2, 0x3f1e0eb2, 0x9179a994, 0xe5e8ff80, 0x5a0d50d9, 0xdeeaed90, 
0xcec96ca5, 0x973e2ad3), + SECP256K1_GE_STORAGE_CONST(0x92ef5657, 0xdba51cc7, 0xf3e1b442, 0xa6a0916b, 0x8ce03079, 0x2ef5657d, 0xba51cc7e, 0xab2beb65, 0x87d39a2d, 0xc0e1f14d, 0x6e86566b, 0x1a17007f, 0xa5f2af26, 0x2115126f, 0x31369359, 0x68c1d15c), + SECP256K1_GE_STORAGE_CONST(0x9468ad22, 0xf921fc78, 0x8de3f1b0, 0x586c58eb, 0x5e6f0270, 0xe950b602, 0x7ada90d9, 0xd71ae323, 0x922a0c6a, 0x9ccc31d9, 0xc3bf87fd, 0x88381739, 0x35fe393f, 0xa64dfdec, 0x29f2846d, 0x12918d86), + SECP256K1_GE_STORAGE_CONST(0x9468ad22, 0xf921fc78, 0x8de3f1b0, 0x586c58eb, 0x5e6f0270, 0xe950b602, 0x7ada90d9, 0xd71ae323, 0x6dd5f395, 0x6333ce26, 0x3c407802, 0x77c7e8c6, 0xca01c6c0, 0x59b20213, 0xd60d7b91, 0xed6e6ea9), + SECP256K1_GE_STORAGE_CONST(0x76ddc7f5, 0xe029e59e, 0x22b0e54f, 0xa811db94, 0x5a209c4f, 0x5e912ca2, 0x8b4da6a7, 0x4c1e00a2, 0x1e8f516c, 0x91c20437, 0x50f6e24e, 0x8c2cf202, 0xacf68291, 0xbf8b66eb, 0xf7335b62, 0xec2c88fe), + SECP256K1_GE_STORAGE_CONST(0x76ddc7f5, 0xe029e59e, 0x22b0e54f, 0xa811db94, 0x5a209c4f, 0x5e912ca2, 0x8b4da6a7, 0x4c1e00a2, 0xe170ae93, 0x6e3dfbc8, 0xaf091db1, 0x73d30dfd, 0x53097d6e, 0x40749914, 0x08cca49c, 0x13d37331), + SECP256K1_GE_STORAGE_CONST(0xf75763bc, 0x2907e79b, 0x125e33c3, 0x9a027f48, 0x0f8c6409, 0x2153432f, 0x967bc2b1, 0x1d1f5cf0, 0xb4a8edc6, 0x36391b39, 0x9bc219c0, 0x3d033128, 0xdbcd463e, 0xd2506394, 0x061b87a5, 0x9e510235), + SECP256K1_GE_STORAGE_CONST(0xf75763bc, 0x2907e79b, 0x125e33c3, 0x9a027f48, 0x0f8c6409, 0x2153432f, 0x967bc2b1, 0x1d1f5cf0, 0x4b571239, 0xc9c6e4c6, 0x643de63f, 0xc2fcced7, 0x2432b9c1, 0x2daf9c6b, 0xf9e47859, 0x61aef9fa), + }; + + secp256k1_ge ge; + secp256k1_fe fe; + secp256k1_ge_storage ges; + int i, s; + for (i = 1; i <= 16; i++) { + secp256k1_fe_set_int(&fe, i); + + for (s = 0; s < 2; s++) { + if (s) { + secp256k1_fe_negate(&fe, &fe, 1); + secp256k1_fe_normalize(&fe); + } + shallue_van_de_woestijne(&ge, &fe); + secp256k1_ge_to_storage(&ges, &ge); + + CHECK(memcmp(&ges, &results[i * 2 + s - 2], sizeof(secp256k1_ge_storage)) == 0); 
+ } + } +} + +void test_generator_generate(void) { + static const secp256k1_ge_storage results[32] = { + SECP256K1_GE_STORAGE_CONST(0x806cd8ed, 0xd6c153e3, 0x4aa9b9a0, 0x8755c4be, 0x4718b1ef, 0xb26cb93f, 0xfdd99e1b, 0x21f2af8e, 0xc7062208, 0xcc649a03, 0x1bdc1a33, 0x9d01f115, 0x4bcd0dca, 0xfe0b875d, 0x62f35f73, 0x28673006), + SECP256K1_GE_STORAGE_CONST(0xd91b15ec, 0x47a811f4, 0xaa189561, 0xd13f5c4d, 0x4e81f10d, 0xc7dc551f, 0x4fea9b84, 0x610314c4, 0x9b0ada1e, 0xb38efd67, 0x8bff0b6c, 0x7d7315f7, 0xb49b8cc5, 0xa679fad4, 0xc94f9dc6, 0x9da66382), + SECP256K1_GE_STORAGE_CONST(0x11c00de6, 0xf885035e, 0x76051430, 0xa3c38b2a, 0x5f86ab8c, 0xf66dae58, 0x04ea7307, 0x348b19bf, 0xe0858ae7, 0x61dcb1ba, 0xff247e37, 0xd38fcd88, 0xf3bd7911, 0xaa4ed6e0, 0x28d792dd, 0x3ee1ac09), + SECP256K1_GE_STORAGE_CONST(0x986b99eb, 0x3130e7f0, 0xe779f674, 0xb85cb514, 0x46a676bf, 0xb1dfb603, 0x4c4bb639, 0x7c406210, 0xdf900609, 0x8b3ef1e0, 0x30e32fb0, 0xd97a4329, 0xff98aed0, 0xcd278c3f, 0xe6078467, 0xfbd12f35), + SECP256K1_GE_STORAGE_CONST(0xae528146, 0x03fdf91e, 0xc592977e, 0x12461dc7, 0xb9e038f8, 0x048dcb62, 0xea264756, 0xd459ae42, 0x80ef658d, 0x92becb84, 0xdba8e4f9, 0x560d7a72, 0xbaf4c393, 0xfbcf6007, 0x11039f1c, 0x224faaad), + SECP256K1_GE_STORAGE_CONST(0x00df3d91, 0x35975eee, 0x91fab903, 0xe3128e4a, 0xca071dde, 0x270814e5, 0xcbda69ec, 0xcad58f46, 0x11b590aa, 0x92d89969, 0x2dbd932f, 0x08013b8b, 0x45afabc6, 0x43677db2, 0x143e0c0f, 0x5865fb03), + SECP256K1_GE_STORAGE_CONST(0x1168155b, 0x987e9bc8, 0x84c5f3f4, 0x92ebf784, 0xcc8c6735, 0x39d8e5e8, 0xa967115a, 0x2949da9b, 0x0858a470, 0xf403ca97, 0xb1827f6f, 0x544c2c67, 0x08f6cb83, 0xc510c317, 0x96c981ed, 0xb9f61780), + SECP256K1_GE_STORAGE_CONST(0xe8d7c0cf, 0x2bb4194c, 0x97bf2a36, 0xbd115ba0, 0x81a9afe8, 0x7663fa3c, 0x9c3cd253, 0x79fe2571, 0x2028ad04, 0xefa00119, 0x5a25d598, 0x67e79502, 0x49de7c61, 0x4751cd9d, 0x4fb317f6, 0xf76f1110), + SECP256K1_GE_STORAGE_CONST(0x9532c491, 0xa64851dd, 0xcd0d3e5a, 0x93e17267, 0xa10aca95, 0xa23781aa, 0x5087f340, 
0xc45fecc3, 0xb691ddc2, 0x3143a7b6, 0x09969302, 0x258affb8, 0x5bbf8666, 0xe1192319, 0xeb174d88, 0x308bd57a), + SECP256K1_GE_STORAGE_CONST(0x6b20b6e2, 0x1ba6cc44, 0x3f2c3a0c, 0x5283ba44, 0xbee43a0a, 0x2799a6cf, 0xbecc0f8a, 0xf8c583ac, 0xf7021e76, 0xd51291a6, 0xf9396215, 0x686f25aa, 0xbec36282, 0x5e11eeea, 0x6e51a6e6, 0xd7d7c006), + SECP256K1_GE_STORAGE_CONST(0xde27e6ff, 0x219b3ab1, 0x2b0a9e4e, 0x51fc6092, 0x96e55af6, 0xc6f717d6, 0x12cd6cce, 0x65d6c8f2, 0x48166884, 0x4dc13fd2, 0xed7a7d81, 0x66a0839a, 0x8a960863, 0xfe0001c1, 0x35d206fd, 0x63b87c09), + SECP256K1_GE_STORAGE_CONST(0x79a96fb8, 0xd88a08d3, 0x055d38d1, 0x3346b0d4, 0x47d838ca, 0xfcc8fa40, 0x6d3a7157, 0xef84e7e3, 0x6bab9c45, 0x2871b51d, 0xb0df2369, 0xe7860e01, 0x2e37ffea, 0x6689fd1a, 0x9c6fe9cf, 0xb940acea), + SECP256K1_GE_STORAGE_CONST(0x06c4d4cb, 0xd32c0ddb, 0x67e988c6, 0x2bdbe6ad, 0xa39b80cc, 0x61afb347, 0x234abe27, 0xa689618c, 0x5b355949, 0xf904fe08, 0x569b2313, 0xe8f19f8d, 0xc5b79e27, 0x70da0832, 0x5fb7a229, 0x238ca6b6), + SECP256K1_GE_STORAGE_CONST(0x7027e566, 0x3e727c28, 0x42aa14e5, 0x52c2d2ec, 0x1d8beaa9, 0x8a22ceab, 0x15ccafc3, 0xb4f06249, 0x9b3dffbc, 0xdbd5e045, 0x6931fd03, 0x8b1c6a9b, 0x4c168c6d, 0xa6553897, 0xfe11ce49, 0xac728139), + SECP256K1_GE_STORAGE_CONST(0xee3520c3, 0x9f2b954d, 0xf8e15547, 0xdaeb6cc8, 0x04c8f3b0, 0x9301f53e, 0xe0c11ea1, 0xeace539d, 0x244ff873, 0x7e060c98, 0xe843c353, 0xcd35d2e4, 0x3cd8b082, 0xcffbc9ae, 0x81eafa70, 0x332f9748), + SECP256K1_GE_STORAGE_CONST(0xdaecd756, 0xf5b706a4, 0xc14e1095, 0x3e2f70df, 0xa81276e7, 0x71806b89, 0x4d8a5502, 0xa0ef4998, 0xbac906c0, 0x948b1d48, 0xe023f439, 0xfd3770b8, 0x837f60cc, 0x40552a51, 0x433d0b79, 0x6610da27), + SECP256K1_GE_STORAGE_CONST(0x55e1ca28, 0x750fe2d0, 0x57f7449b, 0x3f49d999, 0x3b9616dd, 0x5387bc2e, 0x6e6698f8, 0xc4ea49f4, 0xe339e0e9, 0xa4c7fa99, 0xd063e062, 0x6582bce2, 0x33c6b1ee, 0x17a5b47f, 0x6d43ecf8, 0x98b40120), + SECP256K1_GE_STORAGE_CONST(0xdd82cac2, 0x9e0e0135, 0x4964d3bc, 0x27469233, 0xf13bbd5e, 0xd7aff24b, 0x4902fca8, 
0x17294b12, 0x561ab1d6, 0xcd9bcb6e, 0x805585cf, 0x3df8714c, 0x1bfa6304, 0x5efbf122, 0x1a3d8fd9, 0x3827764a), + SECP256K1_GE_STORAGE_CONST(0xda5cbfb7, 0x3522e9c7, 0xcb594436, 0x83677038, 0x0eaa64a9, 0x2eca3888, 0x0fe4c9d6, 0xdeb22dbf, 0x4f46de68, 0x0447c780, 0xc54a314b, 0x5389a926, 0xbba8910b, 0x869fc6cd, 0x42ee82e8, 0x5895e42a), + SECP256K1_GE_STORAGE_CONST(0x4e09830e, 0xc8894c58, 0x4e6278de, 0x167a96b0, 0x20d60463, 0xee48f788, 0x4974d66e, 0x871e35e9, 0x21259c4d, 0x332ca932, 0x2e187df9, 0xe7afbc23, 0x9d171ebc, 0x7d9e2560, 0x503f50b1, 0x9fe45834), + SECP256K1_GE_STORAGE_CONST(0xabfff6ca, 0x41dcfd17, 0x03cae629, 0x9d127971, 0xf19ee000, 0x2db332e6, 0x5cc209a3, 0xc21b8f54, 0x65991d60, 0xee54f5cc, 0xddf7a732, 0xa76b0303, 0xb9f519a6, 0x22ea0390, 0x8af23ffa, 0x35ae6632), + SECP256K1_GE_STORAGE_CONST(0xc6c9b92c, 0x91e045a5, 0xa1913277, 0x44d6fce2, 0x11b12c7c, 0x9b3112d6, 0xc61e14a6, 0xd6b1ae12, 0x04ab0396, 0xebdc4c6a, 0xc213cc3e, 0x077a2e80, 0xb4ba7b2b, 0x33907d56, 0x2c98ccf7, 0xb82a2e9f), + SECP256K1_GE_STORAGE_CONST(0x66f6e6d9, 0xc4bb9a5f, 0x99085781, 0x83cb9362, 0x2ea437d8, 0xccd31969, 0xffadca3a, 0xff1d3935, 0x50a5b06e, 0x39e039d7, 0x1dfb2723, 0x18db74e5, 0x5af64da1, 0xdfc34586, 0x6aac3bd0, 0x5792a890), + SECP256K1_GE_STORAGE_CONST(0x58ded03c, 0x98e1a890, 0x63fc7793, 0xe3ecd896, 0x235e75c9, 0x82e7008f, 0xddbf3ca8, 0x5b7e9ecb, 0x34594776, 0x58ab6821, 0xaf43a453, 0xa946fda9, 0x13d24999, 0xccf22df8, 0xd291ef59, 0xb08975c0), + SECP256K1_GE_STORAGE_CONST(0x74557864, 0x4f2b0486, 0xd5beea7c, 0x2d258ccb, 0x78a870e1, 0x848982d8, 0xed3f91a4, 0x9db83a36, 0xd84e940e, 0x1d33c28a, 0x62398ec8, 0xc493aee7, 0x7c2ba722, 0x42dee7ae, 0x3c35c256, 0xad00cf42), + SECP256K1_GE_STORAGE_CONST(0x7fc7963a, 0x16abc8fb, 0x5d61eb61, 0x0fc50a68, 0x754470d2, 0xf43df3be, 0x52228f66, 0x522fe61b, 0x499f9e7f, 0x462c6545, 0x29687af4, 0x9f7c732d, 0x48801ce5, 0x21acd546, 0xc6fb903c, 0x7c265032), + SECP256K1_GE_STORAGE_CONST(0xb2f6257c, 0xc58df82f, 0xb9ba4f36, 0x7ededf03, 0xf8ea10f3, 0x104d7ae6, 0x233b7ac4, 
0x725e11de, 0x9c7a32df, 0x4842f33d, 0xaad84f0b, 0x62e88b40, 0x46ddcbde, 0xbbeec6f8, 0x93bfde27, 0x0561dc73), + SECP256K1_GE_STORAGE_CONST(0xe2cdfd27, 0x8a8e22be, 0xabf08b79, 0x1bc6ae38, 0x41d22a9a, 0x9472e266, 0x1a7c6e83, 0xa2f74725, 0x0e26c103, 0xe0dd93b2, 0x3724f3b7, 0x8bb7366e, 0x2c245768, 0xd64f3283, 0xd8316e8a, 0x1383b977), + SECP256K1_GE_STORAGE_CONST(0x757c13e7, 0xe866017e, 0xe6af61d7, 0x161d208a, 0xc438f712, 0x242fcd23, 0x63a10e59, 0xd67e41fb, 0xb550c6a9, 0x4ddb15f3, 0xfeea4bfe, 0xd2faa19f, 0x2aa2fbd3, 0x0c6ae785, 0xe357f365, 0xb30d12e0), + SECP256K1_GE_STORAGE_CONST(0x528d525e, 0xac30095b, 0x5e5f83ca, 0x4d3dea63, 0xeb608f2d, 0x18dd25a7, 0x2529c8e5, 0x1ae5f9f1, 0xfde2860b, 0x492a4106, 0x9f356c05, 0x3ebc045e, 0x4ad08b79, 0x3e264935, 0xf25785a9, 0x8690b5ee), + SECP256K1_GE_STORAGE_CONST(0x150df593, 0x5b6956a0, 0x0cfed843, 0xb9d6ffce, 0x4f790022, 0xea18730f, 0xc495111d, 0x91568e55, 0x6700a2ca, 0x9ff4ed32, 0xc1697312, 0x4eb51ce3, 0x5656344b, 0x65a1e3d5, 0xd6c1f7ce, 0x29233f82), + SECP256K1_GE_STORAGE_CONST(0x38e02eaf, 0x2c8774fd, 0x58b8b373, 0x732457f1, 0x16dbe53b, 0xea5683d9, 0xada20dd7, 0x14ce20a6, 0x6ac5362e, 0xbb425416, 0x8250f43f, 0xa4ee2b63, 0x0406324f, 0x1c876d60, 0xebe5be2c, 0x6eb1515b), + }; + secp256k1_generator gen; + secp256k1_ge ge; + secp256k1_ge_storage ges; + int i; + unsigned char v[32]; + static const unsigned char s[32] = {0}; + secp256k1_scalar sc; + secp256k1_scalar_set_b32(&sc, s, NULL); + for (i = 1; i <= 32; i++) { + memset(v, 0, 31); + v[31] = i; + CHECK(secp256k1_generator_generate_blinded(ctx, &gen, v, s)); + secp256k1_generator_load(&ge, &gen); + secp256k1_ge_to_storage(&ges, &ge); + CHECK(memcmp(&ges, &results[i - 1], sizeof(secp256k1_ge_storage)) == 0); + CHECK(secp256k1_generator_generate(ctx, &gen, v)); + secp256k1_generator_load(&ge, &gen); + secp256k1_ge_to_storage(&ges, &ge); + CHECK(memcmp(&ges, &results[i - 1], sizeof(secp256k1_ge_storage)) == 0); + } +} + +void test_generator_fixed_vector(void) { + const unsigned char 
two_g[33] = { + 0x0b, + 0xc6, 0x04, 0x7f, 0x94, 0x41, 0xed, 0x7d, 0x6d, 0x30, 0x45, 0x40, 0x6e, 0x95, 0xc0, 0x7c, 0xd8, + 0x5c, 0x77, 0x8e, 0x4b, 0x8c, 0xef, 0x3c, 0xa7, 0xab, 0xac, 0x09, 0xb9, 0x5c, 0x70, 0x9e, 0xe5 + }; + unsigned char result[33]; + secp256k1_generator parse; + + CHECK(secp256k1_generator_parse(ctx, &parse, two_g)); + CHECK(secp256k1_generator_serialize(ctx, result, &parse)); + CHECK(memcmp(two_g, result, 33) == 0); + + result[0] = 0x0a; + CHECK(secp256k1_generator_parse(ctx, &parse, result)); + result[0] = 0x08; + CHECK(!secp256k1_generator_parse(ctx, &parse, result)); +} + +void run_generator_tests(void) { + test_shallue_van_de_woestijne(); + test_generator_fixed_vector(); + test_generator_api(); + test_generator_generate(); +} + +#endif diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h index cd651591c4e16..564f679484a92 100644 --- a/src/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h @@ -264,4 +264,175 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha secp256k1_fe_equal_var(&rx, &r.x); } +/* Data that is used by the batch verification ecmult callback */ +typedef struct { + const secp256k1_context *ctx; + /* Seed for the random number generator */ + unsigned char chacha_seed[32]; + /* Caches randomizers generated by the PRNG which returns two randomizers per call. Caching + * avoids having to call the PRNG twice as often. The very first randomizer will be set to 1 and + * the PRNG is called at every odd indexed schnorrsig to fill the cache. 
*/ + secp256k1_scalar randomizer_cache[2]; + /* Signature, message, public key tuples to verify */ + const uint8_t *const *sig; + const unsigned char *const *msg32; + const secp256k1_pubkey *const *pk; + size_t n_sigs; +} secp256k1_schnorrsig_verify_ecmult_context; + +/* Callback function which is called by ecmult_multi in order to convert the ecmult_context + * consisting of signature, message and public key tuples into scalars and points. */ +static int secp256k1_schnorrsig_verify_batch_ecmult_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) { + secp256k1_schnorrsig_verify_ecmult_context *ecmult_context = (secp256k1_schnorrsig_verify_ecmult_context *) data; + + if (idx % 4 == 2) { + /* Every idx corresponds to a (scalar,point)-tuple. So this callback is called with 4 + * consecutive tuples before we need to call the RNG for new randomizers: + * (-randomizer_cache[0], R1) + * (-randomizer_cache[0]*e1, P1) + * (-randomizer_cache[1], R2) + * (-randomizer_cache[1]*e2, P2) */ + secp256k1_scalar_chacha20(&ecmult_context->randomizer_cache[0], &ecmult_context->randomizer_cache[1], ecmult_context->chacha_seed, idx / 4); + } + + /* R */ + if (idx % 2 == 0) { + secp256k1_fe rx; + *sc = ecmult_context->randomizer_cache[(idx / 2) % 2]; + if (!secp256k1_fe_set_b32(&rx, &ecmult_context->sig[idx / 2])) { + return 0; + } + if (!secp256k1_ge_set_xquad(pt, &rx)) { + return 0; + } + /* eP */ + } else { + unsigned char buf[33]; + size_t buflen = sizeof(buf); + secp256k1_sha256 sha; + secp256k1_sha256_initialize(&sha); + secp256k1_sha256_write(&sha, ecmult_context->sig[idx / 2], 32); + secp256k1_ec_pubkey_serialize(ecmult_context->ctx, buf, &buflen, ecmult_context->pk[idx / 2], SECP256K1_EC_COMPRESSED); + secp256k1_sha256_write(&sha, buf, buflen); + secp256k1_sha256_write(&sha, ecmult_context->msg32[idx / 2], 32); + secp256k1_sha256_finalize(&sha, buf); + + secp256k1_scalar_set_b32(sc, buf, NULL); + secp256k1_scalar_mul(sc, sc, 
&ecmult_context->randomizer_cache[(idx / 2) % 2]); + + if (!secp256k1_pubkey_load(ecmult_context->ctx, pt, ecmult_context->pk[idx / 2])) { + return 0; + } + } + return 1; +} + +/** Helper function for batch verification. Hashes signature verification data into the + * randomization seed and initializes ecmult_context. + * + * Returns 1 if the randomizer was successfully initialized. + * + * Args: ctx: a secp256k1 context object + * Out: ecmult_context: context for batch_ecmult_callback + * In/Out sha: an initialized sha256 object which hashes the schnorrsig input in order to get a + * seed for the randomizer PRNG + * In: sig: array of signatures, or NULL if there are no signatures + * msg32: array of messages, or NULL if there are no signatures + * pk: array of public keys, or NULL if there are no signatures + * n_sigs: number of signatures in above arrays (must be 0 if they are NULL) + */ +int secp256k1_schnorrsig_verify_batch_init_randomizer(const secp256k1_context *ctx, secp256k1_schnorrsig_verify_ecmult_context *ecmult_context, secp256k1_sha256 *sha, const uint8_t *const *sig, const unsigned char *const *msg32, const secp256k1_pubkey *const *pk, size_t n_sigs) { + size_t i; + + if (n_sigs > 0) { + ARG_CHECK(sig != NULL); + ARG_CHECK(msg32 != NULL); + ARG_CHECK(pk != NULL); + } + + for (i = 0; i < n_sigs; i++) { + unsigned char buf[33]; + size_t buflen = sizeof(buf); + secp256k1_sha256_write(sha, sig[i], 64); + secp256k1_sha256_write(sha, msg32[i], 32); + secp256k1_ec_pubkey_serialize(ctx, buf, &buflen, pk[i], SECP256K1_EC_COMPRESSED); + secp256k1_sha256_write(sha, buf, 32); + } + ecmult_context->ctx = ctx; + ecmult_context->sig = sig; + ecmult_context->msg32 = msg32; + ecmult_context->pk = pk; + ecmult_context->n_sigs = n_sigs; + + return 1; +} + +/** Helper function for batch verification. Sums the s part of all signatures multiplied by their + * randomizer. + * + * Returns 1 if s is successfully summed. 
+ * + * In/Out: s: the s part of the input sigs is added to this s argument + * In: chacha_seed: PRNG seed for computing randomizers + * sig: array of signatures, or NULL if there are no signatures + * n_sigs: number of signatures in above array (must be 0 if they are NULL) + */ +int secp256k1_schnorrsig_verify_batch_sum_s(secp256k1_scalar *s, unsigned char *chacha_seed, const uint8_t *const *sig, size_t n_sigs) { + secp256k1_scalar randomizer_cache[2]; + size_t i; + + secp256k1_scalar_set_int(&randomizer_cache[0], 1); + for (i = 0; i < n_sigs; i++) { + int overflow; + secp256k1_scalar term; + if (i % 2 == 1) { + secp256k1_scalar_chacha20(&randomizer_cache[0], &randomizer_cache[1], chacha_seed, i / 2); + } + + secp256k1_scalar_set_b32(&term, &sig[i][32], &overflow); + if (overflow) { + return 0; + } + secp256k1_scalar_mul(&term, &term, &randomizer_cache[i % 2]); + secp256k1_scalar_add(s, s, &term); + } + return 1; +} + +/* schnorrsig batch verification. + * Seeds a random number generator with the inputs and derives a random number ai for every + * signature i. Fails if y-coordinate of any R is not a quadratic residue or if + * 0 != -(s1 + a2*s2 + ... + au*su)G + R1 + a2*R2 + ... + au*Ru + e1*P1 + (a2*e2)P2 + ... + (au*eu)Pu. */ +int secp256k1_schnorrsig_verify_batch(const secp256k1_context *ctx, secp256k1_scratch *scratch, const uint8_t *const *sig, const unsigned char *const *msg32, const secp256k1_pubkey *const *pk, size_t n_sigs) { + secp256k1_schnorrsig_verify_ecmult_context ecmult_context; + secp256k1_sha256 sha; + secp256k1_scalar s; + secp256k1_gej rj; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(scratch != NULL); + /* Check that n_sigs is less than half of the maximum size_t value. This is necessary because + * the number of points given to ecmult_multi is 2*n_sigs. */ + ARG_CHECK(n_sigs <= SIZE_MAX / 2); + /* Check that n_sigs is less than 2^31 to ensure the same behavior of this function on 32-bit + * and 64-bit platforms. 
*/ + ARG_CHECK(n_sigs < (size_t)(1 << 31)); + + secp256k1_sha256_initialize(&sha); + if (!secp256k1_schnorrsig_verify_batch_init_randomizer(ctx, &ecmult_context, &sha, sig, msg32, pk, n_sigs)) { + return 0; + } + secp256k1_sha256_finalize(&sha, ecmult_context.chacha_seed); + secp256k1_scalar_set_int(&ecmult_context.randomizer_cache[0], 1); + + secp256k1_scalar_clear(&s); + if (!secp256k1_schnorrsig_verify_batch_sum_s(&s, ecmult_context.chacha_seed, sig, n_sigs)) { + return 0; + } + secp256k1_scalar_negate(&s, &s); + + return secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &rj, &s, secp256k1_schnorrsig_verify_batch_ecmult_callback, (void *) &ecmult_context, 2 * n_sigs) + && secp256k1_gej_is_infinity(&rj); +} + #endif diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h index aaaa3d88277ad..65565213efd14 100644 --- a/src/secp256k1/src/scalar.h +++ b/src/secp256k1/src/scalar.h @@ -46,6 +46,9 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c /** Set a scalar to an unsigned integer. */ static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v); +/** Set a scalar to an unsigned 64-bit integer */ +static void secp256k1_scalar_set_u64(secp256k1_scalar *r, uint64_t v); + /** Convert a scalar to a byte array. */ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a); @@ -62,6 +65,9 @@ static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, * the low bits that were shifted off */ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n); +/** Compute the square of a scalar (modulo the group order). */ +static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a); + /** Compute the inverse of a scalar (modulo the group order). 
/** Set the scalar to an unsigned 64-bit integer v (no reduction needed: v < group order). */
SECP256K1_INLINE static void secp256k1_scalar_set_u64(secp256k1_scalar *r, uint64_t v) {
    r->d[0] = v;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
}

/** Compute the 512-bit square l[0..7] = a^2 (schoolbook, exploiting symmetry so each
 *  cross product a_i*a_j (i != j) is computed once and added twice). The x86-64 path
 *  mirrors the structure of secp256k1_scalar_mul_512. */
static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) {
#ifdef USE_ASM_X86_64
    __asm__ __volatile__(
    /* Preload */
    "movq 0(%%rdi), %%r11\n"
    "movq 8(%%rdi), %%r12\n"
    "movq 16(%%rdi), %%r13\n"
    "movq 24(%%rdi), %%r14\n"
    /* (rax,rdx) = a0 * a0 */
    "movq %%r11, %%rax\n"
    "mulq %%r11\n"
    /* Extract l0 */
    "movq %%rax, 0(%%rsi)\n"
    /* (r8,r9,r10) = (rdx,0) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += 2 * a0 * a1 */
    "movq %%r11, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l1 */
    "movq %%r8, 8(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += 2 * a0 * a2 */
    "movq %%r11, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a1 * a1 */
    "movq %%r12, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l2 */
    "movq %%r9, 16(%%rsi)\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += 2 * a0 * a3 */
    "movq %%r11, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += 2 * a1 * a2 */
    "movq %%r12, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract l3 */
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += 2 * a1 * a3 */
    "movq %%r12, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a2 * a2 */
    "movq %%r13, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l4 */
    "movq %%r8, 32(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += 2 * a2 * a3 */
    "movq %%r13, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l5 */
    "movq %%r9, 40(%%rsi)\n"
    /* (r10,r8) += a3 * a3 */
    "movq %%r14, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    /* Extract l6 */
    "movq %%r10, 48(%%rsi)\n"
    /* Extract l7 */
    "movq %%r8, 56(%%rsi)\n"
    :
    : "S"(l), "D"(a->d)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory");
#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l[0..7] = a[0..3] * b[0..3]. */
    muladd_fast(a->d[0], a->d[0]);
    extract_fast(l[0]);
    muladd2(a->d[0], a->d[1]);
    extract(l[1]);
    muladd2(a->d[0], a->d[2]);
    muladd(a->d[1], a->d[1]);
    extract(l[2]);
    muladd2(a->d[0], a->d[3]);
    muladd2(a->d[1], a->d[2]);
    extract(l[3]);
    muladd2(a->d[1], a->d[3]);
    muladd(a->d[2], a->d[2]);
    extract(l[4]);
    muladd2(a->d[2], a->d[3]);
    extract(l[5]);
    muladd_fast(a->d[3], a->d[3]);
    extract_fast(l[6]);
    VERIFY_CHECK(c1 == 0);
    l[7] = c0;
#endif
}

/** Compute the square of a scalar (modulo the group order): wide square then reduce. */
static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t l[8];
    secp256k1_scalar_sqr_512(l, a);
    secp256k1_scalar_reduce_512(r, l);
}

#define ROTL32(x,n) ((x) << (n) | (x) >> (32-(n)))
#define QUARTERROUND(a,b,c,d) \
  a += b; d = ROTL32(d ^ a, 16); \
  c += d; b = ROTL32(b ^ c, 12); \
  a += b; d = ROTL32(d ^ a, 8); \
  c += d; b = ROTL32(b ^ c, 7);

#ifdef WORDS_BIGENDIAN
#define LE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
#define BE32(p) (p)
#else
#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
#define LE32(p) (p)
#endif

/** Derive two scalars from a 32-byte seed and a 64-bit index using the ChaCha20 block
 *  function (seed = key, idx = 64-bit counter). Rejection sampling: if either 256-bit
 *  half of the keystream block is >= the group order, retry with the nonce word x15 set
 *  to the retry count so a fresh block is produced.
 *  NOTE(review): output words are packed big-endian into the scalar limbs — presumably
 *  to match scalar_set_b32 byte order; confirm against the 8x32 implementation. */
static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t idx) {
    size_t n;
    size_t over_count = 0;   /* number of rejected (overflowing) blocks so far */
    uint32_t seed32[8];
    uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
    int over1, over2;

    memcpy((void *) seed32, (const void *) seed, 32);
    do {
        /* "expand 32-byte k" constants, then key, counter (idx), nonce (0, over_count). */
        x0 = 0x61707865;
        x1 = 0x3320646e;
        x2 = 0x79622d32;
        x3 = 0x6b206574;
        x4 = LE32(seed32[0]);
        x5 = LE32(seed32[1]);
        x6 = LE32(seed32[2]);
        x7 = LE32(seed32[3]);
        x8 = LE32(seed32[4]);
        x9 = LE32(seed32[5]);
        x10 = LE32(seed32[6]);
        x11 = LE32(seed32[7]);
        x12 = idx;
        x13 = idx >> 32;
        x14 = 0;
        x15 = over_count;

        /* 10 double rounds = 20 ChaCha rounds. */
        n = 10;
        while (n--) {
            QUARTERROUND(x0, x4, x8,x12)
            QUARTERROUND(x1, x5, x9,x13)
            QUARTERROUND(x2, x6,x10,x14)
            QUARTERROUND(x3, x7,x11,x15)
            QUARTERROUND(x0, x5,x10,x15)
            QUARTERROUND(x1, x6,x11,x12)
            QUARTERROUND(x2, x7, x8,x13)
            QUARTERROUND(x3, x4, x9,x14)
        }

        /* Feed-forward: add the initial state back in. */
        x0 += 0x61707865;
        x1 += 0x3320646e;
        x2 += 0x79622d32;
        x3 += 0x6b206574;
        x4 += LE32(seed32[0]);
        x5 += LE32(seed32[1]);
        x6 += LE32(seed32[2]);
        x7 += LE32(seed32[3]);
        x8 += LE32(seed32[4]);
        x9 += LE32(seed32[5]);
        x10 += LE32(seed32[6]);
        x11 += LE32(seed32[7]);
        x12 += idx;
        x13 += idx >> 32;
        x14 += 0;
        x15 += over_count;

        /* Pack words x0..x7 into r1 and x8..x15 into r2 (big-endian within each limb). */
        r1->d[3] = BE32((uint64_t) x0) << 32 | BE32(x1);
        r1->d[2] = BE32((uint64_t) x2) << 32 | BE32(x3);
        r1->d[1] = BE32((uint64_t) x4) << 32 | BE32(x5);
        r1->d[0] = BE32((uint64_t) x6) << 32 | BE32(x7);
        r2->d[3] = BE32((uint64_t) x8) << 32 | BE32(x9);
        r2->d[2] = BE32((uint64_t) x10) << 32 | BE32(x11);
        r2->d[1] = BE32((uint64_t) x12) << 32 | BE32(x13);
        r2->d[0] = BE32((uint64_t) x14) << 32 | BE32(x15);

        over1 = secp256k1_scalar_check_overflow(r1);
        over2 = secp256k1_scalar_check_overflow(r2);
        over_count++;
    } while (over1 | over2);
}
*/ + muladd_fast(a->d[0], a->d[0]); + extract_fast(l[0]); + muladd2(a->d[0], a->d[1]); + extract(l[1]); + muladd2(a->d[0], a->d[2]); + muladd(a->d[1], a->d[1]); + extract(l[2]); + muladd2(a->d[0], a->d[3]); + muladd2(a->d[1], a->d[2]); + extract(l[3]); + muladd2(a->d[0], a->d[4]); + muladd2(a->d[1], a->d[3]); + muladd(a->d[2], a->d[2]); + extract(l[4]); + muladd2(a->d[0], a->d[5]); + muladd2(a->d[1], a->d[4]); + muladd2(a->d[2], a->d[3]); + extract(l[5]); + muladd2(a->d[0], a->d[6]); + muladd2(a->d[1], a->d[5]); + muladd2(a->d[2], a->d[4]); + muladd(a->d[3], a->d[3]); + extract(l[6]); + muladd2(a->d[0], a->d[7]); + muladd2(a->d[1], a->d[6]); + muladd2(a->d[2], a->d[5]); + muladd2(a->d[3], a->d[4]); + extract(l[7]); + muladd2(a->d[1], a->d[7]); + muladd2(a->d[2], a->d[6]); + muladd2(a->d[3], a->d[5]); + muladd(a->d[4], a->d[4]); + extract(l[8]); + muladd2(a->d[2], a->d[7]); + muladd2(a->d[3], a->d[6]); + muladd2(a->d[4], a->d[5]); + extract(l[9]); + muladd2(a->d[3], a->d[7]); + muladd2(a->d[4], a->d[6]); + muladd(a->d[5], a->d[5]); + extract(l[10]); + muladd2(a->d[4], a->d[7]); + muladd2(a->d[5], a->d[6]); + extract(l[11]); + muladd2(a->d[5], a->d[7]); + muladd(a->d[6], a->d[6]); + extract(l[12]); + muladd2(a->d[6], a->d[7]); + extract(l[13]); + muladd_fast(a->d[7], a->d[7]); + extract_fast(l[14]); + VERIFY_CHECK(c1 == 0); + l[15] = c0; +} + #undef sumadd #undef sumadd_fast #undef muladd @@ -585,6 +656,12 @@ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { return ret; } +static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { + uint32_t l[16]; + secp256k1_scalar_sqr_512(l, a); + secp256k1_scalar_reduce_512(r, l); +} + static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; @@ -629,6 +706,99 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> 
#define ROTL32(x,n) ((x) << (n) | (x) >> (32-(n)))
#define QUARTERROUND(a,b,c,d) \
  a += b; d = ROTL32(d ^ a, 16); \
  c += d; b = ROTL32(b ^ c, 12); \
  a += b; d = ROTL32(d ^ a, 8); \
  c += d; b = ROTL32(b ^ c, 7);

#ifdef WORDS_BIGENDIAN
#define LE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
#define BE32(p) (p)
#else
#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
#define LE32(p) (p)
#endif

/** 8x32 variant: derive two scalars from a 32-byte seed and a 64-bit index using the
 *  ChaCha20 block function (seed = key, idx = counter). Rejection sampling: if either
 *  256-bit half of the keystream block is >= the group order, retry with nonce word x15
 *  set to the retry count. Must produce the same output as the 4x64 implementation. */
static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t idx) {
    size_t n;
    size_t over_count = 0;   /* number of rejected (overflowing) blocks so far */
    uint32_t seed32[8];
    uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
    int over1, over2;

    memcpy((void *) seed32, (const void *) seed, 32);
    do {
        /* "expand 32-byte k" constants, then key, counter (idx), nonce (0, over_count). */
        x0 = 0x61707865;
        x1 = 0x3320646e;
        x2 = 0x79622d32;
        x3 = 0x6b206574;
        x4 = LE32(seed32[0]);
        x5 = LE32(seed32[1]);
        x6 = LE32(seed32[2]);
        x7 = LE32(seed32[3]);
        x8 = LE32(seed32[4]);
        x9 = LE32(seed32[5]);
        x10 = LE32(seed32[6]);
        x11 = LE32(seed32[7]);
        x12 = idx;
        x13 = idx >> 32;
        x14 = 0;
        x15 = over_count;

        /* 10 double rounds = 20 ChaCha rounds. */
        n = 10;
        while (n--) {
            QUARTERROUND(x0, x4, x8,x12)
            QUARTERROUND(x1, x5, x9,x13)
            QUARTERROUND(x2, x6,x10,x14)
            QUARTERROUND(x3, x7,x11,x15)
            QUARTERROUND(x0, x5,x10,x15)
            QUARTERROUND(x1, x6,x11,x12)
            QUARTERROUND(x2, x7, x8,x13)
            QUARTERROUND(x3, x4, x9,x14)
        }

        /* Feed-forward: add the initial state back in. */
        x0 += 0x61707865;
        x1 += 0x3320646e;
        x2 += 0x79622d32;
        x3 += 0x6b206574;
        x4 += LE32(seed32[0]);
        x5 += LE32(seed32[1]);
        x6 += LE32(seed32[2]);
        x7 += LE32(seed32[3]);
        x8 += LE32(seed32[4]);
        x9 += LE32(seed32[5]);
        x10 += LE32(seed32[6]);
        x11 += LE32(seed32[7]);
        x12 += idx;
        x13 += idx >> 32;
        x14 += 0;
        x15 += over_count;

        /* Pack words x0..x7 into r1 and x8..x15 into r2 (big-endian within each limb). */
        r1->d[7] = BE32(x0);
        r1->d[6] = BE32(x1);
        r1->d[5] = BE32(x2);
        r1->d[4] = BE32(x3);
        r1->d[3] = BE32(x4);
        r1->d[2] = BE32(x5);
        r1->d[1] = BE32(x6);
        r1->d[0] = BE32(x7);
        r2->d[7] = BE32(x8);
        r2->d[6] = BE32(x9);
        r2->d[5] = BE32(x10);
        r2->d[4] = BE32(x11);
        r2->d[3] = BE32(x12);
        r2->d[2] = BE32(x13);
        r2->d[1] = BE32(x14);
        r2->d[0] = BE32(x15);

        over1 = secp256k1_scalar_check_overflow(r1);
        over2 = secp256k1_scalar_check_overflow(r2);
        over_count++;
    } while (over1 | over2);
}
+ n) % EXHAUSTIVE_TEST_ORDER; + *r2 = (seed[1] + n) % EXHAUSTIVE_TEST_ORDER; +} + static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r, sizeof(*r)); diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c index 8f34c35283d3d..2edb1ee2a6374 100644 --- a/src/secp256k1/src/secp256k1.c +++ b/src/secp256k1/src/secp256k1.c @@ -31,6 +31,22 @@ # include #endif +#ifdef ENABLE_MODULE_GENERATOR +# include "include/secp256k1_generator.h" +#endif + +#ifdef ENABLE_MODULE_COMMITMENT +# include "include/secp256k1_commitment.h" +#endif + +#ifdef ENABLE_MODULE_BULLETPROOF +# include "include/secp256k1_bulletproofs.h" +#endif + +#ifdef ENABLE_MODULE_AGGSIG +# include "include/secp256k1_aggsig.h" +#endif + #define ARG_CHECK(cond) do { \ if (EXPECT(!(cond), 0)) { \ secp256k1_callback_call(&ctx->illegal_callback, #cond); \ @@ -737,6 +753,46 @@ int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey * return 1; } +int secp256k1_ec_privkey_tweak_inv(const secp256k1_context* ctx, unsigned char *seckey) { + secp256k1_scalar sec; + secp256k1_scalar inv; + int ret = 0; + int overflow = 0; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(seckey != NULL); + + secp256k1_scalar_set_b32(&sec, seckey, &overflow); + ret = !overflow; + memset(seckey, 0, 32); + if (ret) { + secp256k1_scalar_inverse(&inv, &sec); + secp256k1_scalar_get_b32(seckey, &inv); + secp256k1_scalar_clear(&inv); + } + secp256k1_scalar_clear(&sec); + return ret; +} + +int secp256k1_ec_privkey_tweak_neg(const secp256k1_context* ctx, unsigned char *seckey) { + secp256k1_scalar sec; + secp256k1_scalar neg; + int ret = 0; + int overflow = 0; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(seckey != NULL); + + secp256k1_scalar_set_b32(&sec, seckey, &overflow); + ret = !overflow; + memset(seckey, 0, 32); + if (ret) { + secp256k1_scalar_negate(&neg, &sec); + secp256k1_scalar_get_b32(seckey, &neg); + 
secp256k1_scalar_clear(&neg); + } + secp256k1_scalar_clear(&sec); + return ret; +} + int secp256k1_tagged_sha256(const secp256k1_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { secp256k1_sha256 sha; VERIFY_CHECK(ctx != NULL); @@ -758,6 +814,22 @@ int secp256k1_tagged_sha256(const secp256k1_context* ctx, unsigned char *hash32, # include "modules/recovery/main_impl.h" #endif +#ifdef ENABLE_MODULE_GENERATOR +# include "modules/generator/main_impl.h" +#endif + +#ifdef ENABLE_MODULE_COMMITMENT +# include "modules/commitment/main_impl.h" +#endif + +#ifdef ENABLE_MODULE_BULLETPROOF +# include "modules/bulletproofs/main_impl.h" +#endif + +#ifdef ENABLE_MODULE_AGGSIG +# include "modules/aggsig/main_impl.h" +#endif + #ifdef ENABLE_MODULE_EXTRAKEYS # include "modules/extrakeys/main_impl.h" #endif