From 9907c88fdb0f07582d02d9ee8a8a3f25ef0198cc Mon Sep 17 00:00:00 2001
From: "Tobin C. Harding"
Date: Fri, 16 Dec 2022 11:50:49 +1100
Subject: [PATCH] Upgrade the vendored libsecp256k1 code

`libsecp256k1` v0.2.0 was just released. Update the vendored code using
`./vendor-libsecp.sh depend 0_8_0 21ffe4b`

```
git show 21ffe4b
commit 21ffe4b22a9683cf24ae0763359e401d1284cc7a (tag: v0.2.0)
Merge: 8c949f5 e025ccd
Author: Pieter Wuille
Date:   Mon Dec 12 17:00:52 2022 -0500

    Merge bitcoin-core/secp256k1#1055: Prepare initial release

    e025ccdf7473702a76bb13d763dc096548ffefba release: prepare for initial release 0.2.0 (Jonas Nick)
    6d1784a2e2c1c5a8d89ffb08a7f76fa15e84fff5 build: add missing files to EXTRA_DIST (Jonas Nick)
    13bf1b6b324f2ed1c1fb4c8d17a4febd3556839e changelog: make order of change types match keepachangelog.com (Jonas Nick)
    b1f992a552785395d2e60b10862626fd11f66f84 doc: improve release process (Jonas Nick)
    ad39e2dc417f85c1577a6a6a9c519f5c60453def build: change package version to 0.1.0-dev (Jonas Nick)
    90618e9263ebc2a0d73d487d6d94fd3af96b973c doc: move CHANGELOG from doc/ to root directory (Jonas Nick)

    Pull request description:

      Based on #964

    ACKs for top commit:
      sipa:
        ACK e025ccdf7473702a76bb13d763dc096548ffefba

    Tree-SHA512: b9ab71d7362537d383a32b5e321ef44069f00e3e92340375bcd662267bc5a60c2bad60222998e6602cfac24ad65efb23d772eac37c86065036b90ef090b54c49
```

Requires a new version of `secp256k1-sys`, use v0.8.0

- Update the `secp256k1-sys` manifest (including links field)
- Update symbols to use 0_8_0
- Add a changelog entry
- depend on the new version in `secp256k1`
---
 Cargo.toml | 2 +-
 secp256k1-sys/CHANGELOG.md | 4 +
 secp256k1-sys/Cargo.toml | 4 +-
 .../depend/secp256k1-HEAD-revision.txt | 2 +-
 secp256k1-sys/depend/secp256k1/.cirrus.yml | 183 +-
 secp256k1-sys/depend/secp256k1/.gitignore | 10 +-
 secp256k1-sys/depend/secp256k1/CHANGELOG.md | 28 +
 secp256k1-sys/depend/secp256k1/Makefile.am | 76 +-
 secp256k1-sys/depend/secp256k1/README.md | 19 +-
 .../secp256k1/build-aux/m4/bitcoin_secp.m4 | 17 +-
 secp256k1-sys/depend/secp256k1/ci/cirrus.sh | 45 +-
 .../secp256k1/ci/linux-debian.Dockerfile | 32 +-
 secp256k1-sys/depend/secp256k1/configure.ac | 193 +-
 .../secp256k1/contrib/lax_der_parsing.c | 8 +-
 .../secp256k1/contrib/lax_der_parsing.h | 10 +-
 .../contrib/lax_der_privatekey_parsing.c | 14 +-
 .../contrib/lax_der_privatekey_parsing.h | 9 +-
 .../depend/secp256k1/doc/CHANGELOG.md | 12 -
 .../depend/secp256k1/doc/release-process.md | 62 +-
 .../secp256k1/examples/EXAMPLES_COPYING | 121 +
 .../depend/secp256k1/examples/ecdh.c | 123 +
 .../depend/secp256k1/examples/ecdsa.c | 133 +
 .../depend/secp256k1/examples/random.h | 73 +
 .../depend/secp256k1/examples/schnorr.c | 148 +
 .../depend/secp256k1/include/secp256k1.h | 447 +-
 .../depend/secp256k1/include/secp256k1.h.orig | 467 +-
 .../depend/secp256k1/include/secp256k1_ecdh.h | 26 +-
 .../secp256k1/include/secp256k1_extrakeys.h | 129 +-
 .../include/secp256k1_preallocated.h | 56 +-
 .../secp256k1/include/secp256k1_recovery.h | 50 +-
 .../secp256k1/include/secp256k1_schnorrsig.h | 55 +-
 .../secp256k1/sage/gen_exhaustive_groups.sage | 6 +-
 .../sage/gen_split_lambda_constants.sage | 16 +-
 .../depend/secp256k1/sage/group_prover.sage | 64 +-
 .../sage/prove_group_implementations.sage | 171 +-
 .../secp256k1/sage/weierstrass_prover.sage | 13 +-
 .../secp256k1/src/asm/field_10x26_arm.s | 16 +-
 .../depend/secp256k1/src/assumptions.h | 9 +-
 .../depend/secp256k1/src/basic-config.h | 17 -
 secp256k1-sys/depend/secp256k1/src/bench.c | 49 +-
 secp256k1-sys/depend/secp256k1/src/bench.h | 18 +-
 .../depend/secp256k1/src/bench_ecmult.c | 130 +-
 .../depend/secp256k1/src/bench_internal.c | 159 +-
 secp256k1-sys/depend/secp256k1/src/ecdsa.h | 8 +-
 .../depend/secp256k1/src/ecdsa_impl.h | 124 +-
 secp256k1-sys/depend/secp256k1/src/eckey.h | 12 +-
 .../depend/secp256k1/src/eckey_impl.h | 74 +-
 secp256k1-sys/depend/secp256k1/src/ecmult.h | 17 +-
 .../secp256k1/src/ecmult_compute_table.h | 6 +-
 .../secp256k1/src/ecmult_compute_table_impl.h | 36 +-
 .../depend/secp256k1/src/ecmult_const.h | 2 +-
 .../depend/secp256k1/src/ecmult_const_impl.h | 99 +-
 .../depend/secp256k1/src/ecmult_gen.h | 26 +-
 .../secp256k1/src/ecmult_gen_compute_table.h | 2 +-
 .../src/ecmult_gen_compute_table_impl.h | 42 +-
 .../depend/secp256k1/src/ecmult_gen_impl.h | 99 +-
 .../depend/secp256k1/src/ecmult_impl.h | 548 ++-
 secp256k1-sys/depend/secp256k1/src/field.h | 71 +-
 .../depend/secp256k1/src/field_10x26.h | 4 +-
 .../depend/secp256k1/src/field_10x26_impl.h | 236 +-
 .../depend/secp256k1/src/field_5x52.h | 4 +-
 .../secp256k1/src/field_5x52_asm_impl.h | 4 +-
 .../depend/secp256k1/src/field_5x52_impl.h | 212 +-
 .../secp256k1/src/field_5x52_int128_impl.h | 256 +-
 .../depend/secp256k1/src/field_impl.h | 86 +-
 secp256k1-sys/depend/secp256k1/src/group.h | 120 +-
 .../depend/secp256k1/src/group_impl.h | 645 +--
 secp256k1-sys/depend/secp256k1/src/hash.h | 30 +-
 .../depend/secp256k1/src/hash_impl.h | 179 +-
 secp256k1-sys/depend/secp256k1/src/int128.h | 85 +
 .../depend/secp256k1/src/int128_impl.h | 18 +
 .../depend/secp256k1/src/int128_native.h | 19 +
 .../depend/secp256k1/src/int128_native_impl.h | 87 +
 .../depend/secp256k1/src/int128_struct.h | 14 +
 .../depend/secp256k1/src/int128_struct_impl.h | 192 +
 secp256k1-sys/depend/secp256k1/src/modinv32.h | 12 +-
 .../depend/secp256k1/src/modinv32_impl.h | 154 +-
 secp256k1-sys/depend/secp256k1/src/modinv64.h | 12 +-
 .../depend/secp256k1/src/modinv64_impl.h | 374 +-
 .../src/modules/ecdh/Makefile.am.include | 2 +-
 .../secp256k1/src/modules/ecdh/bench_impl.h | 14 +-
 .../secp256k1/src/modules/ecdh/main_impl.h | 46 +-
 .../secp256k1/src/modules/ecdh/tests_impl.h | 103 +-
 .../src/modules/extrakeys/Makefile.am.include | 2 +-
 .../src/modules/extrakeys/main_impl.h | 162 +-
 .../modules/extrakeys/tests_exhaustive_impl.h | 48 +-
 .../src/modules/extrakeys/tests_impl.h | 532 +--
 .../src/modules/recovery/Makefile.am.include | 2 +-
 .../src/modules/recovery/bench_impl.h | 18 +-
 .../src/modules/recovery/main_impl.h | 116 +-
 .../modules/recovery/tests_exhaustive_impl.h | 82 +-
 .../src/modules/recovery/tests_impl.h | 268 +-
 .../modules/schnorrsig/Makefile.am.include | 2 +-
 .../src/modules/schnorrsig/bench_impl.h | 40 +-
 .../src/modules/schnorrsig/main_impl.h | 174 +-
 .../schnorrsig/tests_exhaustive_impl.h | 72 +-
 .../src/modules/schnorrsig/tests_impl.h | 347 +-
 .../depend/secp256k1/src/precompute_ecmult.c | 21 +-
 .../secp256k1/src/precompute_ecmult_gen.c | 9 +-
 .../depend/secp256k1/src/precomputed_ecmult.c | 8 +-
 .../depend/secp256k1/src/precomputed_ecmult.h | 8 +-
 .../secp256k1/src/precomputed_ecmult_gen.c | 2 +-
 .../secp256k1/src/precomputed_ecmult_gen.h | 4 +-
 secp256k1-sys/depend/secp256k1/src/scalar.h | 50 +-
 .../depend/secp256k1/src/scalar_4x64.h | 2 +-
 .../depend/secp256k1/src/scalar_4x64_impl.h | 264 +-
 .../depend/secp256k1/src/scalar_8x32.h | 2 +-
 .../depend/secp256k1/src/scalar_8x32_impl.h | 114 +-
 .../depend/secp256k1/src/scalar_impl.h | 74 +-
 .../depend/secp256k1/src/scalar_low.h | 2 +-
 .../depend/secp256k1/src/scalar_low_impl.h | 52 +-
 secp256k1-sys/depend/secp256k1/src/scratch.h | 20 +-
 .../depend/secp256k1/src/scratch_impl.h | 76 +-
 .../depend/secp256k1/src/scratch_impl.h.orig | 99 +
 .../depend/secp256k1/src/secp256k1.c | 500 +-
 .../depend/secp256k1/src/secp256k1.c.orig | 530 ++-
 secp256k1-sys/depend/secp256k1/src/selftest.h | 16 +-
 secp256k1-sys/depend/secp256k1/src/testrand.h | 22 +-
 .../depend/secp256k1/src/testrand_impl.h | 88 +-
 secp256k1-sys/depend/secp256k1/src/tests.c | 4216 ++++++++++-------
 .../depend/secp256k1/src/tests_exhaustive.c | 250 +-
 secp256k1-sys/depend/secp256k1/src/util.h | 124 +-
 .../depend/secp256k1/src/util.h.orig | 132 +-
 .../secp256k1/src/valgrind_ctime_test.c | 64 +-
 secp256k1-sys/src/lib.rs | 118 +-
 secp256k1-sys/src/recovery.rs | 10 +-
 126 files changed, 9287 insertions(+), 6755 deletions(-)
 create mode 100644 secp256k1-sys/depend/secp256k1/CHANGELOG.md
 delete mode 100644 secp256k1-sys/depend/secp256k1/doc/CHANGELOG.md
 create mode 100644 secp256k1-sys/depend/secp256k1/examples/EXAMPLES_COPYING
 create mode 100644 secp256k1-sys/depend/secp256k1/examples/ecdh.c
 create mode 100644 secp256k1-sys/depend/secp256k1/examples/ecdsa.c
 create mode 100644 secp256k1-sys/depend/secp256k1/examples/random.h
 create mode 100644 secp256k1-sys/depend/secp256k1/examples/schnorr.c
 delete mode 100644 secp256k1-sys/depend/secp256k1/src/basic-config.h
 create mode 100644 secp256k1-sys/depend/secp256k1/src/int128.h
 create mode 100644 secp256k1-sys/depend/secp256k1/src/int128_impl.h
 create mode 100644 secp256k1-sys/depend/secp256k1/src/int128_native.h
 create mode 100644 secp256k1-sys/depend/secp256k1/src/int128_native_impl.h
 create mode 100644 secp256k1-sys/depend/secp256k1/src/int128_struct.h
 create mode 100644 secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h
 create mode 100644 secp256k1-sys/depend/secp256k1/src/scratch_impl.h.orig

diff --git a/Cargo.toml b/Cargo.toml
index 806b73c51..4ac03df65 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -37,7 +37,7 @@ global-context = ["std"]
 global-context-less-secure = ["global-context"]
 
 [dependencies]
-secp256k1-sys = { version = "0.7.0", default-features = false, path = "./secp256k1-sys" }
+secp256k1-sys = { version = "0.8.0", default-features = false, path = "./secp256k1-sys" }
 serde = { version = "1.0", default-features = false, optional = true }
 
 # You likely only want to enable these if you explicitly do not want to use "std", otherwise enable
diff --git a/secp256k1-sys/CHANGELOG.md b/secp256k1-sys/CHANGELOG.md
index a9a18b833..99d959f69 100644
--- a/secp256k1-sys/CHANGELOG.md
+++ b/secp256k1-sys/CHANGELOG.md
@@ -1,3 +1,7 @@
+# 0.8.0 - 2022-12-16
+
+* Upgrade to libsecp256k1 v0.2.0
+
 # 0.7.0 - 2022-12-01
 
 * [Make comparison functions stable across library versions](https://github.com/rust-bitcoin/rust-secp256k1/pull/518)
diff --git a/secp256k1-sys/Cargo.toml b/secp256k1-sys/Cargo.toml
index 3890c5af3..1e67dc347 100644
--- a/secp256k1-sys/Cargo.toml
+++ b/secp256k1-sys/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "secp256k1-sys"
-version = "0.7.0"
+version = "0.8.0"
 authors = [ "Dawid Ciężarkiewicz ",
             "Andrew Poelstra ",
             "Steven Roose " ]
@@ -12,7 +12,7 @@ description = "FFI for Pieter Wuille's `libsecp256k1` library."
keywords = [ "secp256k1", "libsecp256k1", "ffi" ] readme = "README.md" build = "build.rs" -links = "rustsecp256k1_v0_7_0" +links = "rustsecp256k1_v0_8_0" edition = "2018" # Should make docs.rs show all functions, even those behind non-default features diff --git a/secp256k1-sys/depend/secp256k1-HEAD-revision.txt b/secp256k1-sys/depend/secp256k1-HEAD-revision.txt index 2b27a9672..e105753bc 100644 --- a/secp256k1-sys/depend/secp256k1-HEAD-revision.txt +++ b/secp256k1-sys/depend/secp256k1-HEAD-revision.txt @@ -1,2 +1,2 @@ # This file was automatically created by ./vendor-libsecp.sh -a1102b12196ea27f44d6201de4d25926a2ae9640 +21ffe4b22a9683cf24ae0763359e401d1284cc7a diff --git a/secp256k1-sys/depend/secp256k1/.cirrus.yml b/secp256k1-sys/depend/secp256k1/.cirrus.yml index ffbd82071..51e3bc948 100644 --- a/secp256k1-sys/depend/secp256k1/.cirrus.yml +++ b/secp256k1-sys/depend/secp256k1/.cirrus.yml @@ -23,6 +23,13 @@ env: BENCH: yes SECP256K1_BENCH_ITERS: 2 CTIMETEST: yes + # Compile and run the tests + EXAMPLES: yes + +# https://cirrus-ci.org/pricing/#compute-credits +credits_snippet: &CREDITS + # Don't use any credits for now. + use_compute_credits: false cat_logs_snippet: &CAT_LOGS always: @@ -34,7 +41,6 @@ cat_logs_snippet: &CAT_LOGS - cat valgrind_ctime_test.log || true cat_bench_log_script: - cat bench.log || true - on_failure: cat_config_log_script: - cat config.log || true cat_test_env_script: @@ -65,12 +71,13 @@ task: << : *LINUX_CONTAINER matrix: &ENV_MATRIX - env: {WIDEMUL: int64, RECOVERY: yes} - - env: {WIDEMUL: int64, ECDH: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} + - env: {WIDEMUL: int64, ECDH: yes, SCHNORRSIG: yes} - env: {WIDEMUL: int128} - - env: {WIDEMUL: int128, RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} - - env: {WIDEMUL: int128, ECDH: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} + - env: {WIDEMUL: int128_struct} + - env: {WIDEMUL: int128, RECOVERY: yes, SCHNORRSIG: yes} + - env: {WIDEMUL: int128, ECDH: yes, SCHNORRSIG: yes} - env: {WIDEMUL: int128, ASM: x86_64} - - env: { RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} + - env: { RECOVERY: yes, SCHNORRSIG: yes} - env: {BUILD: distcheck, WITH_VALGRIND: no, CTIMETEST: no, BENCH: no} - env: {CPPFLAGS: -DDETERMINISTIC} - env: {CFLAGS: -O0, CTIMETEST: no} @@ -93,7 +100,6 @@ task: HOST: i686-linux-gnu ECDH: yes RECOVERY: yes - EXPERIMENTAL: yes SCHNORRSIG: yes matrix: - env: @@ -106,64 +112,32 @@ task: << : *CAT_LOGS task: - name: "x86_64: macOS Catalina" + name: "arm64: macOS Ventura" macos_instance: - image: catalina-base + image: ghcr.io/cirruslabs/macos-ventura-base:latest env: HOMEBREW_NO_AUTO_UPDATE: 1 HOMEBREW_NO_INSTALL_CLEANUP: 1 - # Cirrus gives us a fixed number of 12 virtual CPUs. Not that we even have that many jobs at the moment... - MAKEFLAGS: -j13 + # Cirrus gives us a fixed number of 4 virtual CPUs. Not that we even have that many jobs at the moment... + MAKEFLAGS: -j5 matrix: << : *ENV_MATRIX + env: + ASM: no + WITH_VALGRIND: no + CTIMETEST: no matrix: - env: - CC: gcc-9 + CC: gcc - env: CC: clang - # Update Command Line Tools - # Uncomment this if the Command Line Tools on the CirrusCI macOS image are too old to brew valgrind. - # See https://apple.stackexchange.com/a/195963 for the implementation. 
- ## update_clt_script: - ## - system_profiler SPSoftwareDataType - ## - touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress - ## - |- - ## PROD=$(softwareupdate -l | grep "*.*Command Line" | tail -n 1 | awk -F"*" '{print $2}' | sed -e 's/^ *//' | sed 's/Label: //g' | tr -d '\n') - ## # For debugging - ## - softwareupdate -l && echo "PROD: $PROD" - ## - softwareupdate -i "$PROD" --verbose - ## - rm /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress - ## - brew_valgrind_pre_script: - - brew update - - brew config - - brew tap LouisBrunner/valgrind - # Fetch valgrind source but don't build it yet. - - brew fetch --HEAD LouisBrunner/valgrind/valgrind - brew_valgrind_cache: - # This is $(brew --cellar valgrind) but command substition does not work here. - folder: /usr/local/Cellar/valgrind - # Rebuild cache if ... - fingerprint_script: - # ... macOS version changes: - - sw_vers - # ... brew changes: - - brew config - # ... valgrind changes: - - git -C "$(brew --cache)/valgrind--git" rev-parse HEAD - populate_script: - # If there's no hit in the cache, build and install valgrind. - - brew install --HEAD LouisBrunner/valgrind/valgrind - brew_valgrind_post_script: - # If we have restored valgrind from the cache, tell brew to create symlink to the PATH. - # If we haven't restored from cached (and just run brew install), this is a no-op. - - brew link valgrind brew_script: - - brew install automake libtool gcc@9 + - brew install automake libtool gcc << : *MERGE_BASE test_script: - ./ci/cirrus.sh << : *CAT_LOGS + << : *CREDITS task: name: "s390x (big-endian): Linux (Debian stable, QEMU)" @@ -175,7 +149,6 @@ task: WITH_VALGRIND: no ECDH: yes RECOVERY: yes - EXPERIMENTAL: yes SCHNORRSIG: yes CTIMETEST: no << : *MERGE_BASE @@ -195,12 +168,11 @@ task: WITH_VALGRIND: no ECDH: yes RECOVERY: yes - EXPERIMENTAL: yes SCHNORRSIG: yes CTIMETEST: no matrix: - env: {} - - env: {ASM: arm} + - env: {EXPERIMENTAL: yes, ASM: arm} << : *MERGE_BASE test_script: - ./ci/cirrus.sh @@ -216,7 +188,6 @@ task: WITH_VALGRIND: no ECDH: yes RECOVERY: yes - EXPERIMENTAL: yes SCHNORRSIG: yes CTIMETEST: no << : *MERGE_BASE @@ -234,7 +205,6 @@ task: WITH_VALGRIND: no ECDH: yes RECOVERY: yes - EXPERIMENTAL: yes SCHNORRSIG: yes CTIMETEST: no << : *MERGE_BASE @@ -243,18 +213,63 @@ task: << : *CAT_LOGS task: - name: "x86_64 (mingw32-w64): Windows (Debian stable, Wine)" << : *LINUX_CONTAINER env: - WRAPPER_CMD: wine64-stable - SECP256K1_TEST_ITERS: 16 - HOST: x86_64-w64-mingw32 + WRAPPER_CMD: wine + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + SCHNORRSIG: yes + CTIMETEST: no + matrix: + - name: "x86_64 (mingw32-w64): Windows (Debian stable, Wine)" + env: + HOST: x86_64-w64-mingw32 + - name: "i686 (mingw32-w64): Windows (Debian stable, Wine)" + env: + HOST: i686-w64-mingw32 + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + << : *LINUX_CONTAINER + env: + WRAPPER_CMD: wine + WERROR_CFLAGS: -WX WITH_VALGRIND: no ECDH: yes RECOVERY: yes EXPERIMENTAL: yes SCHNORRSIG: yes CTIMETEST: no + # Use a MinGW-w64 host to tell ./configure we're building for Windows. + # This will detect some MinGW-w64 tools but then make will need only + # the MSVC tools CC, AR and NM as specified below. + HOST: x86_64-w64-mingw32 + CC: /opt/msvc/bin/x64/cl + AR: /opt/msvc/bin/x64/lib + NM: /opt/msvc/bin/x64/dumpbin -symbols -headers + # Set non-essential options that affect the CLI messages here. + # (They depend on the user's taste, so we don't want to set them automatically in configure.ac.) 
+ CFLAGS: -nologo -diagnostics:caret + LDFLAGS: -XCClinker -nologo -XCClinker -diagnostics:caret + matrix: + - name: "x86_64 (MSVC): Windows (Debian stable, Wine)" + - name: "x86_64 (MSVC): Windows (Debian stable, Wine, int128_struct)" + env: + WIDEMUL: int128_struct + - name: "x86_64 (MSVC): Windows (Debian stable, Wine, int128_struct with __(u)mulh)" + env: + WIDEMUL: int128_struct + CPPFLAGS: -DSECP256K1_MSVC_MULH_TEST_OVERRIDE + - name: "i686 (MSVC): Windows (Debian stable, Wine)" + env: + HOST: i686-w64-mingw32 + CC: /opt/msvc/bin/x86/cl + AR: /opt/msvc/bin/x86/lib + NM: /opt/msvc/bin/x86/dumpbin -symbols -headers << : *MERGE_BASE test_script: - ./ci/cirrus.sh @@ -266,7 +281,6 @@ task: env: ECDH: yes RECOVERY: yes - EXPERIMENTAL: yes SCHNORRSIG: yes CTIMETEST: no matrix: @@ -305,16 +319,40 @@ task: - ./ci/cirrus.sh << : *CAT_LOGS +# Memory sanitizers +task: + << : *LINUX_CONTAINER + name: "MSan" + env: + ECDH: yes + RECOVERY: yes + SCHNORRSIG: yes + CTIMETEST: no + CC: clang + SECP256K1_TEST_ITERS: 32 + ASM: no + container: + memory: 2G + matrix: + - env: + CFLAGS: "-fsanitize=memory -g" + - env: + ECMULTGENPRECISION: 2 + ECMULTWINDOW: 2 + CFLAGS: "-fsanitize=memory -g -O3" + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + task: - name: "C++ -fpermissive" + name: "C++ -fpermissive (entire project)" << : *LINUX_CONTAINER env: - # ./configure correctly errors out when given CC=g++. - # We hack around this by passing CC=g++ only to make. - CC: gcc - MAKEFLAGS: -j4 CC=g++ CFLAGS=-fpermissive\ -g + CC: g++ + CFLAGS: -fpermissive -g + CPPFLAGS: -DSECP256K1_CPLUSPLUS_TEST_OVERRIDE WERROR_CFLAGS: - EXPERIMENTAL: yes ECDH: yes RECOVERY: yes SCHNORRSIG: yes @@ -322,3 +360,18 @@ task: test_script: - ./ci/cirrus.sh << : *CAT_LOGS + +task: + name: "C++ (public headers)" + << : *LINUX_CONTAINER + test_script: + - g++ -Werror include/*.h + - clang -Werror -x c++-header include/*.h + - /opt/msvc/bin/x64/cl.exe -c -WX -TP include/*.h + +task: + name: "sage prover" + << : *LINUX_CONTAINER + test_script: + - cd sage + - sage prove_group_implementations.sage diff --git a/secp256k1-sys/depend/secp256k1/.gitignore b/secp256k1-sys/depend/secp256k1/.gitignore index cb52998ad..80c646b77 100644 --- a/secp256k1-sys/depend/secp256k1/.gitignore +++ b/secp256k1-sys/depend/secp256k1/.gitignore @@ -6,11 +6,16 @@ exhaustive_tests precompute_ecmult_gen precompute_ecmult valgrind_ctime_test +ecdh_example +ecdsa_example +schnorr_example *.exe *.so *.a *.csv -!.gitignore +*.log +*.trs +*.sage.py Makefile configure @@ -29,8 +34,6 @@ libtool *.lo *.o *~ -*.log -*.trs coverage/ coverage.html @@ -41,6 +44,7 @@ coverage.*.html src/libsecp256k1-config.h src/libsecp256k1-config.h.in +build-aux/ar-lib build-aux/config.guess build-aux/config.sub build-aux/depcomp diff --git a/secp256k1-sys/depend/secp256k1/CHANGELOG.md b/secp256k1-sys/depend/secp256k1/CHANGELOG.md new file mode 100644 index 000000000..32a044be7 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/CHANGELOG.md @@ -0,0 +1,28 @@ +# Changelog + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +## [Unreleased] + +## [0.2.0] - 2022-12-12 + +### Added + - Added `rustsecp256k1_v0_8_0_selftest`, to be used in conjunction with `rustsecp256k1_v0_8_0_context_static`. + +### Changed + - Enabled modules schnorrsig, extrakeys and ECDH by default in `./configure`. + +### Deprecated + - Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead. 
+ - Renamed `rustsecp256k1_v0_8_0_context_no_precomp` to `rustsecp256k1_v0_8_0_context_static`. + +### ABI Compatibility + +Since this is the first release, we do not compare application binary interfaces. +However, there are unreleased versions of libsecp256k1 that are *not* ABI compatible with this version. + +## [0.1.0] - 2013-03-05 to 2021-12-25 + +This version was in fact never released. +The number was given by the build system since the introduction of autotools in Jan 2014 (ea0fe5a5bf0c04f9cc955b2966b614f5f378c6f6). +Therefore, this version number does not uniquely identify a set of source files. diff --git a/secp256k1-sys/depend/secp256k1/Makefile.am b/secp256k1-sys/depend/secp256k1/Makefile.am index 8fcda9075..cddd4cd51 100644 --- a/secp256k1-sys/depend/secp256k1/Makefile.am +++ b/secp256k1-sys/depend/secp256k1/Makefile.am @@ -8,7 +8,7 @@ AM_CFLAGS = $(SECP_CFLAGS) lib_LTLIBRARIES = libsecp256k1.la include_HEADERS = include/secp256k1.h -include_HEADERS += include/rustsecp256k1_v0_7_0_preallocated.h +include_HEADERS += include/rustsecp256k1_v0_8_0_preallocated.h noinst_HEADERS = noinst_HEADERS += src/scalar.h noinst_HEADERS += src/scalar_4x64.h @@ -48,6 +48,12 @@ noinst_HEADERS += src/precomputed_ecmult.h noinst_HEADERS += src/precomputed_ecmult_gen.h noinst_HEADERS += src/assumptions.h noinst_HEADERS += src/util.h +noinst_HEADERS += src/int128.h +noinst_HEADERS += src/int128_impl.h +noinst_HEADERS += src/int128_native.h +noinst_HEADERS += src/int128_native_impl.h +noinst_HEADERS += src/int128_struct.h +noinst_HEADERS += src/int128_struct_impl.h noinst_HEADERS += src/scratch.h noinst_HEADERS += src/scratch_impl.h noinst_HEADERS += src/selftest.h @@ -58,19 +64,19 @@ noinst_HEADERS += src/hash_impl.h noinst_HEADERS += src/field.h noinst_HEADERS += src/field_impl.h noinst_HEADERS += src/bench.h -noinst_HEADERS += src/basic-config.h noinst_HEADERS += contrib/lax_der_parsing.h noinst_HEADERS += contrib/lax_der_parsing.c noinst_HEADERS += contrib/lax_der_privatekey_parsing.h noinst_HEADERS += contrib/lax_der_privatekey_parsing.c +noinst_HEADERS += examples/random.h -PRECOMPUTED_LIB = librustsecp256k1_v0_7_0_precomputed.la +PRECOMPUTED_LIB = librustsecp256k1_v0_8_0_precomputed.la noinst_LTLIBRARIES = $(PRECOMPUTED_LIB) -librustsecp256k1_v0_7_0_precomputed_la_SOURCES = src/precomputed_ecmult.c src/precomputed_ecmult_gen.c -librustsecp256k1_v0_7_0_precomputed_la_CPPFLAGS = $(SECP_INCLUDES) +librustsecp256k1_v0_8_0_precomputed_la_SOURCES = src/precomputed_ecmult.c src/precomputed_ecmult_gen.c +librustsecp256k1_v0_8_0_precomputed_la_CPPFLAGS = $(SECP_INCLUDES) if USE_EXTERNAL_ASM -COMMON_LIB = librustsecp256k1_v0_7_0_common.la +COMMON_LIB = librustsecp256k1_v0_8_0_common.la else COMMON_LIB = endif @@ -81,17 +87,17 @@ pkgconfig_DATA = libsecp256k1.pc if USE_EXTERNAL_ASM if USE_ASM_ARM -librustsecp256k1_v0_7_0_common_la_SOURCES = src/asm/field_10x26_arm.s +librustsecp256k1_v0_8_0_common_la_SOURCES = src/asm/field_10x26_arm.s endif endif -librustsecp256k1_v0_7_0_la_SOURCES = src/secp256k1.c -librustsecp256k1_v0_7_0_la_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES) -librustsecp256k1_v0_7_0_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB) $(PRECOMPUTED_LIB) -librustsecp256k1_v0_7_0_la_LDFLAGS = -no-undefined -version-info $(LIB_VERSION_CURRENT):$(LIB_VERSION_REVISION):$(LIB_VERSION_AGE) +librustsecp256k1_v0_8_0_la_SOURCES = src/secp256k1.c +librustsecp256k1_v0_8_0_la_CPPFLAGS = $(SECP_INCLUDES) +librustsecp256k1_v0_8_0_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB) $(PRECOMPUTED_LIB) 
+librustsecp256k1_v0_8_0_la_LDFLAGS = -no-undefined -version-info $(LIB_VERSION_CURRENT):$(LIB_VERSION_REVISION):$(LIB_VERSION_AGE) if VALGRIND_ENABLED -librustsecp256k1_v0_7_0_la_CPPFLAGS += -DVALGRIND +librustsecp256k1_v0_8_0_la_CPPFLAGS += -DVALGRIND endif noinst_PROGRAMS = @@ -111,7 +117,7 @@ TESTS = if USE_TESTS noinst_PROGRAMS += tests tests_SOURCES = src/tests.c -tests_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) +tests_CPPFLAGS = $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) if VALGRIND_ENABLED tests_CPPFLAGS += -DVALGRIND noinst_PROGRAMS += valgrind_ctime_test @@ -139,6 +145,40 @@ exhaustive_tests_LDFLAGS = -static TESTS += exhaustive_tests endif +if USE_EXAMPLES +noinst_PROGRAMS += ecdsa_example +ecdsa_example_SOURCES = examples/ecdsa.c +ecdsa_example_CPPFLAGS = -I$(top_srcdir)/include +ecdsa_example_LDADD = libsecp256k1.la +ecdsa_example_LDFLAGS = -static +if BUILD_WINDOWS +ecdsa_example_LDFLAGS += -lbcrypt +endif +TESTS += ecdsa_example +if ENABLE_MODULE_ECDH +noinst_PROGRAMS += ecdh_example +ecdh_example_SOURCES = examples/ecdh.c +ecdh_example_CPPFLAGS = -I$(top_srcdir)/include +ecdh_example_LDADD = libsecp256k1.la +ecdh_example_LDFLAGS = -static +if BUILD_WINDOWS +ecdh_example_LDFLAGS += -lbcrypt +endif +TESTS += ecdh_example +endif +if ENABLE_MODULE_SCHNORRSIG +noinst_PROGRAMS += schnorr_example +schnorr_example_SOURCES = examples/schnorr.c +schnorr_example_CPPFLAGS = -I$(top_srcdir)/include +schnorr_example_LDADD = libsecp256k1.la +schnorr_example_LDFLAGS = -static +if BUILD_WINDOWS +schnorr_example_LDFLAGS += -lbcrypt +endif +TESTS += schnorr_example +endif +endif + ### Precomputed tables EXTRA_PROGRAMS = precompute_ecmult precompute_ecmult_gen CLEANFILES = $(EXTRA_PROGRAMS) @@ -176,7 +216,15 @@ maintainer-clean-local: clean-precomp clean-precomp: rm -f $(PRECOMP) -EXTRA_DIST = autogen.sh SECURITY.md +EXTRA_DIST = autogen.sh CHANGELOG.md SECURITY.md +EXTRA_DIST += doc/release-process.md doc/safegcd_implementation.md +EXTRA_DIST += examples/EXAMPLES_COPYING +EXTRA_DIST += sage/gen_exhaustive_groups.sage +EXTRA_DIST += sage/gen_split_lambda_constants.sage +EXTRA_DIST += sage/group_prover.sage +EXTRA_DIST += sage/prove_group_implementations.sage +EXTRA_DIST += sage/rustsecp256k1_v0_8_0_params.sage +EXTRA_DIST += sage/weierstrass_prover.sage if ENABLE_MODULE_ECDH include src/modules/ecdh/Makefile.am.include diff --git a/secp256k1-sys/depend/secp256k1/README.md b/secp256k1-sys/depend/secp256k1/README.md index 5fc07dd4f..ffdc9aeae 100644 --- a/secp256k1-sys/depend/secp256k1/README.md +++ b/secp256k1-sys/depend/secp256k1/README.md @@ -2,6 +2,8 @@ libsecp256k1 ============ [![Build Status](https://api.cirrus-ci.com/github/bitcoin-core/secp256k1.svg?branch=master)](https://cirrus-ci.com/github/bitcoin-core/secp256k1) +![Dependencies: None](https://img.shields.io/badge/dependencies-none-success) +[![irc.libera.chat #secp256k1](https://img.shields.io/badge/irc.libera.chat-%23secp256k1-success)](https://web.libera.chat/#secp256k1) Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1. @@ -15,11 +17,10 @@ Features: * Derandomized ECDSA (via RFC6979 or with a caller provided function.) * Very efficient implementation. * Suitable for embedded systems. +* No runtime dependencies. * Optional module for public key recovery. * Optional module for ECDH key exchange. -* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki) (experimental). 
- -Experimental features have not received enough scrutiny to satisfy the standard of quality of this library but are made available for testing and review by the community. The APIs of these features should not be considered stable. +* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki). Implementation details ---------------------- @@ -35,6 +36,7 @@ Implementation details * Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). * Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys). * Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan). + * This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community. * Scalar operations * Optimized implementation without data-dependent branches of arithmetic modulo the curve's order. * Using 4 64-bit limbs (relying on __int128 support in the compiler). @@ -69,6 +71,17 @@ libsecp256k1 is built using autotools: $ make check # run the test suite $ sudo make install # optional +To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags. + +Usage examples +----------- +Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`. + * [ECDSA example](examples/ecdsa.c) + * [Schnorr signatures example](examples/schnorr.c) + * [Deriving a shared secret (ECDH) example](examples/ecdh.c) + +To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`. + Test coverage ----------- diff --git a/secp256k1-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 b/secp256k1-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 index c14d09fa1..98be915b6 100644 --- a/secp256k1-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 +++ b/secp256k1-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 @@ -1,7 +1,7 @@ dnl escape "$0x" below using the m4 quadrigaph @S|@, and escape it again with a \ for the shell. 
AC_DEFUN([SECP_64BIT_ASM_CHECK],[ AC_MSG_CHECKING(for x86_64 assembly availability) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ +AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include ]],[[ uint64_t a = 11, tmp; __asm__ __volatile__("movq \@S|@0x100000000,%1; mulq %%rsi" : "+a"(a) : "S"(tmp) : "cc", "%rdx"); @@ -10,6 +10,7 @@ AC_MSG_RESULT([$has_64bit_asm]) ]) AC_DEFUN([SECP_VALGRIND_CHECK],[ +AC_MSG_CHECKING([for valgrind support]) if test x"$has_valgrind" != x"yes"; then CPPFLAGS_TEMP="$CPPFLAGS" CPPFLAGS="$VALGRIND_CPPFLAGS $CPPFLAGS" @@ -21,6 +22,7 @@ if test x"$has_valgrind" != x"yes"; then #endif ]])], [has_valgrind=yes; AC_DEFINE(HAVE_VALGRIND,1,[Define this symbol if valgrind is installed, and it supports the host platform])]) fi +AC_MSG_RESULT($has_valgrind) ]) dnl SECP_TRY_APPEND_CFLAGS(flags, VAR) @@ -38,3 +40,16 @@ AC_DEFUN([SECP_TRY_APPEND_CFLAGS], [ unset flag_works AC_SUBST($2) ]) + +dnl SECP_SET_DEFAULT(VAR, default, default-dev-mode) +dnl Set VAR to default or default-dev-mode, depending on whether dev mode is enabled +AC_DEFUN([SECP_SET_DEFAULT], [ + if test "${enable_dev_mode+set}" != set; then + AC_MSG_ERROR([[Set enable_dev_mode before calling SECP_SET_DEFAULT]]) + fi + if test x"$enable_dev_mode" = x"yes"; then + $1="$3" + else + $1="$2" + fi +]) diff --git a/secp256k1-sys/depend/secp256k1/ci/cirrus.sh b/secp256k1-sys/depend/secp256k1/ci/cirrus.sh index 4379e2504..fb5854a77 100755 --- a/secp256k1-sys/depend/secp256k1/ci/cirrus.sh +++ b/secp256k1-sys/depend/secp256k1/ci/cirrus.sh @@ -5,10 +5,47 @@ set -x export LC_ALL=C +# Print relevant CI environment to allow reproducing the job outside of CI. +print_environment() { + # Turn off -x because it messes up the output + set +x + # There are many ways to print variable names and their content. This one + # does not rely on bash. + for i in WERROR_CFLAGS MAKEFLAGS BUILD \ + ECMULTWINDOW ECMULTGENPRECISION ASM WIDEMUL WITH_VALGRIND EXTRAFLAGS \ + EXPERIMENTAL ECDH RECOVERY SCHNORRSIG \ + SECP256K1_TEST_ITERS BENCH SECP256K1_BENCH_ITERS CTIMETEST\ + EXAMPLES \ + WRAPPER_CMD CC AR NM HOST + do + eval 'printf "%s %s " "$i=\"${'"$i"'}\""' + done + echo "$0" + set -x +} +print_environment + +# Start persistent wineserver if necessary. +# This speeds up jobs with many invocations of wine (e.g., ./configure with MSVC) tremendously. +case "$WRAPPER_CMD" in + *wine*) + # This is apparently only reliable when we run a dummy command such as "hh.exe" afterwards. + wineserver -p && wine hh.exe + ;; +esac + env >> test_env.log -$CC -v || true -valgrind --version || true +if [ -n "$CC" ]; then + # The MSVC compiler "cl" doesn't understand "-v" + $CC -v || true +fi +if [ "$WITH_VALGRIND" = "yes" ]; then + valgrind --version +fi +if [ -n "$WRAPPER_CMD" ]; then + $WRAPPER_CMD --version +fi ./autogen.sh @@ -19,6 +56,7 @@ valgrind --version || true --with-ecmult-gen-precision="$ECMULTGENPRECISION" \ --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \ --enable-module-schnorrsig="$SCHNORRSIG" \ + --enable-examples="$EXAMPLES" \ --with-valgrind="$WITH_VALGRIND" \ --host="$HOST" $EXTRAFLAGS @@ -62,6 +100,9 @@ then make precomp fi +# Shutdown wineserver again +wineserver -k || true + # Check that no repo files have been modified by the build. # (This fails for example if the precomp files need to be updated in the repo.) 
git diff --exit-code diff --git a/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile b/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile index fdba12aa0..a83a4e36d 100644 --- a/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile +++ b/secp256k1-sys/depend/secp256k1/ci/linux-debian.Dockerfile @@ -1,15 +1,14 @@ FROM debian:stable -RUN dpkg --add-architecture i386 -RUN dpkg --add-architecture s390x -RUN dpkg --add-architecture armhf -RUN dpkg --add-architecture arm64 -RUN dpkg --add-architecture ppc64el -RUN apt-get update +RUN dpkg --add-architecture i386 && \ + dpkg --add-architecture s390x && \ + dpkg --add-architecture armhf && \ + dpkg --add-architecture arm64 && \ + dpkg --add-architecture ppc64el # dkpg-dev: to make pkg-config work in cross-builds # llvm: for llvm-symbolizer, which is used by clang's UBSan for symbolized stack traces -RUN apt-get install --no-install-recommends --no-upgrade -y \ +RUN apt-get update && apt-get install --no-install-recommends -y \ git ca-certificates \ make automake libtool pkg-config dpkg-dev valgrind qemu-user \ gcc clang llvm libc6-dbg \ @@ -19,7 +18,20 @@ RUN apt-get install --no-install-recommends --no-upgrade -y \ gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \ gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 \ gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \ - wine gcc-mingw-w64-x86-64 + gcc-mingw-w64-x86-64-win32 wine64 wine \ + gcc-mingw-w64-i686-win32 wine32 \ + sagemath -# Run a dummy command in wine to make it set up configuration -RUN wine64-stable xcopy || true +WORKDIR /root +# The "wine" package provides a convience wrapper that we need +RUN apt-get update && apt-get install --no-install-recommends -y \ + git ca-certificates wine64 wine python3-simplejson python3-six msitools winbind procps && \ + git clone https://github.com/mstorsjo/msvc-wine && \ + mkdir /opt/msvc && \ + python3 msvc-wine/vsdownload.py --accept-license --dest /opt/msvc Microsoft.VisualStudio.Workload.VCTools && \ + msvc-wine/install.sh /opt/msvc + +# Initialize the wine environment. Wait until the wineserver process has +# exited before closing the session, to avoid corrupting the wine prefix. +RUN wine64 wineboot --init && \ + while (ps -A | grep wineserver) > /dev/null; do sleep 1; done diff --git a/secp256k1-sys/depend/secp256k1/configure.ac b/secp256k1-sys/depend/secp256k1/configure.ac index 92e497df8..68f279b17 100644 --- a/secp256k1-sys/depend/secp256k1/configure.ac +++ b/secp256k1-sys/depend/secp256k1/configure.ac @@ -4,20 +4,20 @@ AC_PREREQ([2.60]) # the API. All changes in experimental modules are treated as # backwards-compatible and therefore at most increase the minor version. define(_PKG_VERSION_MAJOR, 0) -define(_PKG_VERSION_MINOR, 1) -define(_PKG_VERSION_BUILD, 0) -define(_PKG_VERSION_IS_RELEASE, false) +define(_PKG_VERSION_MINOR, 2) +define(_PKG_VERSION_PATCH, 0) +define(_PKG_VERSION_IS_RELEASE, true) # The library version is based on libtool versioning of the ABI. The set of # rules for updating the version can be found here: # https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html # All changes in experimental modules are treated as if they don't affect the # interface and therefore only increase the revision. 
-define(_LIB_VERSION_CURRENT, 0) +define(_LIB_VERSION_CURRENT, 1) define(_LIB_VERSION_REVISION, 0) define(_LIB_VERSION_AGE, 0) -AC_INIT([libsecp256k1],m4_join([.], _PKG_VERSION_MAJOR, _PKG_VERSION_MINOR, _PKG_VERSION_BUILD)m4_if(_PKG_VERSION_IS_RELEASE, [true], [], [-pre]),[https://github.com/bitcoin-core/secp256k1/issues],[libsecp256k1],[https://github.com/bitcoin-core/secp256k1]) +AC_INIT([libsecp256k1],m4_join([.], _PKG_VERSION_MAJOR, _PKG_VERSION_MINOR, _PKG_VERSION_PATCH)m4_if(_PKG_VERSION_IS_RELEASE, [true], [], [-dev]),[https://github.com/bitcoin-core/secp256k1/issues],[libsecp256k1],[https://github.com/bitcoin-core/secp256k1]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_MACRO_DIR([build-aux/m4]) @@ -25,25 +25,25 @@ AC_CANONICAL_HOST AH_TOP([#ifndef LIBSECP256K1_CONFIG_H]) AH_TOP([#define LIBSECP256K1_CONFIG_H]) AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/]) -AM_INIT_AUTOMAKE([foreign subdir-objects]) -LT_INIT([win32-dll]) +# Require Automake 1.11.2 for AM_PROG_AR +AM_INIT_AUTOMAKE([1.11.2 foreign subdir-objects]) # Make the compilation flags quiet unless V=1 is used. m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) -PKG_PROG_PKG_CONFIG +AC_PROG_CC +AM_PROG_AS +AM_PROG_AR -AC_PATH_TOOL(AR, ar) -AC_PATH_TOOL(RANLIB, ranlib) -AC_PATH_TOOL(STRIP, strip) +# Clear some cache variables as a workaround for a bug that appears due to a bad +# interaction between AM_PROG_AR and LT_INIT when combining MSVC's archiver lib.exe. +# https://debbugs.gnu.org/cgi/bugreport.cgi?bug=54421 +AS_UNSET(ac_cv_prog_AR) +AS_UNSET(ac_cv_prog_ac_ct_AR) +LT_INIT([win32-dll]) -AM_PROG_CC_C_O -AC_PROG_CC_C89 -if test x"$ac_cv_prog_cc_c89" = x"no"; then - AC_MSG_ERROR([c89 compiler support required]) -fi -AM_PROG_AS +build_windows=no case $host_os in *darwin*) @@ -68,6 +68,9 @@ case $host_os in fi fi ;; + cygwin*|mingw*) + build_windows=yes + ;; esac # Try if some desirable compiler flags are supported and append them to SECP_CFLAGS. @@ -86,23 +89,35 @@ esac # # TODO We should analogously not touch CPPFLAGS and LDFLAGS but currently there are no issues. AC_DEFUN([SECP_TRY_APPEND_DEFAULT_CFLAGS], [ - # Try to append -Werror=unknown-warning-option to CFLAGS temporarily. Otherwise clang will - # not error out if it gets unknown warning flags and the checks here will always succeed - # no matter if clang knows the flag or not. - SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS="$CFLAGS" - SECP_TRY_APPEND_CFLAGS([-Werror=unknown-warning-option], CFLAGS) - - SECP_TRY_APPEND_CFLAGS([-std=c89 -pedantic -Wno-long-long -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef], $1) # GCC >= 3.0, -Wlong-long is implied by -pedantic. - SECP_TRY_APPEND_CFLAGS([-Wno-overlength-strings], $1) # GCC >= 4.2, -Woverlength-strings is implied by -pedantic. - SECP_TRY_APPEND_CFLAGS([-Wall], $1) # GCC >= 2.95 and probably many other compilers - SECP_TRY_APPEND_CFLAGS([-Wno-unused-function], $1) # GCC >= 3.0, -Wunused-function is implied by -Wall. - SECP_TRY_APPEND_CFLAGS([-Wextra], $1) # GCC >= 3.4, this is the newer name of -W, which we don't use because older GCCs will warn about unused functions. - SECP_TRY_APPEND_CFLAGS([-Wcast-align], $1) # GCC >= 2.95 - SECP_TRY_APPEND_CFLAGS([-Wcast-align=strict], $1) # GCC >= 8.0 - SECP_TRY_APPEND_CFLAGS([-Wconditional-uninitialized], $1) # Clang >= 3.0 only - SECP_TRY_APPEND_CFLAGS([-fvisibility=hidden], $1) # GCC >= 4.0 - - CFLAGS="$SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS" + # GCC and compatible (incl. 
clang) + if test "x$GCC" = "xyes"; then + # Try to append -Werror=unknown-warning-option to CFLAGS temporarily. Otherwise clang will + # not error out if it gets unknown warning flags and the checks here will always succeed + # no matter if clang knows the flag or not. + SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS="$CFLAGS" + SECP_TRY_APPEND_CFLAGS([-Werror=unknown-warning-option], CFLAGS) + + SECP_TRY_APPEND_CFLAGS([-std=c89 -pedantic -Wno-long-long -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef], $1) # GCC >= 3.0, -Wlong-long is implied by -pedantic. + SECP_TRY_APPEND_CFLAGS([-Wno-overlength-strings], $1) # GCC >= 4.2, -Woverlength-strings is implied by -pedantic. + SECP_TRY_APPEND_CFLAGS([-Wall], $1) # GCC >= 2.95 and probably many other compilers + SECP_TRY_APPEND_CFLAGS([-Wno-unused-function], $1) # GCC >= 3.0, -Wunused-function is implied by -Wall. + SECP_TRY_APPEND_CFLAGS([-Wextra], $1) # GCC >= 3.4, this is the newer name of -W, which we don't use because older GCCs will warn about unused functions. + SECP_TRY_APPEND_CFLAGS([-Wcast-align], $1) # GCC >= 2.95 + SECP_TRY_APPEND_CFLAGS([-Wcast-align=strict], $1) # GCC >= 8.0 + SECP_TRY_APPEND_CFLAGS([-Wconditional-uninitialized], $1) # Clang >= 3.0 only + SECP_TRY_APPEND_CFLAGS([-fvisibility=hidden], $1) # GCC >= 4.0 + + CFLAGS="$SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS" + fi + + # MSVC + # Assume MSVC if we're building for Windows but not with GCC or compatible; + # libtool makes the same assumption internally. + # Note that "/opt" and "-opt" are equivalent for MSVC; we use "-opt" because "/opt" looks like a path. + if test x"$GCC" != x"yes" && test x"$build_windows" = x"yes"; then + SECP_TRY_APPEND_CFLAGS([-W2 -wd4146], $1) # Moderate warning level, disable warning C4146 "unary minus operator applied to unsigned type, result still unsigned" + SECP_TRY_APPEND_CFLAGS([-external:anglebrackets -external:W0], $1) # Suppress warnings from #include <...> files + fi ]) SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS) @@ -110,58 +125,61 @@ SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS) ### Define config arguments ### +# In dev mode, we enable all binaries and modules by default but individual options can still be overridden explicitly. +# Check for dev mode first because SECP_SET_DEFAULT needs enable_dev_mode set. 
+AC_ARG_ENABLE(dev_mode, [], [], + [enable_dev_mode=no]) + AC_ARG_ENABLE(benchmark, - AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]), - [use_benchmark=$enableval], - [use_benchmark=yes]) + AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]), [], + [SECP_SET_DEFAULT([enable_benchmark], [yes], [yes])]) AC_ARG_ENABLE(coverage, - AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis [default=no]]), - [enable_coverage=$enableval], - [enable_coverage=no]) + AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis [default=no]]), [], + [SECP_SET_DEFAULT([enable_coverage], [no], [no])]) AC_ARG_ENABLE(tests, - AS_HELP_STRING([--enable-tests],[compile tests [default=yes]]), - [use_tests=$enableval], - [use_tests=yes]) + AS_HELP_STRING([--enable-tests],[compile tests [default=yes]]), [], + [SECP_SET_DEFAULT([enable_tests], [yes], [yes])]) AC_ARG_ENABLE(experimental, - AS_HELP_STRING([--enable-experimental],[allow experimental configure options [default=no]]), - [use_experimental=$enableval], - [use_experimental=no]) + AS_HELP_STRING([--enable-experimental],[allow experimental configure options [default=no]]), [], + [SECP_SET_DEFAULT([enable_experimental], [no], [yes])]) AC_ARG_ENABLE(exhaustive_tests, - AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests [default=yes]]), - [use_exhaustive_tests=$enableval], - [use_exhaustive_tests=yes]) + AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests [default=yes]]), [], + [SECP_SET_DEFAULT([enable_exhaustive_tests], [yes], [yes])]) + +AC_ARG_ENABLE(examples, + AS_HELP_STRING([--enable-examples],[compile the examples [default=no]]), [], + [SECP_SET_DEFAULT([enable_examples], [no], [yes])]) AC_ARG_ENABLE(module_ecdh, - AS_HELP_STRING([--enable-module-ecdh],[enable ECDH shared secret computation]), - [enable_module_ecdh=$enableval], - [enable_module_ecdh=no]) + AS_HELP_STRING([--enable-module-ecdh],[enable ECDH module [default=yes]]), [], + [SECP_SET_DEFAULT([enable_module_ecdh], [yes], [yes])]) AC_ARG_ENABLE(module_recovery, - AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module [default=no]]), - [enable_module_recovery=$enableval], - [enable_module_recovery=no]) + AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module [default=no]]), [], + [SECP_SET_DEFAULT([enable_module_recovery], [no], [yes])]) AC_ARG_ENABLE(module_extrakeys, - AS_HELP_STRING([--enable-module-extrakeys],[enable extrakeys module (experimental)]), - [enable_module_extrakeys=$enableval], - [enable_module_extrakeys=no]) + AS_HELP_STRING([--enable-module-extrakeys],[enable extrakeys module [default=yes]]), [], + [SECP_SET_DEFAULT([enable_module_extrakeys], [yes], [yes])]) AC_ARG_ENABLE(module_schnorrsig, - AS_HELP_STRING([--enable-module-schnorrsig],[enable schnorrsig module (experimental)]), - [enable_module_schnorrsig=$enableval], - [enable_module_schnorrsig=no]) + AS_HELP_STRING([--enable-module-schnorrsig],[enable schnorrsig module [default=yes]]), [], + [SECP_SET_DEFAULT([enable_module_schnorrsig], [yes], [yes])]) AC_ARG_ENABLE(external_default_callbacks, - AS_HELP_STRING([--enable-external-default-callbacks],[enable external default callback functions [default=no]]), - [use_external_default_callbacks=$enableval], - [use_external_default_callbacks=no]) + AS_HELP_STRING([--enable-external-default-callbacks],[enable external default callback functions [default=no]]), [], + 
[SECP_SET_DEFAULT([enable_external_default_callbacks], [no], [no])]) # Test-only override of the (autodetected by the C code) "widemul" setting. -# Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default). +# Legal values are: +# * int64 (for [u]int64_t), +# * int128 (for [unsigned] __int128), +# * int128_struct (for int128 implemented as a structure), +# * and auto (the default). AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto]) AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto], @@ -248,14 +266,14 @@ else fi # Select assembly optimization -use_external_asm=no +enable_external_asm=no case $set_asm in x86_64) AC_DEFINE(USE_ASM_X86_64, 1, [Define this symbol to enable x86_64 assembly optimizations]) ;; arm) - use_external_asm=yes + enable_external_asm=yes ;; no) ;; @@ -264,13 +282,16 @@ no) ;; esac -if test x"$use_external_asm" = x"yes"; then +if test x"$enable_external_asm" = x"yes"; then AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used]) fi # Select wide multiplication implementation case $set_widemul in +int128_struct) + AC_DEFINE(USE_FORCE_WIDEMUL_INT128_STRUCT, 1, [Define this symbol to force the use of the structure for simulating (unsigned) int128 based wide multiplication]) + ;; int128) AC_DEFINE(USE_FORCE_WIDEMUL_INT128, 1, [Define this symbol to force the use of the (unsigned) __int128 based wide multiplication implementation]) ;; @@ -326,7 +347,9 @@ if test x"$enable_valgrind" = x"yes"; then SECP_INCLUDES="$SECP_INCLUDES $VALGRIND_CPPFLAGS" fi -# Add -Werror and similar flags passed from the outside (for testing, e.g., in CI) +# Add -Werror and similar flags passed from the outside (for testing, e.g., in CI). +# We don't want to set the user variable CFLAGS in CI because this would disable +# autoconf's logic for setting default CFLAGS, which we would like to test in CI. SECP_CFLAGS="$SECP_CFLAGS $WERROR_CFLAGS" ### @@ -352,7 +375,7 @@ if test x"$enable_module_extrakeys" = x"yes"; then AC_DEFINE(ENABLE_MODULE_EXTRAKEYS, 1, [Define this symbol to enable the extrakeys module]) fi -if test x"$use_external_default_callbacks" = x"yes"; then +if test x"$enable_external_default_callbacks" = x"yes"; then AC_DEFINE(USE_EXTERNAL_DEFAULT_CALLBACKS, 1, [Define this symbol if an external implementation of the default callbacks is used]) fi @@ -364,16 +387,8 @@ if test x"$enable_experimental" = x"yes"; then AC_MSG_NOTICE([******]) AC_MSG_NOTICE([WARNING: experimental build]) AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.]) - AC_MSG_NOTICE([Building extrakeys module: $enable_module_extrakeys]) - AC_MSG_NOTICE([Building schnorrsig module: $enable_module_schnorrsig]) AC_MSG_NOTICE([******]) else - if test x"$enable_module_extrakeys" = x"yes"; then - AC_MSG_ERROR([extrakeys module is experimental. Use --enable-experimental to allow.]) - fi - if test x"$enable_module_schnorrsig" = x"yes"; then - AC_MSG_ERROR([schnorrsig module is experimental. Use --enable-experimental to allow.]) - fi if test x"$set_asm" = x"arm"; then AC_MSG_ERROR([ARM assembly optimization is experimental. 
Use --enable-experimental to allow.]) fi @@ -391,32 +406,30 @@ AC_SUBST(SECP_TEST_LIBS) AC_SUBST(SECP_TEST_INCLUDES) AC_SUBST(SECP_CFLAGS) AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"]) -AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"]) -AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"]) -AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"]) +AM_CONDITIONAL([USE_TESTS], [test x"$enable_tests" != x"no"]) +AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$enable_exhaustive_tests" != x"no"]) +AM_CONDITIONAL([USE_EXAMPLES], [test x"$enable_examples" != x"no"]) +AM_CONDITIONAL([USE_BENCHMARK], [test x"$enable_benchmark" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_EXTRAKEYS], [test x"$enable_module_extrakeys" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_SCHNORRSIG], [test x"$enable_module_schnorrsig" = x"yes"]) -AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"]) +AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$enable_external_asm" = x"yes"]) AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"]) +AM_CONDITIONAL([BUILD_WINDOWS], [test "$build_windows" = "yes"]) AC_SUBST(LIB_VERSION_CURRENT, _LIB_VERSION_CURRENT) AC_SUBST(LIB_VERSION_REVISION, _LIB_VERSION_REVISION) AC_SUBST(LIB_VERSION_AGE, _LIB_VERSION_AGE) -# Make sure nothing new is exported so that we don't break the cache. -PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH" -unset PKG_CONFIG_PATH -PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP" - AC_OUTPUT echo echo "Build Options:" -echo " with external callbacks = $use_external_default_callbacks" -echo " with benchmarks = $use_benchmark" -echo " with tests = $use_tests" +echo " with external callbacks = $enable_external_default_callbacks" +echo " with benchmarks = $enable_benchmark" +echo " with tests = $enable_tests" echo " with coverage = $enable_coverage" +echo " with examples = $enable_examples" echo " module ecdh = $enable_module_ecdh" echo " module recovery = $enable_module_recovery" echo " module extrakeys = $enable_module_extrakeys" diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c index 8f005012b..cd2d2d73a 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.c @@ -8,7 +8,7 @@ #include "lax_der_parsing.h" -int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { +int rustsecp256k1_v0_8_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { size_t rpos, rlen, spos, slen; size_t pos = 0; size_t lenbyte; @@ -16,7 +16,7 @@ int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_7_ int overflow = 0; /* Hack to initialize sig with a correctly-parsed but invalid signature. 
*/ - rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); /* Sequence tag byte */ if (pos == inputlen || input[pos] != 0x30) { @@ -137,11 +137,11 @@ int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der_lax(const rustsecp256k1_v0_7_ } if (!overflow) { - overflow = !rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + overflow = !rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); } if (overflow) { memset(tmpsig, 0, 64); - rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); + rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, sig, tmpsig); } return 1; } diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h index 2313d8ace..adbcc6d9c 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_parsing.h @@ -26,8 +26,8 @@ * certain violations are easily supported. You may need to adapt it. * * Do not use this for new systems. Use well-defined DER or compact signatures - * instead if you have the choice (see rustsecp256k1_v0_7_0_ecdsa_signature_parse_der and - * rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact). + * instead if you have the choice (see rustsecp256k1_v0_8_0_ecdsa_signature_parse_der and + * rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact). * * The supported violations are: * - All numbers are parsed as nonnegative integers, even though X.609-0207 @@ -83,9 +83,9 @@ extern "C" { * encoded numbers are out of range, signature validation with it is * guaranteed to fail for every message and public key. */ -int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der_lax( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature* sig, +int rustsecp256k1_v0_8_0_ecdsa_signature_parse_der_lax( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c index c8264ba28..1ab2c9e4a 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c @@ -8,7 +8,7 @@ #include "lax_der_privatekey_parsing.h" -int ec_privkey_import_der(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) { +int ec_privkey_import_der(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) { const unsigned char *end = privkey + privkeylen; int lenb = 0; int len = 0; @@ -45,17 +45,17 @@ int ec_privkey_import_der(const rustsecp256k1_v0_7_0_context* ctx, unsigned char return 0; } if (privkey[1]) memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]); - if (!rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, out32)) { + if (!rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, out32)) { memset(out32, 0, 32); return 0; } return 1; } -int ec_privkey_export_der(const rustsecp256k1_v0_7_0_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) { - rustsecp256k1_v0_7_0_pubkey pubkey; +int ec_privkey_export_der(const rustsecp256k1_v0_8_0_context *ctx, unsigned char *privkey, size_t *privkeylen, const 
unsigned char *key32, int compressed) { + rustsecp256k1_v0_8_0_pubkey pubkey; size_t pubkeylen = 0; - if (!rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, key32)) { + if (!rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, key32)) { *privkeylen = 0; return 0; } @@ -79,7 +79,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_7_0_context *ctx, unsigned char memcpy(ptr, key32, 32); ptr += 32; memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); pubkeylen = 33; - rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED); + rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED); ptr += pubkeylen; *privkeylen = ptr - privkey; } else { @@ -104,7 +104,7 @@ int ec_privkey_export_der(const rustsecp256k1_v0_7_0_context *ctx, unsigned char memcpy(ptr, key32, 32); ptr += 32; memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); pubkeylen = 65; - rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED); + rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED); ptr += pubkeylen; *privkeylen = ptr - privkey; } diff --git a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h index febca78bf..917ccdab6 100644 --- a/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h +++ b/secp256k1-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h @@ -43,8 +43,7 @@ extern "C" { /** Export a private key in DER format. * * Returns: 1 if the private key was valid. - * Args: ctx: pointer to a context object, initialized for signing (cannot - * be NULL) + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_0_context_static). * Out: privkey: pointer to an array for storing the private key in BER. * Should have space for 279 bytes, and cannot be NULL. * privkeylen: Pointer to an int where the length of the private key in @@ -58,10 +57,10 @@ extern "C" { * simple 32-byte private keys are sufficient. * * Note that this function does not guarantee correct DER output. It is - * guaranteed to be parsable by rustsecp256k1_v0_7_0_ec_privkey_import_der + * guaranteed to be parsable by rustsecp256k1_v0_8_0_ec_privkey_import_der */ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der( - const rustsecp256k1_v0_7_0_context* ctx, + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *seckey, @@ -83,7 +82,7 @@ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der( * key. */ SECP256K1_WARN_UNUSED_RESULT int ec_privkey_import_der( - const rustsecp256k1_v0_7_0_context* ctx, + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *privkey, size_t privkeylen diff --git a/secp256k1-sys/depend/secp256k1/doc/CHANGELOG.md b/secp256k1-sys/depend/secp256k1/doc/CHANGELOG.md deleted file mode 100644 index 3c4c2e458..000000000 --- a/secp256k1-sys/depend/secp256k1/doc/CHANGELOG.md +++ /dev/null @@ -1,12 +0,0 @@ -# Changelog - -This file is currently only a template for future use. - -Each change falls into one of the following categories: Added, Changed, Deprecated, Removed, Fixed or Security. 
- -## [Unreleased] - -## [MAJOR.MINOR.PATCH] - YYYY-MM-DD - -### Added/Changed/Deprecated/Removed/Fixed/Security -- [Title with link to Pull Request](https://link-to-pr) diff --git a/secp256k1-sys/depend/secp256k1/doc/release-process.md b/secp256k1-sys/depend/secp256k1/doc/release-process.md index a35b8a9db..91e361691 100644 --- a/secp256k1-sys/depend/secp256k1/doc/release-process.md +++ b/secp256k1-sys/depend/secp256k1/doc/release-process.md @@ -1,14 +1,52 @@ # Release Process -1. Open PR to master that - 1. adds release notes to `doc/CHANGELOG.md` and - 2. if this is **not** a patch release, updates `_PKG_VERSION_{MAJOR,MINOR}` and `_LIB_VERSIONS_*` in `configure.ac` -2. After the PR is merged, - * if this is **not** a patch release, create a release branch with name `MAJOR.MINOR`. - Make sure that the branch contains the right commits. - Create commit on the release branch that sets `_PKG_VERSION_IS_RELEASE` in `configure.ac` to `true`. - * if this **is** a patch release, open a pull request with the bugfixes to the `MAJOR.MINOR` branch. - Also include the release note commit bump `_PKG_VERSION_BUILD` and `_LIB_VERSIONS_*` in `configure.ac`. -4. Tag the commit with `git tag -s vMAJOR.MINOR.PATCH`. -5. Push branch and tag with `git push origin --tags`. -6. Create a new GitHub release with a link to the corresponding entry in `doc/CHANGELOG.md`. +This document outlines the process for releasing versions of the form `$MAJOR.$MINOR.$PATCH`. + +We distinguish between two types of releases: *regular* and *maintenance* releases. +Regular releases are releases of a new major or minor version as well as patches of the most recent release. +Maintenance releases, on the other hand, are required for patches of older releases. + +You should coordinate with the other maintainers on the release date, if possible. +This date will be part of the release entry in [CHANGELOG.md](../CHANGELOG.md) and it should match the dates of the remaining steps in the release process (including the date of the tag and the GitHub release). +It is best if the maintainers are present during the release, so they can help ensure that the process is followed correctly and, in the case of a regular release, they are aware that they should not modify the master branch between merging the PR in step 1 and the PR in step 3. + +This process also assumes that there will be no minor releases for old major releases. + +## Regular release + +1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that + * finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) (make sure to include an entry for `### ABI Compatibility`) and + * updates `_PKG_VERSION_*`, `_LIB_VERSION_*`, and sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`. +2. After the PR is merged, tag the commit and push it: + ``` + RELEASE_COMMIT= + git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH" $RELEASE_COMMIT + git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH + ``` +3. Open a PR to the master branch with a commit (using message `"release: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that sets `_PKG_VERSION_IS_RELEASE` to `false` and `_PKG_VERSION_PATCH` to `$PATCH + 1` and increases `_LIB_VERSION_REVISION`. If other maintainers are not present to approve the PR, it can be merged without ACKs. +4. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md). 
+ +## Maintenance release + +Note that bugfixes only need to be backported to releases for which no compatible release without the bug exists. + +1. If `$PATCH = 1`, create maintenance branch `$MAJOR.$MINOR`: + ``` + git checkout -b $MAJOR.$MINOR v$MAJOR.$MINOR.0 + git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR + ``` +2. Open a pull request to the `$MAJOR.$MINOR` branch that + * includes the bugfixes, + * finalizes the release notes, + * bumps `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac` (with commit message `"release: update PKG_ and LIB_VERSION for $MAJOR.$MINOR.$PATCH"`, for example). +3. After the PRs are merged, update the release branch and tag the commit: + ``` + git checkout $MAJOR.$MINOR && git pull + git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH" + ``` +4. Push tag: + ``` + git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH + ``` +5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md). +6. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md). diff --git a/secp256k1-sys/depend/secp256k1/examples/EXAMPLES_COPYING b/secp256k1-sys/depend/secp256k1/examples/EXAMPLES_COPYING new file mode 100644 index 000000000..0e259d42c --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/examples/EXAMPLES_COPYING @@ -0,0 +1,121 @@ +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. 
+ +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. 
In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. diff --git a/secp256k1-sys/depend/secp256k1/examples/ecdh.c b/secp256k1-sys/depend/secp256k1/examples/ecdh.c new file mode 100644 index 000000000..8e082f7c4 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/examples/ecdh.c @@ -0,0 +1,123 @@ +/************************************************************************* + * Written in 2020-2022 by Elichai Turkel * + * To the extent possible under law, the author(s) have dedicated all * + * copyright and related and neighboring rights to the software in this * + * file to the public domain worldwide. This software is distributed * + * without any warranty. 
For the CC0 Public Domain Dedication, see * EXAMPLES_COPYING or https://creativecommons.org/publicdomain/zero/1.0 * *************************************************************************/ + +#include <stdio.h> +#include <assert.h> +#include <string.h> + +#include <secp256k1.h> +#include <secp256k1_ecdh.h> + +#include "random.h" + + +int main(void) { + unsigned char seckey1[32]; + unsigned char seckey2[32]; + unsigned char compressed_pubkey1[33]; + unsigned char compressed_pubkey2[33]; + unsigned char shared_secret1[32]; + unsigned char shared_secret2[32]; + unsigned char randomize[32]; + int return_val; + size_t len; + rustsecp256k1_v0_8_0_pubkey pubkey1; + rustsecp256k1_v0_8_0_pubkey pubkey2; + + /* Before we can call actual API functions, we need to create a "context". */ + rustsecp256k1_v0_8_0_context* ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); + if (!fill_random(randomize, sizeof(randomize))) { + printf("Failed to generate randomness\n"); + return 1; + } + /* Randomizing the context is recommended to protect against side-channel + * leakage See `rustsecp256k1_v0_8_0_context_randomize` in secp256k1.h for more + * information about it. This should never fail. */ + return_val = rustsecp256k1_v0_8_0_context_randomize(ctx, randomize); + assert(return_val); + + /*** Key Generation ***/ + + /* If the secret key is zero or out of range (bigger than secp256k1's + * order), we try to sample a new key. Note that the probability of this + * happening is negligible. */ + while (1) { + if (!fill_random(seckey1, sizeof(seckey1)) || !fill_random(seckey2, sizeof(seckey2))) { + printf("Failed to generate randomness\n"); + return 1; + } + if (rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, seckey1) && rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, seckey2)) { + break; + } + } + + /* Public key creation using a valid context with a verified secret key should never fail */ + return_val = rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey1, seckey1); + assert(return_val); + return_val = rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey2, seckey2); + assert(return_val); + + /* Serialize pubkey1 in a compressed form (33 bytes), should always return 1 */ + len = sizeof(compressed_pubkey1); + return_val = rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, compressed_pubkey1, &len, &pubkey1, SECP256K1_EC_COMPRESSED); + assert(return_val); + /* Should be the same size as the size of the output, because we passed a 33 byte array. */ + assert(len == sizeof(compressed_pubkey1)); + + /* Serialize pubkey2 in a compressed form (33 bytes) */ + len = sizeof(compressed_pubkey2); + return_val = rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, compressed_pubkey2, &len, &pubkey2, SECP256K1_EC_COMPRESSED); + assert(return_val); + /* Should be the same size as the size of the output, because we passed a 33 byte array. */ + assert(len == sizeof(compressed_pubkey2)); + + /*** Creating the shared secret ***/ + + /* Perform ECDH with seckey1 and pubkey2. Should never fail with a verified + * seckey and valid pubkey */ + return_val = rustsecp256k1_v0_8_0_ecdh(ctx, shared_secret1, &pubkey2, seckey1, NULL, NULL); + assert(return_val); + + /* Perform ECDH with seckey2 and pubkey1.
Should never fail with a verified + * seckey and valid pubkey */ + return_val = rustsecp256k1_v0_8_0_ecdh(ctx, shared_secret2, &pubkey1, seckey2, NULL, NULL); + assert(return_val); + + /* Both parties should end up with the same shared secret */ + return_val = memcmp(shared_secret1, shared_secret2, sizeof(shared_secret1)); + assert(return_val == 0); + + printf("Secret Key1: "); + print_hex(seckey1, sizeof(seckey1)); + printf("Compressed Pubkey1: "); + print_hex(compressed_pubkey1, sizeof(compressed_pubkey1)); + printf("\nSecret Key2: "); + print_hex(seckey2, sizeof(seckey2)); + printf("Compressed Pubkey2: "); + print_hex(compressed_pubkey2, sizeof(compressed_pubkey2)); + printf("\nShared Secret: "); + print_hex(shared_secret1, sizeof(shared_secret1)); + + /* This will clear everything from the context and free the memory */ + rustsecp256k1_v0_8_0_context_destroy(ctx); + + /* It's best practice to try to clear secrets from memory after using them. + * This is done because some bugs can allow an attacker to leak memory, for + * example through "out of bounds" array access (see Heartbleed), Or the OS + * swapping them to disk. Hence, we overwrite the secret key buffer with zeros. + * + * TODO: Prevent these writes from being optimized out, as any good compiler + * will remove any writes that aren't used. */ + memset(seckey1, 0, sizeof(seckey1)); + memset(seckey2, 0, sizeof(seckey2)); + memset(shared_secret1, 0, sizeof(shared_secret1)); + memset(shared_secret2, 0, sizeof(shared_secret2)); + + return 0; +} diff --git a/secp256k1-sys/depend/secp256k1/examples/ecdsa.c b/secp256k1-sys/depend/secp256k1/examples/ecdsa.c new file mode 100644 index 000000000..9dab1910c --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/examples/ecdsa.c @@ -0,0 +1,133 @@ +/************************************************************************* + * Written in 2020-2022 by Elichai Turkel * + * To the extent possible under law, the author(s) have dedicated all * + * copyright and related and neighboring rights to the software in this * + * file to the public domain worldwide. This software is distributed * + * without any warranty. For the CC0 Public Domain Dedication, see * + * EXAMPLES_COPYING or https://creativecommons.org/publicdomain/zero/1.0 * + *************************************************************************/ + +#include <stdio.h> +#include <assert.h> +#include <string.h> + +#include <secp256k1.h> + +#include "random.h" + + + +int main(void) { + /* Instead of signing the message directly, we must sign a 32-byte hash. + * Here the message is "Hello, world!" and the hash function was SHA-256. + * An actual implementation should just call SHA-256, but this example + * hardcodes the output to avoid depending on an additional library. + * See https://bitcoin.stackexchange.com/questions/81115/if-someone-wanted-to-pretend-to-be-satoshi-by-posting-a-fake-signature-to-defrau/81116#81116 */ + unsigned char msg_hash[32] = { + 0x31, 0x5F, 0x5B, 0xDB, 0x76, 0xD0, 0x78, 0xC4, + 0x3B, 0x8A, 0xC0, 0x06, 0x4E, 0x4A, 0x01, 0x64, + 0x61, 0x2B, 0x1F, 0xCE, 0x77, 0xC8, 0x69, 0x34, + 0x5B, 0xFC, 0x94, 0xC7, 0x58, 0x94, 0xED, 0xD3, + }; + unsigned char seckey[32]; + unsigned char randomize[32]; + unsigned char compressed_pubkey[33]; + unsigned char serialized_signature[64]; + size_t len; + int is_signature_valid; + int return_val; + rustsecp256k1_v0_8_0_pubkey pubkey; + rustsecp256k1_v0_8_0_ecdsa_signature sig; + /* Before we can call actual API functions, we need to create a "context".
*/ + rustsecp256k1_v0_8_0_context* ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); + if (!fill_random(randomize, sizeof(randomize))) { + printf("Failed to generate randomness\n"); + return 1; + } + /* Randomizing the context is recommended to protect against side-channel + * leakage See `rustsecp256k1_v0_8_0_context_randomize` in secp256k1.h for more + * information about it. This should never fail. */ + return_val = rustsecp256k1_v0_8_0_context_randomize(ctx, randomize); + assert(return_val); + + /*** Key Generation ***/ + + /* If the secret key is zero or out of range (bigger than secp256k1's + * order), we try to sample a new key. Note that the probability of this + * happening is negligible. */ + while (1) { + if (!fill_random(seckey, sizeof(seckey))) { + printf("Failed to generate randomness\n"); + return 1; + } + if (rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, seckey)) { + break; + } + } + + /* Public key creation using a valid context with a verified secret key should never fail */ + return_val = rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, seckey); + assert(return_val); + + /* Serialize the pubkey in a compressed form(33 bytes). Should always return 1. */ + len = sizeof(compressed_pubkey); + return_val = rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, compressed_pubkey, &len, &pubkey, SECP256K1_EC_COMPRESSED); + assert(return_val); + /* Should be the same size as the size of the output, because we passed a 33 byte array. */ + assert(len == sizeof(compressed_pubkey)); + + /*** Signing ***/ + + /* Generate an ECDSA signature `noncefp` and `ndata` allows you to pass a + * custom nonce function, passing `NULL` will use the RFC-6979 safe default. + * Signing with a valid context, verified secret key + * and the default nonce function should never fail. */ + return_val = rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg_hash, seckey, NULL, NULL); + assert(return_val); + + /* Serialize the signature in a compact form. Should always return 1 + * according to the documentation in secp256k1.h. */ + return_val = rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(ctx, serialized_signature, &sig); + assert(return_val); + + + /*** Verification ***/ + + /* Deserialize the signature. This will return 0 if the signature can't be parsed correctly. */ + if (!rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, &sig, serialized_signature)) { + printf("Failed parsing the signature\n"); + return 1; + } + + /* Deserialize the public key. This will return 0 if the public key can't be parsed correctly. */ + if (!rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, compressed_pubkey, sizeof(compressed_pubkey))) { + printf("Failed parsing the public key\n"); + return 1; + } + + /* Verify a signature. This will return 1 if it's valid and 0 if it's not. */ + is_signature_valid = rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg_hash, &pubkey); + + printf("Is the signature valid? %s\n", is_signature_valid ? "true" : "false"); + printf("Secret Key: "); + print_hex(seckey, sizeof(seckey)); + printf("Public Key: "); + print_hex(compressed_pubkey, sizeof(compressed_pubkey)); + printf("Signature: "); + print_hex(serialized_signature, sizeof(serialized_signature)); + + + /* This will clear everything from the context and free the memory */ + rustsecp256k1_v0_8_0_context_destroy(ctx); + + /* It's best practice to try to clear secrets from memory after using them. 
+ * This is done because some bugs can allow an attacker to leak memory, for + * example through "out of bounds" array access (see Heartbleed), Or the OS + * swapping them to disk. Hence, we overwrite the secret key buffer with zeros. + * + * TODO: Prevent these writes from being optimized out, as any good compiler + * will remove any writes that aren't used. */ + memset(seckey, 0, sizeof(seckey)); + + return 0; +} diff --git a/secp256k1-sys/depend/secp256k1/examples/random.h b/secp256k1-sys/depend/secp256k1/examples/random.h new file mode 100644 index 000000000..439226f09 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/examples/random.h @@ -0,0 +1,73 @@ +/************************************************************************* + * Copyright (c) 2020-2021 Elichai Turkel * + * Distributed under the CC0 software license, see the accompanying file * + * EXAMPLES_COPYING or https://creativecommons.org/publicdomain/zero/1.0 * + *************************************************************************/ + +/* + * This file is an attempt at collecting best practice methods for obtaining randomness with different operating systems. + * It may be out-of-date. Consult the documentation of the operating system before considering to use the methods below. + * + * Platform randomness sources: + * Linux -> `getrandom(2)`(`sys/random.h`), if not available `/dev/urandom` should be used. http://man7.org/linux/man-pages/man2/getrandom.2.html, https://linux.die.net/man/4/urandom + * macOS -> `getentropy(2)`(`sys/random.h`), if not available `/dev/urandom` should be used. https://www.unix.com/man-page/mojave/2/getentropy, https://opensource.apple.com/source/xnu/xnu-517.12.7/bsd/man/man4/random.4.auto.html + * FreeBSD -> `getrandom(2)`(`sys/random.h`), if not available `kern.arandom` should be used. https://www.freebsd.org/cgi/man.cgi?query=getrandom, https://www.freebsd.org/cgi/man.cgi?query=random&sektion=4 + * OpenBSD -> `getentropy(2)`(`unistd.h`), if not available `/dev/urandom` should be used. https://man.openbsd.org/getentropy, https://man.openbsd.org/urandom + * Windows -> `BCryptGenRandom`(`bcrypt.h`). https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom + */ + +#if defined(_WIN32) +#include <windows.h> +#include <ntstatus.h> +#include <bcrypt.h> +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) +#include <sys/random.h> +#elif defined(__OpenBSD__) +#include <unistd.h> +#else +#error "Couldn't identify the OS" +#endif + +#include <stddef.h> +#include <limits.h> +#include <stdio.h> + + +/* Returns 1 on success, and 0 on failure.
*/ +static int fill_random(unsigned char* data, size_t size) { +#if defined(_WIN32) + NTSTATUS res = BCryptGenRandom(NULL, data, size, BCRYPT_USE_SYSTEM_PREFERRED_RNG); + if (res != STATUS_SUCCESS || size > ULONG_MAX) { + return 0; + } else { + return 1; + } +#elif defined(__linux__) || defined(__FreeBSD__) + /* If `getrandom(2)` is not available you should fallback to /dev/urandom */ + ssize_t res = getrandom(data, size, 0); + if (res < 0 || (size_t)res != size ) { + return 0; + } else { + return 1; + } +#elif defined(__APPLE__) || defined(__OpenBSD__) + /* If `getentropy(2)` is not available you should fallback to either + * `SecRandomCopyBytes` or /dev/urandom */ + int res = getentropy(data, size); + if (res == 0) { + return 1; + } else { + return 0; + } +#endif + return 0; +} + +static void print_hex(unsigned char* data, size_t size) { + size_t i; + printf("0x"); + for (i = 0; i < size; i++) { + printf("%02x", data[i]); + } + printf("\n"); +} diff --git a/secp256k1-sys/depend/secp256k1/examples/schnorr.c b/secp256k1-sys/depend/secp256k1/examples/schnorr.c new file mode 100644 index 000000000..748dc4414 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/examples/schnorr.c @@ -0,0 +1,148 @@ +/************************************************************************* + * Written in 2020-2022 by Elichai Turkel * + * To the extent possible under law, the author(s) have dedicated all * + * copyright and related and neighboring rights to the software in this * + * file to the public domain worldwide. This software is distributed * + * without any warranty. For the CC0 Public Domain Dedication, see * + * EXAMPLES_COPYING or https://creativecommons.org/publicdomain/zero/1.0 * + *************************************************************************/ + +#include <stdio.h> +#include <assert.h> +#include <string.h> + +#include <secp256k1.h> +#include <secp256k1_extrakeys.h> +#include <secp256k1_schnorrsig.h> + +#include "random.h" + +int main(void) { + unsigned char msg[12] = "Hello World!"; + unsigned char msg_hash[32]; + unsigned char tag[17] = "my_fancy_protocol"; + unsigned char seckey[32]; + unsigned char randomize[32]; + unsigned char auxiliary_rand[32]; + unsigned char serialized_pubkey[32]; + unsigned char signature[64]; + int is_signature_valid; + int return_val; + rustsecp256k1_v0_8_0_xonly_pubkey pubkey; + rustsecp256k1_v0_8_0_keypair keypair; + /* Before we can call actual API functions, we need to create a "context". */ + rustsecp256k1_v0_8_0_context* ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); + if (!fill_random(randomize, sizeof(randomize))) { + printf("Failed to generate randomness\n"); + return 1; + } + /* Randomizing the context is recommended to protect against side-channel + * leakage See `rustsecp256k1_v0_8_0_context_randomize` in secp256k1.h for more + * information about it. This should never fail. */ + return_val = rustsecp256k1_v0_8_0_context_randomize(ctx, randomize); + assert(return_val); + + /*** Key Generation ***/ + + /* If the secret key is zero or out of range (bigger than secp256k1's + * order), we try to sample a new key. Note that the probability of this + * happening is negligible. */ + while (1) { + if (!fill_random(seckey, sizeof(seckey))) { + printf("Failed to generate randomness\n"); + return 1; + } + /* Try to create a keypair with a valid context, it should only fail if + * the secret key is zero or out of range. */ + if (rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, seckey)) { + break; + } + } + + /* Extract the X-only public key from the keypair.
We pass NULL for + * `pk_parity` as the parity isn't needed for signing or verification. + * `rustsecp256k1_v0_8_0_keypair_xonly_pub` supports returning the parity for + * other use cases such as tests or verifying Taproot tweaks. + * This should never fail with a valid context and public key. */ + return_val = rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &pubkey, NULL, &keypair); + assert(return_val); + + /* Serialize the public key. Should always return 1 for a valid public key. */ + return_val = rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, serialized_pubkey, &pubkey); + assert(return_val); + + /*** Signing ***/ + + /* Instead of signing (possibly very long) messages directly, we sign a + * 32-byte hash of the message in this example. + * + * We use rustsecp256k1_v0_8_0_tagged_sha256 to create this hash. This function expects + * a context-specific "tag", which restricts the context in which the signed + * messages should be considered valid. For example, if protocol A mandates + * to use the tag "my_fancy_protocol" and protocol B mandates to use the tag + * "my_boring_protocol", then signed messages from protocol A will never be + * valid in protocol B (and vice versa), even if keys are reused across + * protocols. This implements "domain separation", which is considered good + * practice. It avoids attacks in which users are tricked into signing a + * message that has intended consequences in the intended context (e.g., + * protocol A) but would have unintended consequences if it were valid in + * some other context (e.g., protocol B). */ + return_val = rustsecp256k1_v0_8_0_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg)); + assert(return_val); + + /* Generate 32 bytes of randomness to use with BIP-340 schnorr signing. */ + if (!fill_random(auxiliary_rand, sizeof(auxiliary_rand))) { + printf("Failed to generate randomness\n"); + return 1; + } + + /* Generate a Schnorr signature. + * + * We use the rustsecp256k1_v0_8_0_schnorrsig_sign32 function that provides a simple + * interface for signing 32-byte messages (which in our case is a hash of + * the actual message). BIP-340 recommends passing 32 bytes of randomness + * to the signing function to improve security against side-channel attacks. + * Signing with a valid context, a 32-byte message, a verified keypair, and + * any 32 bytes of auxiliary random data should never fail. */ + return_val = rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, signature, msg_hash, &keypair, auxiliary_rand); + assert(return_val); + + /*** Verification ***/ + + /* Deserialize the public key. This will return 0 if the public key can't + * be parsed correctly */ + if (!rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &pubkey, serialized_pubkey)) { + printf("Failed parsing the public key\n"); + return 1; + } + + /* Compute the tagged hash on the received messages using the same tag as the signer. */ + return_val = rustsecp256k1_v0_8_0_tagged_sha256(ctx, msg_hash, tag, sizeof(tag), msg, sizeof(msg)); + assert(return_val); + + /* Verify a signature. This will return 1 if it's valid and 0 if it's not. */ + is_signature_valid = rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, signature, msg_hash, 32, &pubkey); + + + printf("Is the signature valid? %s\n", is_signature_valid ? 
"true" : "false"); + printf("Secret Key: "); + print_hex(seckey, sizeof(seckey)); + printf("Public Key: "); + print_hex(serialized_pubkey, sizeof(serialized_pubkey)); + printf("Signature: "); + print_hex(signature, sizeof(signature)); + + /* This will clear everything from the context and free the memory */ + rustsecp256k1_v0_8_0_context_destroy(ctx); + + /* It's best practice to try to clear secrets from memory after using them. + * This is done because some bugs can allow an attacker to leak memory, for + * example through "out of bounds" array access (see Heartbleed), Or the OS + * swapping them to disk. Hence, we overwrite the secret key buffer with zeros. + * + * TODO: Prevent these writes from being optimized out, as any good compiler + * will remove any writes that aren't used. */ + memset(seckey, 0, sizeof(seckey)); + + return 0; +} diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1.h b/secp256k1-sys/depend/secp256k1/include/secp256k1.h index a17d9fe3f..eb9307bcc 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1.h @@ -7,7 +7,7 @@ extern "C" { #include -/* Unless explicitly stated all pointer arguments must not be NULL. +/** Unless explicitly stated all pointer arguments must not be NULL. * * The following rules specify the order of arguments in API calls: * @@ -24,28 +24,32 @@ extern "C" { * 5. Opaque data pointers follow the function pointer they are to be passed to. */ -/** Opaque data structure that holds context information (precomputed tables etc.). +/** Opaque data structure that holds context information * - * The purpose of context structures is to cache large precomputed data tables - * that are expensive to construct, and also to maintain the randomization data - * for blinding. + * The primary purpose of context objects is to store randomization data for + * enhanced protection against side-channel leakage. This protection is only + * effective if the context is randomized after its creation. See + * rustsecp256k1_v0_8_0_context_create for creation of contexts and + * rustsecp256k1_v0_8_0_context_randomize for randomization. * - * Do not create a new context object for each operation, as construction is - * far slower than all other API calls (~100 times slower than an ECDSA - * verification). + * A secondary purpose of context objects is to store pointers to callback + * functions that the library will call when certain error states arise. See + * rustsecp256k1_v0_8_0_context_set_error_callback as well as + * rustsecp256k1_v0_8_0_context_set_illegal_callback for details. Future library versions + * may use context objects for additional purposes. * * A constructed context can safely be used from multiple threads * simultaneously, but API calls that take a non-const pointer to a context * need exclusive access to it. In particular this is the case for - * rustsecp256k1_v0_7_0_context_destroy, rustsecp256k1_v0_7_0_context_preallocated_destroy, - * and rustsecp256k1_v0_7_0_context_randomize. + * rustsecp256k1_v0_8_0_context_destroy, rustsecp256k1_v0_8_0_context_preallocated_destroy, + * and rustsecp256k1_v0_8_0_context_randomize. * * Regarding randomization, either do it once at creation time (in which case * you do not need any locking for the other calls), or use a read-write lock. 
*/ -typedef struct rustsecp256k1_v0_7_0_context_struct rustsecp256k1_v0_7_0_context; +typedef struct rustsecp256k1_v0_8_0_context_struct rustsecp256k1_v0_8_0_context; -/** Opaque data structure that holds rewriteable "scratch space" +/** Opaque data structure that holds rewritable "scratch space" * * The purpose of this structure is to replace dynamic memory allocations, * because we target architectures where this may not be available. It is @@ -56,7 +60,7 @@ typedef struct rustsecp256k1_v0_7_0_context_struct rustsecp256k1_v0_7_0_context; * Unlike the context object, this cannot safely be shared between threads * without additional synchronization logic. */ -typedef struct rustsecp256k1_v0_7_0_scratch_space_struct rustsecp256k1_v0_7_0_scratch_space; +typedef struct rustsecp256k1_v0_8_0_scratch_space_struct rustsecp256k1_v0_8_0_scratch_space; /** Opaque data structure that holds a parsed and valid public key. * @@ -64,12 +68,12 @@ typedef struct rustsecp256k1_v0_7_0_scratch_space_struct rustsecp256k1_v0_7_0_sc * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage or transmission, - * use rustsecp256k1_v0_7_0_ec_pubkey_serialize and rustsecp256k1_v0_7_0_ec_pubkey_parse. To - * compare keys, use rustsecp256k1_v0_7_0_ec_pubkey_cmp. + * use rustsecp256k1_v0_8_0_ec_pubkey_serialize and rustsecp256k1_v0_8_0_ec_pubkey_parse. To + * compare keys, use rustsecp256k1_v0_8_0_ec_pubkey_cmp. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_7_0_pubkey; +} rustsecp256k1_v0_8_0_pubkey; /** Opaque data structured that holds a parsed ECDSA signature. * @@ -77,12 +81,12 @@ typedef struct { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage, transmission, or - * comparison, use the rustsecp256k1_v0_7_0_ecdsa_signature_serialize_* and - * rustsecp256k1_v0_7_0_ecdsa_signature_parse_* functions. + * comparison, use the rustsecp256k1_v0_8_0_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_8_0_ecdsa_signature_parse_* functions. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_7_0_ecdsa_signature; +} rustsecp256k1_v0_8_0_ecdsa_signature; /** A pointer to a function to deterministically generate a nonce. * @@ -100,7 +104,7 @@ typedef struct { * Except for test cases, this function should compute some cryptographic hash of * the message, the algorithm, the key and the attempt. */ -typedef int (*rustsecp256k1_v0_7_0_nonce_function)( +typedef int (*rustsecp256k1_v0_8_0_nonce_function)( unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, @@ -130,7 +134,7 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( # define SECP256K1_INLINE inline # endif -/** When this header is used at build-time the SECP256K1_BUILD define needs to be set +/* When this header is used at build-time the SECP256K1_BUILD define needs to be set * to correctly setup export attributes and nullness checks. This is normally done * by secp256k1.c but to guard against this header being included before secp256k1.c * has had a chance to set the define (e.g. 
via test harnesses that just includes @@ -141,9 +145,13 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( # define SECP256K1_NO_BUILD #endif +/** At secp256k1 build-time DLL_EXPORT is defined when building objects destined + * for a shared library, but not for those intended for static libraries. + */ + #ifndef SECP256K1_API # if defined(_WIN32) -# ifdef SECP256K1_BUILD +# if defined(SECP256K1_BUILD) && defined(DLL_EXPORT) # define SECP256K1_API __declspec(dllexport) # else # define SECP256K1_API @@ -155,9 +163,9 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( # endif #endif -/**Warning attributes - * NONNULL is not used if SECP256K1_BUILD is set to avoid the compiler optimizing out - * some paranoid null checks. */ +/* Warning attributes + * NONNULL is not used if SECP256K1_BUILD is set to avoid the compiler optimizing out + * some paranoid null checks. */ # if defined(__GNUC__) && SECP256K1_GNUC_PREREQ(3, 4) # define SECP256K1_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) # else @@ -169,24 +177,39 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( # define SECP256K1_ARG_NONNULL(_x) # endif -/** All flags' lower 8 bits indicate what they're for. Do not use directly. */ +/* Attribute for marking functions, types, and variables as deprecated */ +#if !defined(SECP256K1_BUILD) && defined(__has_attribute) +# if __has_attribute(__deprecated__) +# define SECP256K1_DEPRECATED(_msg) __attribute__ ((__deprecated__(_msg))) +# else +# define SECP256K1_DEPRECATED(_msg) +# endif +#else +# define SECP256K1_DEPRECATED(_msg) +#endif + +/* All flags' lower 8 bits indicate what they're for. Do not use directly. */ #define SECP256K1_FLAGS_TYPE_MASK ((1 << 8) - 1) #define SECP256K1_FLAGS_TYPE_CONTEXT (1 << 0) #define SECP256K1_FLAGS_TYPE_COMPRESSION (1 << 1) -/** The higher bits contain the actual data. Do not use directly. */ +/* The higher bits contain the actual data. Do not use directly. */ #define SECP256K1_FLAGS_BIT_CONTEXT_VERIFY (1 << 8) #define SECP256K1_FLAGS_BIT_CONTEXT_SIGN (1 << 9) #define SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY (1 << 10) #define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8) -/** Flags to pass to rustsecp256k1_v0_7_0_context_create, rustsecp256k1_v0_7_0_context_preallocated_size, and - * rustsecp256k1_v0_7_0_context_preallocated_create. */ +/** Context flags to pass to rustsecp256k1_v0_8_0_context_create, rustsecp256k1_v0_8_0_context_preallocated_size, and + * rustsecp256k1_v0_8_0_context_preallocated_create. */ +#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) + +/** Deprecated context flags. These flags are treated equivalent to SECP256K1_CONTEXT_NONE. */ #define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) #define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN) + +/* Testing flag. Do not use. */ #define SECP256K1_CONTEXT_DECLASSIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY) -#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) -/** Flag to pass to rustsecp256k1_v0_7_0_ec_pubkey_serialize. */ +/** Flag to pass to rustsecp256k1_v0_8_0_ec_pubkey_serialize. 
*/ #define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION) #define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION) @@ -197,30 +220,73 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( #define SECP256K1_TAG_PUBKEY_HYBRID_EVEN 0x06 #define SECP256K1_TAG_PUBKEY_HYBRID_ODD 0x07 -/** A simple secp256k1 context object with no precomputed tables. These are useful for - * type serialization/parsing functions which require a context object to maintain - * API consistency, but currently do not require expensive precomputations or dynamic - * allocations. +/** A built-in constant secp256k1 context object with static storage duration, to be + * used in conjunction with rustsecp256k1_v0_8_0_selftest. + * + * This context object offers *only limited functionality* , i.e., it cannot be used + * for API functions that perform computations involving secret keys, e.g., signing + * and public key generation. If this restriction applies to a specific API function, + * it is mentioned in its documentation. See rustsecp256k1_v0_8_0_context_create if you need a + * full context object that supports all functionality offered by the library. + * + * It is highly recommended to call rustsecp256k1_v0_8_0_selftest before using this context. */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_context *rustsecp256k1_v0_7_0_context_no_precomp; +SECP256K1_API extern const rustsecp256k1_v0_8_0_context *rustsecp256k1_v0_8_0_context_static; + +/** Deprecated alias for rustsecp256k1_v0_8_0_context_static. */ +SECP256K1_API extern const rustsecp256k1_v0_8_0_context *rustsecp256k1_v0_8_0_context_no_precomp +SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_context_static instead"); + +/** Perform basic self tests (to be used in conjunction with rustsecp256k1_v0_8_0_context_static) + * + * This function performs self tests that detect some serious usage errors and + * similar conditions, e.g., when the library is compiled for the wrong endianness. + * This is a last resort measure to be used in production. The performed tests are + * very rudimentary and are not intended as a replacement for running the test + * binaries. + * + * It is highly recommended to call this before using rustsecp256k1_v0_8_0_context_static. + * It is not necessary to call this function before using a context created with + * rustsecp256k1_v0_8_0_context_create (or rustsecp256k1_v0_8_0_context_preallocated_create), which will + * take care of performing the self tests. + * + * If the tests fail, this function will call the default error handler to abort the + * program (see rustsecp256k1_v0_8_0_context_set_error_callback). + */ +SECP256K1_API void rustsecp256k1_v0_8_0_selftest(void); + /** Create a secp256k1 context object (in dynamically allocated memory). * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_7_0_preallocated.h. + * memory allocation entirely, see rustsecp256k1_v0_8_0_context_static and the functions in + * rustsecp256k1_v0_8_0_preallocated.h. * * Returns: a newly created context object. - * In: flags: which parts of the context to initialize. + * In: flags: Always set to SECP256K1_CONTEXT_NONE (see below). + * + * The only valid non-deprecated flag in recent library versions is + * SECP256K1_CONTEXT_NONE, which will create a context sufficient for all functionality + * offered by the library. 
All other (deprecated) flags will be treated as equivalent + * to the SECP256K1_CONTEXT_NONE flag. Though the flags parameter primarily exists for + * historical reasons, future versions of the library may introduce new flags. + * + * If the context is intended to be used for API functions that perform computations + * involving secret keys, e.g., signing and public key generation, then it is highly + * recommended to call rustsecp256k1_v0_8_0_context_randomize on the context before calling + * those API functions. This will provide enhanced protection against side-channel + * leakage, see rustsecp256k1_v0_8_0_context_randomize for details. * - * See also rustsecp256k1_v0_7_0_context_randomize. + * Do not create a new context object for each operation, as construction and + * randomization can take non-negligible time. */ /** Copy a secp256k1 context object (into dynamically allocated memory). * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_7_0_preallocated.h. + * memory allocation entirely, see the functions in rustsecp256k1_v0_8_0_preallocated.h. * * Returns: a newly created context object. * Args: ctx: an existing context to copy @@ -230,14 +296,14 @@ SECP256K1_API extern const rustsecp256k1_v0_7_0_context *rustsecp256k1_v0_7_0_co * * The context pointer may not be used afterwards. * - * The context to destroy must have been created using rustsecp256k1_v0_7_0_context_create - * or rustsecp256k1_v0_7_0_context_clone. If the context has instead been created using - * rustsecp256k1_v0_7_0_context_preallocated_create or rustsecp256k1_v0_7_0_context_preallocated_clone, the - * behaviour is undefined. In that case, rustsecp256k1_v0_7_0_context_preallocated_destroy must + * The context to destroy must have been created using rustsecp256k1_v0_8_0_context_create + * or rustsecp256k1_v0_8_0_context_clone. If the context has instead been created using + * rustsecp256k1_v0_8_0_context_preallocated_create or rustsecp256k1_v0_8_0_context_preallocated_clone, the + * behaviour is undefined. In that case, rustsecp256k1_v0_8_0_context_preallocated_destroy must * be used instead. * * Args: ctx: an existing context to destroy, constructed using - * rustsecp256k1_v0_7_0_context_create or rustsecp256k1_v0_7_0_context_clone + * rustsecp256k1_v0_8_0_context_create or rustsecp256k1_v0_8_0_context_clone */ /** Set a callback function to be called when an illegal argument is passed to @@ -261,11 +327,11 @@ SECP256K1_API extern const rustsecp256k1_v0_7_0_context *rustsecp256k1_v0_7_0_co * USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build * has been configured with --enable-external-default-callbacks. 
Then the * following two symbols must be provided to link against: - * - void rustsecp256k1_v0_7_0_default_illegal_callback_fn(const char* message, void* data); - * - void rustsecp256k1_v0_7_0_default_error_callback_fn(const char* message, void* data); + * - void rustsecp256k1_v0_8_0_default_illegal_callback_fn(const char* message, void* data); + * - void rustsecp256k1_v0_8_0_default_error_callback_fn(const char* message, void* data); * The library can call these default handlers even before a proper callback data - * pointer could have been set using rustsecp256k1_v0_7_0_context_set_illegal_callback or - * rustsecp256k1_v0_7_0_context_set_error_callback, e.g., when the creation of a context + * pointer could have been set using rustsecp256k1_v0_8_0_context_set_illegal_callback or + * rustsecp256k1_v0_8_0_context_set_error_callback, e.g., when the creation of a context * fails. In this case, the corresponding default handler will be called with * the data pointer argument set to NULL. * @@ -275,35 +341,38 @@ SECP256K1_API extern const rustsecp256k1_v0_7_0_context *rustsecp256k1_v0_7_0_co * (NULL restores the default handler.) * data: the opaque pointer to pass to fun above, must be NULL for the default handler. * - * See also rustsecp256k1_v0_7_0_context_set_error_callback. + * See also rustsecp256k1_v0_8_0_context_set_error_callback. */ -SECP256K1_API void rustsecp256k1_v0_7_0_context_set_illegal_callback( - rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API void rustsecp256k1_v0_8_0_context_set_illegal_callback( + rustsecp256k1_v0_8_0_context* ctx, void (*fun)(const char* message, void* data), const void* data ) SECP256K1_ARG_NONNULL(1); /** Set a callback function to be called when an internal consistency check - * fails. The default is crashing. + * fails. + * + * The default callback writes an error message to stderr and calls abort + * to abort the program. * * This can only trigger in case of a hardware failure, miscompilation, * memory corruption, serious bug in the library, or other error would can * otherwise result in undefined behaviour. It will not trigger due to mere - * incorrect usage of the API (see rustsecp256k1_v0_7_0_context_set_illegal_callback + * incorrect usage of the API (see rustsecp256k1_v0_8_0_context_set_illegal_callback * for that). After this callback returns, anything may happen, including * crashing. * * Args: ctx: an existing context object. * In: fun: a pointer to a function to call when an internal error occurs, * taking a message and an opaque pointer (NULL restores the - * default handler, see rustsecp256k1_v0_7_0_context_set_illegal_callback + * default handler, see rustsecp256k1_v0_8_0_context_set_illegal_callback * for details). * data: the opaque pointer to pass to fun above, must be NULL for the default handler. * - * See also rustsecp256k1_v0_7_0_context_set_illegal_callback. + * See also rustsecp256k1_v0_8_0_context_set_illegal_callback. */ -SECP256K1_API void rustsecp256k1_v0_7_0_context_set_error_callback( - rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API void rustsecp256k1_v0_8_0_context_set_error_callback( + rustsecp256k1_v0_8_0_context* ctx, void (*fun)(const char* message, void* data), const void* data ) SECP256K1_ARG_NONNULL(1); @@ -337,9 +406,9 @@ SECP256K1_API void rustsecp256k1_v0_7_0_context_set_error_callback( * 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header * byte 0x06 or 0x07) format public keys. 
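For the parse/serialize pair declared in the hunks that follow, a sketch of typical caller code under the renamed symbols; `ctx` can be any valid context, e.g. one from `rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE)` (illustrative only, not part of the patch):

```
/* Illustrative sketch, not part of the patch. */
#include <secp256k1.h>

static int recompress_pubkey(const rustsecp256k1_v0_8_0_context *ctx,
                             const unsigned char in33[33],
                             unsigned char out65[65]) {
    rustsecp256k1_v0_8_0_pubkey pubkey;
    size_t outlen = 65;

    if (!rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, in33, 33)) {
        return 0; /* not a valid compressed point encoding */
    }
    /* Re-serialize the parsed key in the 65-byte uncompressed format. */
    return rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, out65, &outlen, &pubkey,
                                                    SECP256K1_EC_UNCOMPRESSED);
}
```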
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_parse( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey* pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_parse( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey* pubkey, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -354,16 +423,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_pa * In/Out: outputlen: a pointer to an integer which is initially set to the * size of output, and is overwritten with the written * size. - * In: pubkey: a pointer to a rustsecp256k1_v0_7_0_pubkey containing an + * In: pubkey: a pointer to a rustsecp256k1_v0_8_0_pubkey containing an * initialized public key. * flags: SECP256K1_EC_COMPRESSED if serialization should be in * compressed format, otherwise SECP256K1_EC_UNCOMPRESSED. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ec_pubkey_serialize( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_ec_pubkey_serialize( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_7_0_pubkey* pubkey, + const rustsecp256k1_v0_8_0_pubkey* pubkey, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -376,10 +445,10 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ec_pubkey_serialize( * In: pubkey1: first public key to compare * pubkey2: second public key to compare */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_cmp( - const rustsecp256k1_v0_7_0_context* ctx, - const rustsecp256k1_v0_7_0_pubkey* pubkey1, - const rustsecp256k1_v0_7_0_pubkey* pubkey2 +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_cmp( + const rustsecp256k1_v0_8_0_context* ctx, + const rustsecp256k1_v0_8_0_pubkey* pubkey1, + const rustsecp256k1_v0_8_0_pubkey* pubkey2 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Parse an ECDSA signature in compact (64 bytes) format. @@ -394,12 +463,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_cm * encoding is invalid. R and S with value 0 are allowed in the encoding. * * After the call, sig will always be initialized. If parsing failed or R or - * S are zero, the resulting sig value is guaranteed to fail validation for any - * message and public key. + * S are zero, the resulting sig value is guaranteed to fail verification for + * any message and public key. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature* sig, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input64 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -415,12 +484,12 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact( * encoded numbers are out of range. * * After the call, sig will always be initialized. If parsing failed or the - * encoded numbers are out of range, signature validation with it is + * encoded numbers are out of range, signature verification with it is * guaranteed to fail for every message and public key. 
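The compact-encoding behaviour described above can be exercised with a simple parse/serialize round trip; an illustrative sketch against the renamed API, not part of the patch:

```
/* Illustrative sketch, not part of the patch. */
#include <secp256k1.h>

static int roundtrip_compact_sig(const rustsecp256k1_v0_8_0_context *ctx,
                                 const unsigned char in64[64],
                                 unsigned char out64[64]) {
    rustsecp256k1_v0_8_0_ecdsa_signature sig;

    /* Fails if r or s is out of range; sig is still initialized and is
     * then guaranteed to fail verification, as documented above. */
    if (!rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, &sig, in64)) {
        return 0;
    }
    return rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(ctx, out64, &sig);
}
```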
*/ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature* sig, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_parse_der( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -436,11 +505,11 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der( * if 0 was returned). * In: sig: a pointer to an initialized signature object */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_7_0_ecdsa_signature* sig + const rustsecp256k1_v0_8_0_ecdsa_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Serialize an ECDSA signature in compact (64 byte) format. @@ -450,19 +519,19 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der( * Out: output64: a pointer to a 64-byte array to store the compact serialization * In: sig: a pointer to an initialized signature object * - * See rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact for details about the encoding. + * See rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact for details about the encoding. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output64, - const rustsecp256k1_v0_7_0_ecdsa_signature* sig + const rustsecp256k1_v0_8_0_ecdsa_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Verify an ECDSA signature. * * Returns: 1: correct signature * 0: incorrect or unparseable signature - * Args: ctx: a secp256k1 context object, initialized for verification. + * Args: ctx: a secp256k1 context object. * In: sig: the signature being verified. * msghash32: the 32-byte message hash being verified. * The verifier must make sure to apply a cryptographic @@ -478,16 +547,16 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact( * form are accepted. * * If you need to accept ECDSA signatures from sources that do not obey this - * rule, apply rustsecp256k1_v0_7_0_ecdsa_signature_normalize to the signature prior to - * validation, but be aware that doing so results in malleable signatures. + * rule, apply rustsecp256k1_v0_8_0_ecdsa_signature_normalize to the signature prior to + * verification, but be aware that doing so results in malleable signatures. * * For details, see the comments for that function. 
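If signatures may arrive from systems that do not enforce lower-S, the normalize-then-verify pattern recommended above looks roughly like this (illustrative sketch, not part of the patch):

```
/* Illustrative sketch, not part of the patch. */
#include <secp256k1.h>

static int verify_lenient(const rustsecp256k1_v0_8_0_context *ctx,
                          const rustsecp256k1_v0_8_0_ecdsa_signature *sig,
                          const unsigned char msghash32[32],
                          const rustsecp256k1_v0_8_0_pubkey *pubkey) {
    rustsecp256k1_v0_8_0_ecdsa_signature norm;

    /* Copies sig into norm, converting it to lower-S form if necessary.
     * This accepts malleable encodings; see the caveat above. */
    rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, &norm, sig);
    return rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &norm, msghash32, pubkey);
}
```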
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ecdsa_verify( - const rustsecp256k1_v0_7_0_context* ctx, - const rustsecp256k1_v0_7_0_ecdsa_signature *sig, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ecdsa_verify( + const rustsecp256k1_v0_8_0_context* ctx, + const rustsecp256k1_v0_8_0_ecdsa_signature *sig, const unsigned char *msghash32, - const rustsecp256k1_v0_7_0_pubkey *pubkey + const rustsecp256k1_v0_8_0_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Convert a signature to a normalized lower-S form. @@ -526,50 +595,50 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ecdsa_verify * accept various non-unique encodings, so care should be taken when this * property is required for an application. * - * The rustsecp256k1_v0_7_0_ecdsa_sign function will by default create signatures in the - * lower-S form, and rustsecp256k1_v0_7_0_ecdsa_verify will not accept others. In case + * The rustsecp256k1_v0_8_0_ecdsa_sign function will by default create signatures in the + * lower-S form, and rustsecp256k1_v0_8_0_ecdsa_verify will not accept others. In case * signatures come from a system that cannot enforce this property, - * rustsecp256k1_v0_7_0_ecdsa_signature_normalize must be called before verification. + * rustsecp256k1_v0_8_0_ecdsa_signature_normalize must be called before verification. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_normalize( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature *sigout, - const rustsecp256k1_v0_7_0_ecdsa_signature *sigin +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_normalize( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature *sigout, + const rustsecp256k1_v0_8_0_ecdsa_signature *sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3); /** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function. * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of * extra entropy. */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_nonce_function rustsecp256k1_v0_7_0_nonce_function_rfc6979; +SECP256K1_API extern const rustsecp256k1_v0_8_0_nonce_function rustsecp256k1_v0_8_0_nonce_function_rfc6979; -/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_7_0_nonce_function_rfc6979). */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_nonce_function rustsecp256k1_v0_7_0_nonce_function_default; +/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_8_0_nonce_function_rfc6979). */ +SECP256K1_API extern const rustsecp256k1_v0_8_0_nonce_function rustsecp256k1_v0_8_0_nonce_function_default; /** Create an ECDSA signature. * * Returns: 1: signature created * 0: the nonce generation function failed, or the secret key was invalid. - * Args: ctx: pointer to a context object, initialized for signing. + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_0_context_static). * Out: sig: pointer to an array where the signature will be placed. * In: msghash32: the 32-byte message hash being signed. * seckey: pointer to a 32-byte secret key. * noncefp: pointer to a nonce generation function. If NULL, - * rustsecp256k1_v0_7_0_nonce_function_default is used. + * rustsecp256k1_v0_8_0_nonce_function_default is used. * ndata: pointer to arbitrary data used by the nonce generation function * (can be NULL). 
If it is non-NULL and - * rustsecp256k1_v0_7_0_nonce_function_default is used, then ndata must be a + * rustsecp256k1_v0_8_0_nonce_function_default is used, then ndata must be a * pointer to 32-bytes of additional data. * * The created signature is always in lower-S form. See - * rustsecp256k1_v0_7_0_ecdsa_signature_normalize for more details. + * rustsecp256k1_v0_8_0_ecdsa_signature_normalize for more details. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_sign( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature *sig, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_sign( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature *sig, const unsigned char *msghash32, const unsigned char *seckey, - rustsecp256k1_v0_7_0_nonce_function noncefp, + rustsecp256k1_v0_8_0_nonce_function noncefp, const void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -585,8 +654,8 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_sign( * Args: ctx: pointer to a context object. * In: seckey: pointer to a 32-byte secret key. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_verify( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_seckey_verify( + const rustsecp256k1_v0_8_0_context* ctx, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); @@ -594,37 +663,38 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_ve * * Returns: 1: secret was valid, public key stored. * 0: secret was invalid, try again. - * Args: ctx: pointer to a context object, initialized for signing. + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_0_context_static). * Out: pubkey: pointer to the created public key. * In: seckey: pointer to a 32-byte secret key. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_create( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_create( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Negates a secret key in place. * * Returns: 0 if the given secret key is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify. 1 otherwise + * rustsecp256k1_v0_8_0_ec_seckey_verify. 1 otherwise * Args: ctx: pointer to a context object * In/Out: seckey: pointer to the 32-byte secret key to be negated. If the * secret key is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0 and + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0 and * seckey will be set to some unspecified value. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_negate( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_seckey_negate( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); -/** Same as rustsecp256k1_v0_7_0_ec_seckey_negate, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_8_0_ec_seckey_negate, but DEPRECATED. Will be removed in * future versions.
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_privkey_negate( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_privkey_negate( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_ec_seckey_negate instead"); /** Negates a public key in place. * @@ -632,9 +702,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_privkey_n * Args: ctx: pointer to a context object * In/Out: pubkey: pointer to the public key to be negated. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_negate( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_negate( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); /** Tweak a secret key by adding tweak to it. @@ -644,44 +714,45 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_ne * otherwise. * Args: ctx: pointer to a context object. * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_7_0_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_8_0_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_tweak_add( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_seckey_tweak_add( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_7_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_8_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_privkey_tweak_add( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_privkey_tweak_add( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_ec_seckey_tweak_add instead"); /** Tweak a public key by adding tweak times the generator to it. * * Returns: 0 if the arguments are invalid or the resulting public key would be * invalid (only when the tweak is the negation of the corresponding * secret key). 1 otherwise. - * Args: ctx: pointer to a context object initialized for validation. + * Args: ctx: pointer to a context object. * In/Out: pubkey: pointer to a public key object. 
pubkey will be set to an * invalid value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_tweak_add( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_tweak_add( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -690,72 +761,84 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_tw * Returns: 0 if the arguments are invalid. 1 otherwise. * Args: ctx: pointer to a context object. * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_7_0_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_8_0_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_tweak_mul( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_seckey_tweak_mul( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_7_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_8_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_privkey_tweak_mul( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_privkey_tweak_mul( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_ec_seckey_tweak_mul instead"); /** Tweak a public key by multiplying it by a tweak value. * * Returns: 0 if the arguments are invalid. 1 otherwise. - * Args: ctx: pointer to a context object initialized for validation. + * Args: ctx: pointer to a context object. * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). 
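(Editorial usage sketch, not part of the vendored header: keeping a secret key and its public key consistent under the same additive tweak, as in BIP32-style derivation. The helper name is illustrative; both calls can fail for invalid tweaks, so the results must be checked.)

```c
#include <string.h>
#include "secp256k1.h" /* vendored header with renamed symbols */

/* Applies `tweak32` to both halves of a keypair. Returns 0 and wipes the
 * secret key on failure. */
static int tweak_add_keypair(const rustsecp256k1_v0_8_0_context *ctx,
                             unsigned char seckey32[32],
                             rustsecp256k1_v0_8_0_pubkey *pubkey,
                             const unsigned char tweak32[32]) {
    if (!rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, seckey32, tweak32)) {
        memset(seckey32, 0, 32); /* seckey is unspecified on failure */
        return 0;
    }
    if (!rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, pubkey, tweak32)) {
        return 0; /* pubkey is invalid on failure */
    }
    return 1; /* on success, pubkey still corresponds to seckey32 */
}
```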
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Updates the context randomization to protect against side-channel leakage. - * Returns: 1: randomization successfully updated or nothing to randomize +/** Randomizes the context to provide enhanced protection against side-channel leakage. + * + * Returns: 1: randomization successful (or called on copy of rustsecp256k1_v0_8_0_context_static) * 0: error * Args: ctx: pointer to a context object. * In: seed32: pointer to a 32-byte random seed (NULL resets to initial state) * - * While secp256k1 code is written to be constant-time no matter what secret - * values are, it's possible that a future compiler may output code which isn't, + * While secp256k1 code is written and tested to be constant-time no matter what + * secret values are, it is possible that a compiler may output code which is not, * and also that the CPU may not emit the same radio frequencies or draw the same - * amount power for all values. - * - * This function provides a seed which is combined into the blinding value: that - * blinding value is added before each multiplication (and removed afterwards) so - * that it does not affect function results, but shields against attacks which - * rely on any input-dependent behaviour. - * - * This function has currently an effect only on contexts initialized for signing - * because randomization is currently used only for signing. However, this is not - * guaranteed and may change in the future. It is safe to call this function on - * contexts not initialized for signing; then it will have no effect and return 1. - * - * You should call this after rustsecp256k1_v0_7_0_context_create or - * rustsecp256k1_v0_7_0_context_clone (and rustsecp256k1_v0_7_0_context_preallocated_create or - * rustsecp256k1_v0_7_0_context_clone, resp.), and you may call this repeatedly afterwards. - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_context_randomize( - rustsecp256k1_v0_7_0_context* ctx, + * amount of power for all values. Randomization of the context shields against + * side-channel observations which aim to exploit secret-dependent behaviour in + * certain computations which involve secret keys. + * + * It is highly recommended to call this function on contexts returned from + * rustsecp256k1_v0_8_0_context_create or rustsecp256k1_v0_8_0_context_clone (or from the corresponding + * functions in rustsecp256k1_v0_8_0_preallocated.h) before using these contexts to call API + * functions that perform computations involving secret keys, e.g., signing and + * public key generation. It is possible to call this function more than once on + * the same context, and doing so before every few computations involving secret + * keys is recommended as a defense-in-depth measure. + * + * Currently, the random seed is mainly used for blinding multiplications of a + * secret scalar with the elliptic curve base point. Multiplications of this + * kind are performed by exactly those API functions which are documented to + * require a context that is not the rustsecp256k1_v0_8_0_context_static. 
As a rule of thumb, + * these are all functions which take a secret key (or a keypair) as an input. + * A notable exception to that rule is the ECDH module, which relies on a different + * kind of elliptic curve point multiplication and thus does not benefit from + * enhanced protection against side-channel leakage currently. + * + * It is safe to call this function on a copy of rustsecp256k1_v0_8_0_context_static in writable + * memory (e.g., obtained via rustsecp256k1_v0_8_0_context_clone). In that case, this + * function is guaranteed to return 1, but the call will have no effect because + * the static context (or a copy thereof) is not meant to be randomized. + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_context_randomize( + rustsecp256k1_v0_8_0_context* ctx, const unsigned char *seed32 ) SECP256K1_ARG_NONNULL(1); @@ -768,10 +851,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_context_rand * In: ins: pointer to array of pointers to public keys. * n: the number of public keys to add together (must be at least 1). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_combine( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *out, - const rustsecp256k1_v0_7_0_pubkey * const * ins, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_combine( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *out, + const rustsecp256k1_v0_8_0_pubkey * const * ins, size_t n ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -783,7 +866,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_co * implementations optimized for a specific tag can precompute the SHA256 state * after hashing the tag hashes. * - * Returns 0 if the arguments are invalid and 1 otherwise. + * Returns: 1 always. * Args: ctx: pointer to a context object * Out: hash32: pointer to a 32-byte array to store the resulting hash * In: tag: pointer to an array containing the tag @@ -791,8 +874,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_co * msg: pointer to an array containing the message * msglen: length of the message array */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_tagged_sha256( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_tagged_sha256( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1.h.orig b/secp256k1-sys/depend/secp256k1/include/secp256k1.h.orig index 9eec69328..f3c99d1b5 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1.h.orig +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1.h.orig @@ -7,7 +7,7 @@ extern "C" { #include <stddef.h> -/* Unless explicitly stated all pointer arguments must not be NULL. +/** Unless explicitly stated all pointer arguments must not be NULL. * * The following rules specify the order of arguments in API calls: * @@ -24,28 +24,32 @@ extern "C" { * 5. Opaque data pointers follow the function pointer they are to be passed to. */ -/** Opaque data structure that holds context information (precomputed tables etc.). +/** Opaque data structure that holds context information * - * The purpose of context structures is to cache large precomputed data tables - * that are expensive to construct, and also to maintain the randomization data - * for blinding.
+ * The primary purpose of context objects is to store randomization data for + * enhanced protection against side-channel leakage. This protection is only + * effective if the context is randomized after its creation. See + * rustsecp256k1_v0_8_0_context_create for creation of contexts and + * rustsecp256k1_v0_8_0_context_randomize for randomization. * - * Do not create a new context object for each operation, as construction is - * far slower than all other API calls (~100 times slower than an ECDSA - * verification). + * A secondary purpose of context objects is to store pointers to callback + * functions that the library will call when certain error states arise. See + * rustsecp256k1_v0_8_0_context_set_error_callback as well as + * rustsecp256k1_v0_8_0_context_set_illegal_callback for details. Future library versions + * may use context objects for additional purposes. * * A constructed context can safely be used from multiple threads * simultaneously, but API calls that take a non-const pointer to a context * need exclusive access to it. In particular this is the case for - * rustsecp256k1_v0_7_0_context_destroy, rustsecp256k1_v0_7_0_context_preallocated_destroy, - * and rustsecp256k1_v0_7_0_context_randomize. + * rustsecp256k1_v0_8_0_context_destroy, rustsecp256k1_v0_8_0_context_preallocated_destroy, + * and rustsecp256k1_v0_8_0_context_randomize. * * Regarding randomization, either do it once at creation time (in which case * you do not need any locking for the other calls), or use a read-write lock. */ -typedef struct rustsecp256k1_v0_7_0_context_struct rustsecp256k1_v0_7_0_context; +typedef struct rustsecp256k1_v0_8_0_context_struct rustsecp256k1_v0_8_0_context; -/** Opaque data structure that holds rewriteable "scratch space" +/** Opaque data structure that holds rewritable "scratch space" * * The purpose of this structure is to replace dynamic memory allocations, * because we target architectures where this may not be available. It is @@ -56,7 +60,7 @@ typedef struct rustsecp256k1_v0_7_0_context_struct rustsecp256k1_v0_7_0_context; * Unlike the context object, this cannot safely be shared between threads * without additional synchronization logic. */ -typedef struct rustsecp256k1_v0_7_0_scratch_space_struct rustsecp256k1_v0_7_0_scratch_space; +typedef struct rustsecp256k1_v0_8_0_scratch_space_struct rustsecp256k1_v0_8_0_scratch_space; /** Opaque data structure that holds a parsed and valid public key. * @@ -64,12 +68,12 @@ typedef struct rustsecp256k1_v0_7_0_scratch_space_struct rustsecp256k1_v0_7_0_sc * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage or transmission, - * use rustsecp256k1_v0_7_0_ec_pubkey_serialize and rustsecp256k1_v0_7_0_ec_pubkey_parse. To - * compare keys, use rustsecp256k1_v0_7_0_ec_pubkey_cmp. + * use rustsecp256k1_v0_8_0_ec_pubkey_serialize and rustsecp256k1_v0_8_0_ec_pubkey_parse. To + * compare keys, use rustsecp256k1_v0_8_0_ec_pubkey_cmp. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_7_0_pubkey; +} rustsecp256k1_v0_8_0_pubkey; /** Opaque data structure that holds a parsed ECDSA signature. * @@ -77,12 +81,12 @@ typedef struct { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved.
* If you need to convert to a format suitable for storage, transmission, or - * comparison, use the rustsecp256k1_v0_7_0_ecdsa_signature_serialize_* and - * rustsecp256k1_v0_7_0_ecdsa_signature_parse_* functions. + * comparison, use the rustsecp256k1_v0_8_0_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_8_0_ecdsa_signature_parse_* functions. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_7_0_ecdsa_signature; +} rustsecp256k1_v0_8_0_ecdsa_signature; /** A pointer to a function to deterministically generate a nonce. * @@ -100,7 +104,7 @@ typedef struct { * Except for test cases, this function should compute some cryptographic hash of * the message, the algorithm, the key and the attempt. */ -typedef int (*rustsecp256k1_v0_7_0_nonce_function)( +typedef int (*rustsecp256k1_v0_8_0_nonce_function)( unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, @@ -130,7 +134,7 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( # define SECP256K1_INLINE inline # endif -/** When this header is used at build-time the SECP256K1_BUILD define needs to be set +/* When this header is used at build-time the SECP256K1_BUILD define needs to be set * to correctly setup export attributes and nullness checks. This is normally done * by secp256k1.c but to guard against this header being included before secp256k1.c * has had a chance to set the define (e.g. via test harnesses that just includes @@ -141,9 +145,13 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( # define SECP256K1_NO_BUILD #endif +/** At secp256k1 build-time DLL_EXPORT is defined when building objects destined + * for a shared library, but not for those intended for static libraries. + */ + #ifndef SECP256K1_API # if defined(_WIN32) -# ifdef SECP256K1_BUILD +# if defined(SECP256K1_BUILD) && defined(DLL_EXPORT) # define SECP256K1_API __declspec(dllexport) # else # define SECP256K1_API @@ -155,9 +163,9 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( # endif #endif -/**Warning attributes - * NONNULL is not used if SECP256K1_BUILD is set to avoid the compiler optimizing out - * some paranoid null checks. */ +/* Warning attributes + * NONNULL is not used if SECP256K1_BUILD is set to avoid the compiler optimizing out + * some paranoid null checks. */ # if defined(__GNUC__) && SECP256K1_GNUC_PREREQ(3, 4) # define SECP256K1_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) # else @@ -169,24 +177,39 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( # define SECP256K1_ARG_NONNULL(_x) # endif -/** All flags' lower 8 bits indicate what they're for. Do not use directly. */ +/* Attribute for marking functions, types, and variables as deprecated */ +#if !defined(SECP256K1_BUILD) && defined(__has_attribute) +# if __has_attribute(__deprecated__) +# define SECP256K1_DEPRECATED(_msg) __attribute__ ((__deprecated__(_msg))) +# else +# define SECP256K1_DEPRECATED(_msg) +# endif +#else +# define SECP256K1_DEPRECATED(_msg) +#endif + +/* All flags' lower 8 bits indicate what they're for. Do not use directly. */ #define SECP256K1_FLAGS_TYPE_MASK ((1 << 8) - 1) #define SECP256K1_FLAGS_TYPE_CONTEXT (1 << 0) #define SECP256K1_FLAGS_TYPE_COMPRESSION (1 << 1) -/** The higher bits contain the actual data. Do not use directly. */ +/* The higher bits contain the actual data. Do not use directly. 
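(Editorial usage sketch, not part of the vendored header: how the SECP256K1_DEPRECATED attribute defined above surfaces for API consumers on GCC/Clang builds, using the deprecated negate entry point annotated later in this header. Since negation is an involution, calling it twice restores the original key.)

```c
#include "secp256k1.h" /* vendored header with renamed symbols */

static int negate_twice(const rustsecp256k1_v0_8_0_context *ctx,
                        unsigned char seckey32[32]) {
    int ret;
    /* Emits, in approximate GCC wording: warning:
     * 'rustsecp256k1_v0_8_0_ec_privkey_negate' is deprecated:
     * Use rustsecp256k1_v0_8_0_ec_seckey_negate instead */
    ret = rustsecp256k1_v0_8_0_ec_privkey_negate(ctx, seckey32);
    if (!ret) return 0;
    /* The non-deprecated replacement compiles without a warning. */
    return rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, seckey32);
}
```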
*/ #define SECP256K1_FLAGS_BIT_CONTEXT_VERIFY (1 << 8) #define SECP256K1_FLAGS_BIT_CONTEXT_SIGN (1 << 9) #define SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY (1 << 10) #define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8) -/** Flags to pass to rustsecp256k1_v0_7_0_context_create, rustsecp256k1_v0_7_0_context_preallocated_size, and - * rustsecp256k1_v0_7_0_context_preallocated_create. */ +/** Context flags to pass to rustsecp256k1_v0_8_0_context_create, rustsecp256k1_v0_8_0_context_preallocated_size, and + * rustsecp256k1_v0_8_0_context_preallocated_create. */ +#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) + +/** Deprecated context flags. These flags are treated equivalent to SECP256K1_CONTEXT_NONE. */ #define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) #define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN) + +/* Testing flag. Do not use. */ #define SECP256K1_CONTEXT_DECLASSIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY) -#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) -/** Flag to pass to rustsecp256k1_v0_7_0_ec_pubkey_serialize. */ +/** Flag to pass to rustsecp256k1_v0_8_0_ec_pubkey_serialize. */ #define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION) #define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION) @@ -197,25 +220,68 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function)( #define SECP256K1_TAG_PUBKEY_HYBRID_EVEN 0x06 #define SECP256K1_TAG_PUBKEY_HYBRID_ODD 0x07 -/** A simple secp256k1 context object with no precomputed tables. These are useful for - * type serialization/parsing functions which require a context object to maintain - * API consistency, but currently do not require expensive precomputations or dynamic - * allocations. +/** A built-in constant secp256k1 context object with static storage duration, to be + * used in conjunction with rustsecp256k1_v0_8_0_selftest. + * + * This context object offers *only limited functionality* , i.e., it cannot be used + * for API functions that perform computations involving secret keys, e.g., signing + * and public key generation. If this restriction applies to a specific API function, + * it is mentioned in its documentation. See rustsecp256k1_v0_8_0_context_create if you need a + * full context object that supports all functionality offered by the library. + * + * It is highly recommended to call rustsecp256k1_v0_8_0_selftest before using this context. */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_context *rustsecp256k1_v0_7_0_context_no_precomp; +SECP256K1_API extern const rustsecp256k1_v0_8_0_context *rustsecp256k1_v0_8_0_context_static; + +/** Deprecated alias for rustsecp256k1_v0_8_0_context_static. */ +SECP256K1_API extern const rustsecp256k1_v0_8_0_context *rustsecp256k1_v0_8_0_context_no_precomp +SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_context_static instead"); + +/** Perform basic self tests (to be used in conjunction with rustsecp256k1_v0_8_0_context_static) + * + * This function performs self tests that detect some serious usage errors and + * similar conditions, e.g., when the library is compiled for the wrong endianness. + * This is a last resort measure to be used in production. The performed tests are + * very rudimentary and are not intended as a replacement for running the test + * binaries. + * + * It is highly recommended to call this before using rustsecp256k1_v0_8_0_context_static. 
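(Editorial usage sketch, not part of the vendored header: a verification-only caller that relies on the static context documented above. No secret keys are involved, so no allocation or randomization is needed; the helper name is illustrative.)

```c
#include "secp256k1.h" /* vendored header with renamed symbols */

static int verify_with_static_context(const rustsecp256k1_v0_8_0_ecdsa_signature *sig,
                                      const unsigned char *msghash32,
                                      const rustsecp256k1_v0_8_0_pubkey *pubkey) {
    /* Recommended before first use of the static context; if the basic
     * self tests fail, the default error handler aborts the program. */
    rustsecp256k1_v0_8_0_selftest();
    return rustsecp256k1_v0_8_0_ecdsa_verify(rustsecp256k1_v0_8_0_context_static,
                                             sig, msghash32, pubkey);
}
```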
+ * It is not necessary to call this function before using a context created with + * rustsecp256k1_v0_8_0_context_create (or rustsecp256k1_v0_8_0_context_preallocated_create), which will + * take care of performing the self tests. + * + * If the tests fail, this function will call the default error handler to abort the + * program (see rustsecp256k1_v0_8_0_context_set_error_callback). + */ +SECP256K1_API void rustsecp256k1_v0_8_0_selftest(void); + /** Create a secp256k1 context object (in dynamically allocated memory). * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_7_0_preallocated.h. + * memory allocation entirely, see rustsecp256k1_v0_8_0_context_static and the functions in + * rustsecp256k1_v0_8_0_preallocated.h. * * Returns: a newly created context object. - * In: flags: which parts of the context to initialize. + * In: flags: Always set to SECP256K1_CONTEXT_NONE (see below). + * + * The only valid non-deprecated flag in recent library versions is + * SECP256K1_CONTEXT_NONE, which will create a context sufficient for all functionality + * offered by the library. All other (deprecated) flags will be treated as equivalent + * to the SECP256K1_CONTEXT_NONE flag. Though the flags parameter primarily exists for + * historical reasons, future versions of the library may introduce new flags. + * + * If the context is intended to be used for API functions that perform computations + * involving secret keys, e.g., signing and public key generation, then it is highly + * recommended to call rustsecp256k1_v0_8_0_context_randomize on the context before calling + * those API functions. This will provide enhanced protection against side-channel + * leakage, see rustsecp256k1_v0_8_0_context_randomize for details. * - * See also rustsecp256k1_v0_7_0_context_randomize. + * Do not create a new context object for each operation, as construction and + * randomization can take non-negligible time. */ -SECP256K1_API rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_create( +SECP256K1_API rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_create( unsigned int flags ) SECP256K1_WARN_UNUSED_RESULT; @@ -223,30 +289,30 @@ SECP256K1_API rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_create( * * This function uses malloc to allocate memory. It is guaranteed that malloc is * called at most once for every call of this function. If you need to avoid dynamic - * memory allocation entirely, see the functions in rustsecp256k1_v0_7_0_preallocated.h. + * memory allocation entirely, see the functions in rustsecp256k1_v0_8_0_preallocated.h. * * Returns: a newly created context object. * Args: ctx: an existing context to copy */ -SECP256K1_API rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_clone( - const rustsecp256k1_v0_7_0_context* ctx +SECP256K1_API rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_clone( + const rustsecp256k1_v0_8_0_context* ctx ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; /** Destroy a secp256k1 context object (created in dynamically allocated memory). * * The context pointer may not be used afterwards. * - * The context to destroy must have been created using rustsecp256k1_v0_7_0_context_create - * or rustsecp256k1_v0_7_0_context_clone. 
If the context has instead been created using - * rustsecp256k1_v0_7_0_context_preallocated_create or rustsecp256k1_v0_7_0_context_preallocated_clone, the - * behaviour is undefined. In that case, rustsecp256k1_v0_7_0_context_preallocated_destroy must + * The context to destroy must have been created using rustsecp256k1_v0_8_0_context_create + * or rustsecp256k1_v0_8_0_context_clone. If the context has instead been created using + * rustsecp256k1_v0_8_0_context_preallocated_create or rustsecp256k1_v0_8_0_context_preallocated_clone, the + * behaviour is undefined. In that case, rustsecp256k1_v0_8_0_context_preallocated_destroy must * be used instead. * * Args: ctx: an existing context to destroy, constructed using - * rustsecp256k1_v0_7_0_context_create or rustsecp256k1_v0_7_0_context_clone + * rustsecp256k1_v0_8_0_context_create or rustsecp256k1_v0_8_0_context_clone */ -SECP256K1_API void rustsecp256k1_v0_7_0_context_destroy( - rustsecp256k1_v0_7_0_context* ctx +SECP256K1_API void rustsecp256k1_v0_8_0_context_destroy( + rustsecp256k1_v0_8_0_context* ctx ) SECP256K1_ARG_NONNULL(1); /** Set a callback function to be called when an illegal argument is passed to @@ -270,11 +336,11 @@ SECP256K1_API void rustsecp256k1_v0_7_0_context_destroy( * USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build * has been configured with --enable-external-default-callbacks. Then the * following two symbols must be provided to link against: - * - void rustsecp256k1_v0_7_0_default_illegal_callback_fn(const char* message, void* data); - * - void rustsecp256k1_v0_7_0_default_error_callback_fn(const char* message, void* data); + * - void rustsecp256k1_v0_8_0_default_illegal_callback_fn(const char* message, void* data); + * - void rustsecp256k1_v0_8_0_default_error_callback_fn(const char* message, void* data); * The library can call these default handlers even before a proper callback data - * pointer could have been set using rustsecp256k1_v0_7_0_context_set_illegal_callback or - * rustsecp256k1_v0_7_0_context_set_error_callback, e.g., when the creation of a context + * pointer could have been set using rustsecp256k1_v0_8_0_context_set_illegal_callback or + * rustsecp256k1_v0_8_0_context_set_error_callback, e.g., when the creation of a context * fails. In this case, the corresponding default handler will be called with * the data pointer argument set to NULL. * @@ -284,35 +350,38 @@ SECP256K1_API void rustsecp256k1_v0_7_0_context_destroy( * (NULL restores the default handler.) * data: the opaque pointer to pass to fun above, must be NULL for the default handler. * - * See also rustsecp256k1_v0_7_0_context_set_error_callback. + * See also rustsecp256k1_v0_8_0_context_set_error_callback. */ -SECP256K1_API void rustsecp256k1_v0_7_0_context_set_illegal_callback( - rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API void rustsecp256k1_v0_8_0_context_set_illegal_callback( + rustsecp256k1_v0_8_0_context* ctx, void (*fun)(const char* message, void* data), const void* data ) SECP256K1_ARG_NONNULL(1); /** Set a callback function to be called when an internal consistency check - * fails. The default is crashing. + * fails. + * + * The default callback writes an error message to stderr and calls abort + * to abort the program. * * This can only trigger in case of a hardware failure, miscompilation, * memory corruption, serious bug in the library, or other error that can * otherwise result in undefined behaviour.
It will not trigger due to mere - * incorrect usage of the API (see rustsecp256k1_v0_7_0_context_set_illegal_callback + * incorrect usage of the API (see rustsecp256k1_v0_8_0_context_set_illegal_callback * for that). After this callback returns, anything may happen, including * crashing. * * Args: ctx: an existing context object. * In: fun: a pointer to a function to call when an internal error occurs, * taking a message and an opaque pointer (NULL restores the - * default handler, see rustsecp256k1_v0_7_0_context_set_illegal_callback + * default handler, see rustsecp256k1_v0_8_0_context_set_illegal_callback * for details). * data: the opaque pointer to pass to fun above, must be NULL for the default handler. * - * See also rustsecp256k1_v0_7_0_context_set_illegal_callback. + * See also rustsecp256k1_v0_8_0_context_set_illegal_callback. */ -SECP256K1_API void rustsecp256k1_v0_7_0_context_set_error_callback( - rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API void rustsecp256k1_v0_8_0_context_set_error_callback( + rustsecp256k1_v0_8_0_context* ctx, void (*fun)(const char* message, void* data), const void* data ) SECP256K1_ARG_NONNULL(1); @@ -324,8 +393,8 @@ SECP256K1_API void rustsecp256k1_v0_7_0_context_set_error_callback( * In: size: amount of memory to be available as scratch space. Some extra * (<100 bytes) will be allocated for extra accounting. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_7_0_scratch_space* rustsecp256k1_v0_7_0_scratch_space_create( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_8_0_scratch_space* rustsecp256k1_v0_8_0_scratch_space_create( + const rustsecp256k1_v0_8_0_context* ctx, size_t size ) SECP256K1_ARG_NONNULL(1); @@ -335,9 +404,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT rustsecp256k1_v0_7_0_scratch_space* r * Args: ctx: a secp256k1 context object. * scratch: space to destroy */ -SECP256K1_API void rustsecp256k1_v0_7_0_scratch_space_destroy( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_scratch_space* scratch +SECP256K1_API void rustsecp256k1_v0_8_0_scratch_space_destroy( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_scratch_space* scratch ) SECP256K1_ARG_NONNULL(1); /** Parse a variable-length public key into the pubkey object. @@ -354,9 +423,9 @@ SECP256K1_API void rustsecp256k1_v0_7_0_scratch_space_destroy( * 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header * byte 0x06 or 0x07) format public keys. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_parse( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey* pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_parse( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey* pubkey, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -371,16 +440,16 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_pa * In/Out: outputlen: a pointer to an integer which is initially set to the * size of output, and is overwritten with the written * size. - * In: pubkey: a pointer to a rustsecp256k1_v0_7_0_pubkey containing an + * In: pubkey: a pointer to a rustsecp256k1_v0_8_0_pubkey containing an * initialized public key. * flags: SECP256K1_EC_COMPRESSED if serialization should be in * compressed format, otherwise SECP256K1_EC_UNCOMPRESSED. 
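(Editorial usage sketch, not part of the vendored header: the parse-then-serialize round trip documented above, re-encoding any accepted public key encoding as the 33-byte compressed form. Helper name is illustrative.)

```c
#include <stddef.h>
#include "secp256k1.h" /* vendored header with renamed symbols */

/* Returns the number of bytes written to `out33`, or 0 on parse failure. */
static size_t pubkey_to_compressed(const rustsecp256k1_v0_8_0_context *ctx,
                                   const unsigned char *input, size_t inputlen,
                                   unsigned char out33[33]) {
    rustsecp256k1_v0_8_0_pubkey pubkey;
    size_t outputlen = 33; /* in: buffer size; out: bytes written */
    /* Accepts compressed (33, 0x02/0x03), uncompressed (65, 0x04) and
     * hybrid (65, 0x06/0x07) encodings. */
    if (!rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, input, inputlen)) {
        return 0;
    }
    rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, out33, &outputlen,
                                             &pubkey, SECP256K1_EC_COMPRESSED);
    return outputlen;
}
```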
*/ -SECP256K1_API int rustsecp256k1_v0_7_0_ec_pubkey_serialize( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_ec_pubkey_serialize( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_7_0_pubkey* pubkey, + const rustsecp256k1_v0_8_0_pubkey* pubkey, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -393,10 +462,10 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ec_pubkey_serialize( * In: pubkey1: first public key to compare * pubkey2: second public key to compare */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_cmp( - const rustsecp256k1_v0_7_0_context* ctx, - const rustsecp256k1_v0_7_0_pubkey* pubkey1, - const rustsecp256k1_v0_7_0_pubkey* pubkey2 +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_cmp( + const rustsecp256k1_v0_8_0_context* ctx, + const rustsecp256k1_v0_8_0_pubkey* pubkey1, + const rustsecp256k1_v0_8_0_pubkey* pubkey2 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Parse an ECDSA signature in compact (64 bytes) format. @@ -411,12 +480,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_cm * encoding is invalid. R and S with value 0 are allowed in the encoding. * * After the call, sig will always be initialized. If parsing failed or R or - * S are zero, the resulting sig value is guaranteed to fail validation for any - * message and public key. + * S are zero, the resulting sig value is guaranteed to fail verification for + * any message and public key. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature* sig, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input64 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -432,12 +501,12 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact( * encoded numbers are out of range. * * After the call, sig will always be initialized. If parsing failed or the - * encoded numbers are out of range, signature validation with it is + * encoded numbers are out of range, signature verification with it is * guaranteed to fail for every message and public key. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature* sig, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_parse_der( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -453,11 +522,11 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der( * if 0 was returned). 
* In: sig: a pointer to an initialized signature object */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, size_t *outputlen, - const rustsecp256k1_v0_7_0_ecdsa_signature* sig + const rustsecp256k1_v0_8_0_ecdsa_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Serialize an ECDSA signature in compact (64 byte) format. @@ -467,19 +536,19 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der( * Out: output64: a pointer to a 64-byte array to store the compact serialization * In: sig: a pointer to an initialized signature object * - * See rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact for details about the encoding. + * See rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact for details about the encoding. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output64, - const rustsecp256k1_v0_7_0_ecdsa_signature* sig + const rustsecp256k1_v0_8_0_ecdsa_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Verify an ECDSA signature. * * Returns: 1: correct signature * 0: incorrect or unparseable signature - * Args: ctx: a secp256k1 context object, initialized for verification. + * Args: ctx: a secp256k1 context object. * In: sig: the signature being verified. * msghash32: the 32-byte message hash being verified. * The verifier must make sure to apply a cryptographic @@ -495,16 +564,16 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact( * form are accepted. * * If you need to accept ECDSA signatures from sources that do not obey this - * rule, apply rustsecp256k1_v0_7_0_ecdsa_signature_normalize to the signature prior to - * validation, but be aware that doing so results in malleable signatures. + * rule, apply rustsecp256k1_v0_8_0_ecdsa_signature_normalize to the signature prior to + * verification, but be aware that doing so results in malleable signatures. * * For details, see the comments for that function. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ecdsa_verify( - const rustsecp256k1_v0_7_0_context* ctx, - const rustsecp256k1_v0_7_0_ecdsa_signature *sig, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ecdsa_verify( + const rustsecp256k1_v0_8_0_context* ctx, + const rustsecp256k1_v0_8_0_ecdsa_signature *sig, const unsigned char *msghash32, - const rustsecp256k1_v0_7_0_pubkey *pubkey + const rustsecp256k1_v0_8_0_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Convert a signature to a normalized lower-S form. @@ -543,50 +612,50 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ecdsa_verify * accept various non-unique encodings, so care should be taken when this * property is required for an application. * - * The rustsecp256k1_v0_7_0_ecdsa_sign function will by default create signatures in the - * lower-S form, and rustsecp256k1_v0_7_0_ecdsa_verify will not accept others. 
In case + * The rustsecp256k1_v0_8_0_ecdsa_sign function will by default create signatures in the + * lower-S form, and rustsecp256k1_v0_8_0_ecdsa_verify will not accept others. In case * signatures come from a system that cannot enforce this property, - * rustsecp256k1_v0_7_0_ecdsa_signature_normalize must be called before verification. + * rustsecp256k1_v0_8_0_ecdsa_signature_normalize must be called before verification. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_signature_normalize( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature *sigout, - const rustsecp256k1_v0_7_0_ecdsa_signature *sigin +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_signature_normalize( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature *sigout, + const rustsecp256k1_v0_8_0_ecdsa_signature *sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3); /** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function. * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of * extra entropy. */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_nonce_function rustsecp256k1_v0_7_0_nonce_function_rfc6979; +SECP256K1_API extern const rustsecp256k1_v0_8_0_nonce_function rustsecp256k1_v0_8_0_nonce_function_rfc6979; -/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_7_0_nonce_function_rfc6979). */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_nonce_function rustsecp256k1_v0_7_0_nonce_function_default; +/** A default safe nonce generation function (currently equal to rustsecp256k1_v0_8_0_nonce_function_rfc6979). */ +SECP256K1_API extern const rustsecp256k1_v0_8_0_nonce_function rustsecp256k1_v0_8_0_nonce_function_default; /** Create an ECDSA signature. * * Returns: 1: signature created * 0: the nonce generation function failed, or the secret key was invalid. - * Args: ctx: pointer to a context object, initialized for signing. + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_0_context_static). * Out: sig: pointer to an array where the signature will be placed. * In: msghash32: the 32-byte message hash being signed. * seckey: pointer to a 32-byte secret key. * noncefp: pointer to a nonce generation function. If NULL, - * rustsecp256k1_v0_7_0_nonce_function_default is used. + * rustsecp256k1_v0_8_0_nonce_function_default is used. * ndata: pointer to arbitrary data used by the nonce generation function * (can be NULL). If it is non-NULL and - * rustsecp256k1_v0_7_0_nonce_function_default is used, then ndata must be a + * rustsecp256k1_v0_8_0_nonce_function_default is used, then ndata must be a * pointer to 32-bytes of additional data. * * The created signature is always in lower-S form. See - * rustsecp256k1_v0_7_0_ecdsa_signature_normalize for more details. + * rustsecp256k1_v0_8_0_ecdsa_signature_normalize for more details. 
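(Editorial usage sketch, not part of the vendored header: the end-to-end sign/verify flow these comments describe, with the recommended context randomization before any secret-key computation. `seed32` is assumed to come from a secure RNG, cf. examples/random.h added by this patch; names are illustrative.)

```c
#include <stddef.h>
#include "secp256k1.h" /* vendored header with renamed symbols */

/* Returns 1 if the signature round trip succeeds. */
static int sign_and_verify(const unsigned char seckey32[32],
                           const unsigned char msghash32[32],
                           const unsigned char seed32[32]) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);
    rustsecp256k1_v0_8_0_pubkey pubkey;
    rustsecp256k1_v0_8_0_ecdsa_signature sig;
    int ok = ctx != NULL
        /* side-channel blinding; do this before signing */
        && rustsecp256k1_v0_8_0_context_randomize(ctx, seed32)
        && rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, seckey32)
        /* NULL noncefp selects the default RFC6979 nonce function;
         * the produced signature is always lower-S */
        && rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msghash32, seckey32,
                                           NULL, NULL)
        && rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msghash32, &pubkey);
    if (ctx != NULL) {
        rustsecp256k1_v0_8_0_context_destroy(ctx);
    }
    return ok;
}
```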
*/ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_sign( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature *sig, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_sign( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature *sig, const unsigned char *msghash32, const unsigned char *seckey, - rustsecp256k1_v0_7_0_nonce_function noncefp, + rustsecp256k1_v0_8_0_nonce_function noncefp, const void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -602,8 +671,8 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_sign( * Args: ctx: pointer to a context object. * In: seckey: pointer to a 32-byte secret key. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_verify( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_seckey_verify( + const rustsecp256k1_v0_8_0_context* ctx, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); @@ -611,37 +680,38 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_ve * * Returns: 1: secret was valid, public key stored. * 0: secret was invalid, try again. - * Args: ctx: pointer to a context object, initialized for signing. + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_0_context_static). * Out: pubkey: pointer to the created public key. * In: seckey: pointer to a 32-byte secret key. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_create( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_create( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Negates a secret key in place. * * Returns: 0 if the given secret key is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify. 1 otherwise + * rustsecp256k1_v0_8_0_ec_seckey_verify. 1 otherwise * Args: ctx: pointer to a context object * In/Out: seckey: pointer to the 32-byte secret key to be negated. If the * secret key is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0 and + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0 and * seckey will be set to some unspecified value. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_negate( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_seckey_negate( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); -/** Same as rustsecp256k1_v0_7_0_ec_seckey_negate, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_8_0_ec_seckey_negate, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_privkey_negate( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_privkey_negate( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_ec_seckey_negate instead"); /** Negates a public key in place.
* @@ -649,9 +719,9 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_privkey_n * Args: ctx: pointer to a context object * In/Out: pubkey: pointer to the public key to be negated. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_negate( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_negate( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); /** Tweak a secret key by adding tweak to it. @@ -661,44 +731,45 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_ne * otherwise. * Args: ctx: pointer to a context object. * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_7_0_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_8_0_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_tweak_add( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_seckey_tweak_add( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_7_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_8_0_ec_seckey_tweak_add, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_privkey_tweak_add( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_privkey_tweak_add( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_ec_seckey_tweak_add instead"); /** Tweak a public key by adding tweak times the generator to it. * * Returns: 0 if the arguments are invalid or the resulting public key would be * invalid (only when the tweak is the negation of the corresponding * secret key). 1 otherwise. - * Args: ctx: pointer to a context object initialized for validation. + * Args: ctx: pointer to a context object. * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). 
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_tweak_add( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_tweak_add( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -707,72 +778,84 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_tw * Returns: 0 if the arguments are invalid. 1 otherwise. * Args: ctx: pointer to a context object. * In/Out: seckey: pointer to a 32-byte secret key. If the secret key is - * invalid according to rustsecp256k1_v0_7_0_ec_seckey_verify, this + * invalid according to rustsecp256k1_v0_8_0_ec_seckey_verify, this * function returns 0. seckey will be set to some unspecified * value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_seckey_tweak_mul( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_seckey_tweak_mul( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Same as rustsecp256k1_v0_7_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in +/** Same as rustsecp256k1_v0_8_0_ec_seckey_tweak_mul, but DEPRECATED. Will be removed in * future versions. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_privkey_tweak_mul( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_privkey_tweak_mul( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_ec_seckey_tweak_mul instead"); /** Tweak a public key by multiplying it by a tweak value. * * Returns: 0 if the arguments are invalid. 1 otherwise. - * Args: ctx: pointer to a context object initialized for validation. + * Args: ctx: pointer to a context object. * In/Out: pubkey: pointer to a public key object. pubkey will be set to an * invalid value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according to - * rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). 
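The additive tweak functions above commute with key derivation: adding tweak32 to a secret key and adding tweak32*G to its public key land on the same point. A test-style sketch of that property; rustsecp256k1_v0_8_0_ec_pubkey_serialize and SECP256K1_EC_COMPRESSED are assumed from an earlier, unshown part of this header, and the key and tweak values are placeholders:

```c
#include <assert.h>
#include <string.h>
#include "secp256k1.h"

int main(void) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);

    unsigned char seckey[32] = {0}; seckey[31] = 1; /* placeholder */
    unsigned char tweak[32]  = {0}; tweak[31]  = 2; /* placeholder */

    rustsecp256k1_v0_8_0_pubkey pubkey;
    assert(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, seckey));

    /* seckey := seckey + tweak (mod n). */
    assert(rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, seckey, tweak));
    /* pubkey := pubkey + tweak*G. */
    assert(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, tweak));

    /* Deriving a fresh pubkey from the tweaked seckey yields the same
     * point as tweaking the original pubkey. */
    rustsecp256k1_v0_8_0_pubkey check;
    assert(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &check, seckey));

    unsigned char a[33], b[33];
    size_t alen = sizeof(a), blen = sizeof(b);
    rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, a, &alen, &pubkey, SECP256K1_EC_COMPRESSED);
    rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, b, &blen, &check, SECP256K1_EC_COMPRESSED);
    assert(alen == blen && memcmp(a, b, alen) == 0);

    rustsecp256k1_v0_8_0_context_destroy(ctx);
    return 0;
}
```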
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Updates the context randomization to protect against side-channel leakage. - * Returns: 1: randomization successfully updated or nothing to randomize +/** Randomizes the context to provide enhanced protection against side-channel leakage. + * + * Returns: 1: randomization successful (or called on copy of rustsecp256k1_v0_8_0_context_static) * 0: error * Args: ctx: pointer to a context object. * In: seed32: pointer to a 32-byte random seed (NULL resets to initial state) * - * While secp256k1 code is written to be constant-time no matter what secret - * values are, it's possible that a future compiler may output code which isn't, + * While secp256k1 code is written and tested to be constant-time no matter what + * secret values are, it is possible that a compiler may output code which is not, * and also that the CPU may not emit the same radio frequencies or draw the same - * amount power for all values. - * - * This function provides a seed which is combined into the blinding value: that - * blinding value is added before each multiplication (and removed afterwards) so - * that it does not affect function results, but shields against attacks which - * rely on any input-dependent behaviour. - * - * This function has currently an effect only on contexts initialized for signing - * because randomization is currently used only for signing. However, this is not - * guaranteed and may change in the future. It is safe to call this function on - * contexts not initialized for signing; then it will have no effect and return 1. - * - * You should call this after rustsecp256k1_v0_7_0_context_create or - * rustsecp256k1_v0_7_0_context_clone (and rustsecp256k1_v0_7_0_context_preallocated_create or - * rustsecp256k1_v0_7_0_context_clone, resp.), and you may call this repeatedly afterwards. - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_context_randomize( - rustsecp256k1_v0_7_0_context* ctx, + * amount of power for all values. Randomization of the context shields against + * side-channel observations which aim to exploit secret-dependent behaviour in + * certain computations which involve secret keys. + * + * It is highly recommended to call this function on contexts returned from + * rustsecp256k1_v0_8_0_context_create or rustsecp256k1_v0_8_0_context_clone (or from the corresponding + * functions in rustsecp256k1_v0_8_0_preallocated.h) before using these contexts to call API + * functions that perform computations involving secret keys, e.g., signing and + * public key generation. It is possible to call this function more than once on + * the same context, and doing so before every few computations involving secret + * keys is recommended as a defense-in-depth measure. + * + * Currently, the random seed is mainly used for blinding multiplications of a + * secret scalar with the elliptic curve base point. Multiplications of this + * kind are performed by exactly those API functions which are documented to + * require a context that is not the rustsecp256k1_v0_8_0_context_static. 
As a rule of thumb, + * these are all functions which take a secret key (or a keypair) as an input. + * A notable exception to that rule is the ECDH module, which relies on a different + * kind of elliptic curve point multiplication and thus does not benefit from + * enhanced protection against side-channel leakage currently. + * + * It is safe to call this function on a copy of rustsecp256k1_v0_8_0_context_static in writable + * memory (e.g., obtained via rustsecp256k1_v0_8_0_context_clone). In that case, this + * function is guaranteed to return 1, but the call will have no effect because + * the static context (or a copy thereof) is not meant to be randomized. + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_context_randomize( + rustsecp256k1_v0_8_0_context* ctx, const unsigned char *seed32 ) SECP256K1_ARG_NONNULL(1); @@ -785,10 +868,10 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_context_rand * In: ins: pointer to array of pointers to public keys. * n: the number of public keys to add together (must be at least 1). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_combine( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *out, - const rustsecp256k1_v0_7_0_pubkey * const * ins, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ec_pubkey_combine( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *out, + const rustsecp256k1_v0_8_0_pubkey * const * ins, size_t n ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -800,7 +883,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_co * implementations optimized for a specific tag can precompute the SHA256 state * after hashing the tag hashes. * - * Returns 0 if the arguments are invalid and 1 otherwise. + * Returns: 1 always. * Args: ctx: pointer to a context object * Out: hash32: pointer to a 32-byte array to store the resulting hash * In: tag: pointer to an array containing the tag @@ -808,8 +891,8 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ec_pubkey_co * msg: pointer to an array containing the message * msglen: length of the message array */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_tagged_sha256( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_tagged_sha256( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h index ce7ae2ed4..c30e92f34 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_ecdh.h @@ -10,15 +10,15 @@ extern "C" { /** A pointer to a function that hashes an EC point to obtain an ECDH secret * * Returns: 1 if the point was successfully hashed. - * 0 will cause rustsecp256k1_v0_7_0_ecdh to fail and return 0. + * 0 will cause rustsecp256k1_v0_8_0_ecdh to fail and return 0. * Other return values are not allowed, and the behaviour of - * rustsecp256k1_v0_7_0_ecdh is undefined for other return values. + * rustsecp256k1_v0_8_0_ecdh is undefined for other return values.
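The rewritten randomization comment above boils down to a simple calling pattern: create a context, feed rustsecp256k1_v0_8_0_context_randomize fresh entropy before any secret-key operation, and re-randomize periodically as defense in depth. A sketch under those recommendations; the zeroed seed stands in for CSPRNG output:

```c
#include "secp256k1.h"

int main(void) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);

    /* Placeholder seed; real code must fill this from a CSPRNG
     * (e.g., getrandom(2) or /dev/urandom). */
    unsigned char seed[32] = {0};

    /* Blind the context before any computation involving secret keys,
     * and again every few such computations thereafter. */
    if (!rustsecp256k1_v0_8_0_context_randomize(ctx, seed)) {
        rustsecp256k1_v0_8_0_context_destroy(ctx);
        return 1;
    }

    /* ... signing / key generation using ctx ... */

    rustsecp256k1_v0_8_0_context_destroy(ctx);
    return 0;
}
```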
* Out: output: pointer to an array to be filled by the function * In: x32: pointer to a 32-byte x coordinate * y32: pointer to a 32-byte y coordinate * data: arbitrary data pointer that is passed through */ -typedef int (*rustsecp256k1_v0_7_0_ecdh_hash_function)( +typedef int (*rustsecp256k1_v0_8_0_ecdh_hash_function)( unsigned char *output, const unsigned char *x32, const unsigned char *y32, @@ -27,11 +27,11 @@ typedef int (*rustsecp256k1_v0_7_0_ecdh_hash_function)( /** An implementation of SHA256 hash function that applies to compressed public key. * Populates the output parameter with 32 bytes. */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_ecdh_hash_function rustsecp256k1_v0_7_0_ecdh_hash_function_sha256; +SECP256K1_API extern const rustsecp256k1_v0_8_0_ecdh_hash_function rustsecp256k1_v0_8_0_ecdh_hash_function_sha256; -/** A default ECDH hash function (currently equal to rustsecp256k1_v0_7_0_ecdh_hash_function_sha256). +/** A default ECDH hash function (currently equal to rustsecp256k1_v0_8_0_ecdh_hash_function_sha256). * Populates the output parameter with 32 bytes. */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_ecdh_hash_function rustsecp256k1_v0_7_0_ecdh_hash_function_default; +SECP256K1_API extern const rustsecp256k1_v0_8_0_ecdh_hash_function rustsecp256k1_v0_8_0_ecdh_hash_function_default; /** Compute an EC Diffie-Hellman secret in constant time * @@ -39,20 +39,20 @@ SECP256K1_API extern const rustsecp256k1_v0_7_0_ecdh_hash_function rustsecp256k1 * 0: scalar was invalid (zero or overflow) or hashfp returned 0 * Args: ctx: pointer to a context object. * Out: output: pointer to an array to be filled by hashfp. - * In: pubkey: a pointer to a rustsecp256k1_v0_7_0_pubkey containing an initialized public key. + * In: pubkey: a pointer to a rustsecp256k1_v0_8_0_pubkey containing an initialized public key. * seckey: a 32-byte scalar with which to multiply the point. * hashfp: pointer to a hash function. If NULL, - * rustsecp256k1_v0_7_0_ecdh_hash_function_sha256 is used + * rustsecp256k1_v0_8_0_ecdh_hash_function_sha256 is used * (in which case, 32 bytes will be written to output). * data: arbitrary data pointer that is passed through to hashfp - * (can be NULL for rustsecp256k1_v0_7_0_ecdh_hash_function_sha256). + * (can be NULL for rustsecp256k1_v0_8_0_ecdh_hash_function_sha256). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ecdh( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ecdh( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, - const rustsecp256k1_v0_7_0_pubkey *pubkey, + const rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *seckey, - rustsecp256k1_v0_7_0_ecdh_hash_function hashfp, + rustsecp256k1_v0_8_0_ecdh_hash_function hashfp, void *data ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h index 4b204efda..430fb17ae 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_extrakeys.h @@ -16,12 +16,12 @@ extern "C" { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. 
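Since rustsecp256k1_v0_8_0_ecdh hashes the shared point, both sides of an exchange obtain the same 32-byte secret when hashfp is left NULL (selecting the SHA256 variant documented above). A sketch with placeholder scalars:

```c
#include <assert.h>
#include <string.h>
#include "secp256k1.h"
#include "secp256k1_ecdh.h"

int main(void) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);

    /* Placeholder scalars; use a CSPRNG in real code. */
    unsigned char sk_a[32] = {0}; sk_a[31] = 0x11;
    unsigned char sk_b[32] = {0}; sk_b[31] = 0x22;

    rustsecp256k1_v0_8_0_pubkey pk_a, pk_b;
    assert(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pk_a, sk_a));
    assert(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pk_b, sk_b));

    /* NULL hashfp selects ..._ecdh_hash_function_sha256, which writes
     * 32 bytes to output, per the documentation above. */
    unsigned char secret_a[32], secret_b[32];
    assert(rustsecp256k1_v0_8_0_ecdh(ctx, secret_a, &pk_b, sk_a, NULL, NULL));
    assert(rustsecp256k1_v0_8_0_ecdh(ctx, secret_b, &pk_a, sk_b, NULL, NULL));

    /* Both parties arrive at the same shared secret. */
    assert(memcmp(secret_a, secret_b, 32) == 0);

    rustsecp256k1_v0_8_0_context_destroy(ctx);
    return 0;
}
```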
* If you need to convert to a format suitable for storage, transmission, use - * use rustsecp256k1_v0_7_0_xonly_pubkey_serialize and rustsecp256k1_v0_7_0_xonly_pubkey_parse. To - * compare keys, use rustsecp256k1_v0_7_0_xonly_pubkey_cmp. + * use rustsecp256k1_v0_8_0_xonly_pubkey_serialize and rustsecp256k1_v0_8_0_xonly_pubkey_parse. To + * compare keys, use rustsecp256k1_v0_8_0_xonly_pubkey_cmp. */ typedef struct { unsigned char data[64]; -} rustsecp256k1_v0_7_0_xonly_pubkey; +} rustsecp256k1_v0_8_0_xonly_pubkey; /** Opaque data structure that holds a keypair consisting of a secret and a * public key. @@ -32,7 +32,7 @@ typedef struct { */ typedef struct { unsigned char data[96]; -} rustsecp256k1_v0_7_0_keypair; +} rustsecp256k1_v0_8_0_keypair; /** Parse a 32-byte sequence into a xonly_pubkey object. * @@ -44,9 +44,9 @@ typedef struct { * parsed version of input. If not, it's set to an invalid value. * In: input32: pointer to a serialized xonly_pubkey. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_xonly_pubkey_parse( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_xonly_pubkey* pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_xonly_pubkey_parse( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_xonly_pubkey* pubkey, const unsigned char *input32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -56,12 +56,12 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_xonly_pubkey * * Args: ctx: a secp256k1 context object. * Out: output32: a pointer to a 32-byte array to place the serialized key in. - * In: pubkey: a pointer to a rustsecp256k1_v0_7_0_xonly_pubkey containing an initialized public key. + * In: pubkey: a pointer to a rustsecp256k1_v0_8_0_xonly_pubkey containing an initialized public key. */ -SECP256K1_API int rustsecp256k1_v0_7_0_xonly_pubkey_serialize( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_xonly_pubkey_serialize( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output32, - const rustsecp256k1_v0_7_0_xonly_pubkey* pubkey + const rustsecp256k1_v0_8_0_xonly_pubkey* pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Compare two x-only public keys using lexicographic order @@ -73,16 +73,15 @@ SECP256K1_API int rustsecp256k1_v0_7_0_xonly_pubkey_serialize( * In: pubkey1: first public key to compare * pubkey2: second public key to compare */ -SECP256K1_API int rustsecp256k1_v0_7_0_xonly_pubkey_cmp( - const rustsecp256k1_v0_7_0_context* ctx, - const rustsecp256k1_v0_7_0_xonly_pubkey* pk1, - const rustsecp256k1_v0_7_0_xonly_pubkey* pk2 +SECP256K1_API int rustsecp256k1_v0_8_0_xonly_pubkey_cmp( + const rustsecp256k1_v0_8_0_context* ctx, + const rustsecp256k1_v0_8_0_xonly_pubkey* pk1, + const rustsecp256k1_v0_8_0_xonly_pubkey* pk2 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Converts a rustsecp256k1_v0_7_0_pubkey into a rustsecp256k1_v0_7_0_xonly_pubkey. +/** Converts a rustsecp256k1_v0_8_0_pubkey into a rustsecp256k1_v0_8_0_xonly_pubkey. * - * Returns: 1 if the public key was successfully converted - * 0 otherwise + * Returns: 1 always. * * Args: ctx: pointer to a context object. * Out: xonly_pubkey: pointer to an x-only public key object for placing the converted public key. @@ -91,11 +90,11 @@ SECP256K1_API int rustsecp256k1_v0_7_0_xonly_pubkey_cmp( * the negation of the pubkey and set to 0 otherwise. 
* In: pubkey: pointer to a public key that is converted. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_xonly_pubkey *xonly_pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_xonly_pubkey *xonly_pubkey, int *pk_parity, - const rustsecp256k1_v0_7_0_pubkey *pubkey + const rustsecp256k1_v0_8_0_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); /** Tweak an x-only public key by adding the generator multiplied with tweak32 @@ -103,34 +102,34 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_xonly_pubkey * * Note that the resulting point can not in general be represented by an x-only * pubkey because it may have an odd Y coordinate. Instead, the output_pubkey - * is a normal rustsecp256k1_v0_7_0_pubkey. + * is a normal rustsecp256k1_v0_8_0_pubkey. * * Returns: 0 if the arguments are invalid or the resulting public key would be * invalid (only when the tweak is the negation of the corresponding * secret key). 1 otherwise. * - * Args: ctx: pointer to a context object initialized for verification. + * Args: ctx: pointer to a context object. * Out: output_pubkey: pointer to a public key to store the result. Will be set * to an invalid value if this function returns 0. * In: internal_pubkey: pointer to an x-only pubkey to apply the tweak to. * tweak32: pointer to a 32-byte tweak. If the tweak is invalid - * according to rustsecp256k1_v0_7_0_ec_seckey_verify, this function + * according to rustsecp256k1_v0_8_0_ec_seckey_verify, this function * returns 0. For uniformly random 32-byte arrays the * chance of being invalid is negligible (around 1 in 2^128). */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *output_pubkey, - const rustsecp256k1_v0_7_0_xonly_pubkey *internal_pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *output_pubkey, + const rustsecp256k1_v0_8_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Checks that a tweaked pubkey is the result of calling - * rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add with internal_pubkey and tweak32. + * rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add with internal_pubkey and tweak32. * * The tweaked pubkey is represented by its 32-byte x-only serialization and * its pk_parity, which can both be obtained by converting the result of - * tweak_add to a rustsecp256k1_v0_7_0_xonly_pubkey. + * tweak_add to a rustsecp256k1_v0_8_0_xonly_pubkey. * * Note that this alone does _not_ verify that the tweaked pubkey is a * commitment. If the tweak is not chosen in a specific way, the tweaked pubkey @@ -138,21 +137,21 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_xonly_pubkey * * Returns: 0 if the arguments are invalid or the tweaked pubkey is not the * result of tweaking the internal_pubkey with tweak32. 1 otherwise. - * Args: ctx: pointer to a context object initialized for verification. + * Args: ctx: pointer to a context object. * In: tweaked_pubkey32: pointer to a serialized xonly_pubkey. 
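The x-only API above supports a straightforward round trip: convert a full pubkey with rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey (which per the updated comment always returns 1 on valid input), serialize to 32 bytes, parse back, and compare. A sketch with a placeholder key:

```c
#include <assert.h>
#include "secp256k1.h"
#include "secp256k1_extrakeys.h"

int main(void) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);

    unsigned char seckey[32] = {0}; seckey[31] = 1; /* placeholder */
    rustsecp256k1_v0_8_0_pubkey pubkey;
    assert(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, seckey));

    /* Drop the Y coordinate; pk_parity records whether it was odd. */
    rustsecp256k1_v0_8_0_xonly_pubkey xonly;
    int pk_parity;
    assert(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly, &pk_parity, &pubkey));

    /* The 32-byte serialization round-trips through parse. */
    unsigned char ser32[32];
    assert(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, ser32, &xonly));
    rustsecp256k1_v0_8_0_xonly_pubkey parsed;
    assert(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &parsed, ser32));
    assert(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &xonly, &parsed) == 0);

    rustsecp256k1_v0_8_0_context_destroy(ctx);
    return 0;
}
```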
* tweaked_pk_parity: the parity of the tweaked pubkey (whose serialization * is passed in as tweaked_pubkey32). This must match the * pk_parity value that is returned when calling - * rustsecp256k1_v0_7_0_xonly_pubkey with the tweaked pubkey, or + * rustsecp256k1_v0_8_0_xonly_pubkey with the tweaked pubkey, or * this function will fail. * internal_pubkey: pointer to an x-only public key object to apply the tweak to. * tweak32: pointer to a 32-byte tweak. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check( + const rustsecp256k1_v0_8_0_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, - const rustsecp256k1_v0_7_0_xonly_pubkey *internal_pubkey, + const rustsecp256k1_v0_8_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); @@ -160,86 +159,86 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_xonly_pubkey * * Returns: 1: secret was valid, keypair is ready to use * 0: secret was invalid, try again with a different secret - * Args: ctx: pointer to a context object, initialized for signing. + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_0_context_static). * Out: keypair: pointer to the created keypair. * In: seckey: pointer to a 32-byte secret key. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_keypair_create( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_keypair *keypair, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_keypair_create( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_keypair *keypair, const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Get the secret key from a keypair. * - * Returns: 0 if the arguments are invalid. 1 otherwise. + * Returns: 1 always. * Args: ctx: pointer to a context object. * Out: seckey: pointer to a 32-byte buffer for the secret key. * In: keypair: pointer to a keypair. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_keypair_sec( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_keypair_sec( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, - const rustsecp256k1_v0_7_0_keypair *keypair + const rustsecp256k1_v0_8_0_keypair *keypair ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Get the public key from a keypair. * - * Returns: 0 if the arguments are invalid. 1 otherwise. + * Returns: 1 always. * Args: ctx: pointer to a context object. * Out: pubkey: pointer to a pubkey object. If 1 is returned, it is set to * the keypair public key. If not, it's set to an invalid value. * In: keypair: pointer to a keypair. 
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_keypair_pub( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey, - const rustsecp256k1_v0_7_0_keypair *keypair +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_keypair_pub( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey, + const rustsecp256k1_v0_8_0_keypair *keypair ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Get the x-only public key from a keypair. * - * This is the same as calling rustsecp256k1_v0_7_0_keypair_pub and then - * rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey. + * This is the same as calling rustsecp256k1_v0_8_0_keypair_pub and then + * rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey. * - * Returns: 0 if the arguments are invalid. 1 otherwise. + * Returns: 1 always. * Args: ctx: pointer to a context object. * Out: pubkey: pointer to an xonly_pubkey object. If 1 is returned, it is set * to the keypair public key after converting it to an * xonly_pubkey. If not, it's set to an invalid value. * pk_parity: Ignored if NULL. Otherwise, pointer to an integer that will be set to the - * pk_parity argument of rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey. + * pk_parity argument of rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey. * In: keypair: pointer to a keypair. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_keypair_xonly_pub( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_xonly_pubkey *pubkey, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_keypair_xonly_pub( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_xonly_pubkey *pubkey, int *pk_parity, - const rustsecp256k1_v0_7_0_keypair *keypair + const rustsecp256k1_v0_8_0_keypair *keypair ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); /** Tweak a keypair by adding tweak32 to the secret key and updating the public * key accordingly. * - * Calling this function and then rustsecp256k1_v0_7_0_keypair_pub results in the same - * public key as calling rustsecp256k1_v0_7_0_keypair_xonly_pub and then - * rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add. + * Calling this function and then rustsecp256k1_v0_8_0_keypair_pub results in the same + * public key as calling rustsecp256k1_v0_8_0_keypair_xonly_pub and then + * rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add. * * Returns: 0 if the arguments are invalid or the resulting keypair would be * invalid (only when the tweak is the negation of the keypair's * secret key). 1 otherwise. * - * Args: ctx: pointer to a context object initialized for verification. + * Args: ctx: pointer to a context object. * In/Out: keypair: pointer to a keypair to apply the tweak to. Will be set to * an invalid value if this function returns 0. * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according - * to rustsecp256k1_v0_7_0_ec_seckey_verify, this function returns 0. For + * to rustsecp256k1_v0_8_0_ec_seckey_verify, this function returns 0. For * uniformly random 32-byte arrays the chance of being invalid * is negligible (around 1 in 2^128). 
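The keypair accessors above are now documented to return 1 always, so after a successful rustsecp256k1_v0_8_0_keypair_create the secret, full public, and x-only public components can all be read back unconditionally. A sketch with a placeholder key:

```c
#include <assert.h>
#include "secp256k1.h"
#include "secp256k1_extrakeys.h"

int main(void) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);

    unsigned char seckey[32] = {0}; seckey[31] = 1; /* placeholder */

    /* keypair_create needs a full context, not the static one. */
    rustsecp256k1_v0_8_0_keypair keypair;
    assert(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, seckey));

    /* Extract the components again; per the updated comments, these
     * accessors return 1 on valid arguments. */
    unsigned char sk_out[32];
    rustsecp256k1_v0_8_0_pubkey pk;
    rustsecp256k1_v0_8_0_xonly_pubkey xpk;
    int parity;
    assert(rustsecp256k1_v0_8_0_keypair_sec(ctx, sk_out, &keypair));
    assert(rustsecp256k1_v0_8_0_keypair_pub(ctx, &pk, &keypair));
    assert(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xpk, &parity, &keypair));

    rustsecp256k1_v0_8_0_context_destroy(ctx);
    return 0;
}
```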
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_keypair_xonly_tweak_add( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_keypair *keypair, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_keypair_xonly_tweak_add( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_keypair *keypair, const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h index bcc898d19..c0acf0dad 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_preallocated.h @@ -16,8 +16,8 @@ extern "C" { * objects created by functions in secp256k1.h, i.e., they can be passed to any * API function that expects a context object (see secp256k1.h for details). The * only exception is that context objects created by functions in this module - * must be destroyed using rustsecp256k1_v0_7_0_context_preallocated_destroy (in this - * module) instead of rustsecp256k1_v0_7_0_context_destroy (in secp256k1.h). + * must be destroyed using rustsecp256k1_v0_8_0_context_preallocated_destroy (in this + * module) instead of rustsecp256k1_v0_8_0_context_destroy (in secp256k1.h). * * It is guaranteed that functions in this module will not call malloc or its * friends realloc, calloc, and free. @@ -27,24 +27,24 @@ extern "C" { * caller-provided memory. * * The purpose of this function is to determine how much memory must be provided - * to rustsecp256k1_v0_7_0_context_preallocated_create. + * to rustsecp256k1_v0_8_0_context_preallocated_create. * * Returns: the required size of the caller-provided memory block * In: flags: which parts of the context to initialize. */ -SECP256K1_API size_t rustsecp256k1_v0_7_0_context_preallocated_size( +SECP256K1_API size_t rustsecp256k1_v0_8_0_context_preallocated_size( unsigned int flags ) SECP256K1_WARN_UNUSED_RESULT; /** Create a secp256k1 context object in caller-provided memory. * * The caller must provide a pointer to a rewritable contiguous block of memory - * of size at least rustsecp256k1_v0_7_0_context_preallocated_size(flags) bytes, suitably + * of size at least rustsecp256k1_v0_8_0_context_preallocated_size(flags) bytes, suitably * aligned to hold an object of any type. * * The block of memory is exclusively owned by the created context object during * the lifetime of this context object, which begins with the call to this - * function and ends when a call to rustsecp256k1_v0_7_0_context_preallocated_destroy + * function and ends when a call to rustsecp256k1_v0_8_0_context_preallocated_destroy * (which destroys the context object again) returns. During the lifetime of the * context object, the caller is obligated not to access this block of memory, * i.e., the caller may not read or write the memory, e.g., by copying the memory @@ -54,14 +54,16 @@ SECP256K1_API size_t rustsecp256k1_v0_7_0_context_preallocated_size( * * Returns: a newly created context object. * In: prealloc: a pointer to a rewritable contiguous block of memory of - * size at least rustsecp256k1_v0_7_0_context_preallocated_size(flags) + * size at least rustsecp256k1_v0_8_0_context_preallocated_size(flags) * bytes, as detailed above. * flags: which parts of the context to initialize. 
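These tweak functions fit together as a commitment scheme: tweak an x-only internal key, then later prove the tweaked key really was derived from it via rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check. A sketch with placeholder key and tweak values:

```c
#include <assert.h>
#include "secp256k1.h"
#include "secp256k1_extrakeys.h"

int main(void) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);

    unsigned char seckey[32] = {0}; seckey[31] = 1; /* placeholder */
    unsigned char tweak[32]  = {0}; tweak[31]  = 2; /* placeholder */

    rustsecp256k1_v0_8_0_keypair keypair;
    rustsecp256k1_v0_8_0_xonly_pubkey internal;
    int parity;
    assert(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, seckey));
    assert(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &internal, &parity, &keypair));

    /* Tweak the internal key. The result is a normal pubkey because its
     * Y coordinate may be odd. */
    rustsecp256k1_v0_8_0_pubkey output;
    assert(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output, &internal, tweak));

    /* Re-express the result as (x-only serialization, parity) and verify
     * the commitment with tweak_add_check. */
    rustsecp256k1_v0_8_0_xonly_pubkey out_x;
    int out_parity;
    unsigned char out32[32];
    assert(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &out_x, &out_parity, &output));
    assert(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, out32, &out_x));
    assert(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, out32, out_parity, &internal, tweak));

    /* Equivalently, tweaking the whole keypair keeps its public half in
     * sync with the tweaked point computed above. */
    assert(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak));

    rustsecp256k1_v0_8_0_context_destroy(ctx);
    return 0;
}
```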
* - * See also rustsecp256k1_v0_7_0_context_randomize (in secp256k1.h) - * and rustsecp256k1_v0_7_0_context_preallocated_destroy. + * See rustsecp256k1_v0_8_0_context_create (in secp256k1.h) for further details. + * + * See also rustsecp256k1_v0_8_0_context_randomize (in secp256k1.h) + * and rustsecp256k1_v0_8_0_context_preallocated_destroy. */ -SECP256K1_API rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_preallocated_create( +SECP256K1_API rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_preallocated_create( void* prealloc, unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; @@ -72,28 +74,28 @@ SECP256K1_API rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_preallo * Returns: the required size of the caller-provided memory block. * In: ctx: an existing context to copy. */ -SECP256K1_API size_t rustsecp256k1_v0_7_0_context_preallocated_clone_size( - const rustsecp256k1_v0_7_0_context* ctx +SECP256K1_API size_t rustsecp256k1_v0_8_0_context_preallocated_clone_size( + const rustsecp256k1_v0_8_0_context* ctx ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; /** Copy a secp256k1 context object into caller-provided memory. * * The caller must provide a pointer to a rewritable contiguous block of memory - * of size at least rustsecp256k1_v0_7_0_context_preallocated_size(flags) bytes, suitably + * of size at least rustsecp256k1_v0_8_0_context_preallocated_size(flags) bytes, suitably * aligned to hold an object of any type. * * The block of memory is exclusively owned by the created context object during * the lifetime of this context object, see the description of - * rustsecp256k1_v0_7_0_context_preallocated_create for details. + * rustsecp256k1_v0_8_0_context_preallocated_create for details. * * Returns: a newly created context object. * Args: ctx: an existing context to copy. * In: prealloc: a pointer to a rewritable contiguous block of memory of - * size at least rustsecp256k1_v0_7_0_context_preallocated_size(flags) + * size at least rustsecp256k1_v0_8_0_context_preallocated_size(flags) * bytes, as detailed above. */ -SECP256K1_API rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_preallocated_clone( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_preallocated_clone( + const rustsecp256k1_v0_8_0_context* ctx, void* prealloc ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT; @@ -103,22 +105,22 @@ SECP256K1_API rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_preallo * The context pointer may not be used afterwards. * * The context to destroy must have been created using - * rustsecp256k1_v0_7_0_context_preallocated_create or rustsecp256k1_v0_7_0_context_preallocated_clone. - * If the context has instead been created using rustsecp256k1_v0_7_0_context_create or - * rustsecp256k1_v0_7_0_context_clone, the behaviour is undefined. In that case, - * rustsecp256k1_v0_7_0_context_destroy must be used instead. + * rustsecp256k1_v0_8_0_context_preallocated_create or rustsecp256k1_v0_8_0_context_preallocated_clone. + * If the context has instead been created using rustsecp256k1_v0_8_0_context_create or + * rustsecp256k1_v0_8_0_context_clone, the behaviour is undefined. In that case, + * rustsecp256k1_v0_8_0_context_destroy must be used instead. 
* * If required, it is the responsibility of the caller to deallocate the block * of memory properly after this function returns, e.g., by calling free on the - * preallocated pointer given to rustsecp256k1_v0_7_0_context_preallocated_create or - * rustsecp256k1_v0_7_0_context_preallocated_clone. + * preallocated pointer given to rustsecp256k1_v0_8_0_context_preallocated_create or + * rustsecp256k1_v0_8_0_context_preallocated_clone. * * Args: ctx: an existing context to destroy, constructed using - * rustsecp256k1_v0_7_0_context_preallocated_create or - * rustsecp256k1_v0_7_0_context_preallocated_clone. + * rustsecp256k1_v0_8_0_context_preallocated_create or + * rustsecp256k1_v0_8_0_context_preallocated_clone. */ -SECP256K1_API void rustsecp256k1_v0_7_0_context_preallocated_destroy( - rustsecp256k1_v0_7_0_context* ctx +SECP256K1_API void rustsecp256k1_v0_8_0_context_preallocated_destroy( + rustsecp256k1_v0_8_0_context* ctx ) SECP256K1_ARG_NONNULL(1); #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h index a04acd0ad..7cad1d6ee 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_recovery.h @@ -14,8 +14,8 @@ extern "C" { * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 65 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage or transmission, use - * the rustsecp256k1_v0_7_0_ecdsa_signature_serialize_* and - * rustsecp256k1_v0_7_0_ecdsa_signature_parse_* functions. + * the rustsecp256k1_v0_8_0_ecdsa_signature_serialize_* and + * rustsecp256k1_v0_8_0_ecdsa_signature_parse_* functions. * * Furthermore, it is guaranteed that identical signatures (including their * recoverability) will have identical representation, so they can be @@ -23,7 +23,7 @@ extern "C" { */ typedef struct { unsigned char data[65]; -} rustsecp256k1_v0_7_0_ecdsa_recoverable_signature; +} rustsecp256k1_v0_8_0_ecdsa_recoverable_signature; /** Parse a compact ECDSA signature (64 bytes + recovery id). * @@ -33,9 +33,9 @@ typedef struct { * In: input64: a pointer to a 64-byte compact signature * recid: the recovery id (0, 1, 2 or 3) */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_recoverable_signature* sig, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); @@ -47,10 +47,10 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact * Out: sig: a pointer to a normal signature. * In: sigin: a pointer to a recoverable signature. 
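The preallocated variants above mirror the normal create/destroy lifecycle, with the caller owning the backing memory. A sketch; malloc is used for convenience, though any suitably aligned writable block (including static storage) would do:

```c
#include <assert.h>
#include <stdlib.h>
#include "secp256k1.h"
#include "secp256k1_preallocated.h"

int main(void) {
    /* Ask how much memory the context needs, provide it ourselves, and
     * destroy with the matching _preallocated_ function. */
    size_t sz = rustsecp256k1_v0_8_0_context_preallocated_size(SECP256K1_CONTEXT_NONE);
    void *mem = malloc(sz);
    assert(mem != NULL);

    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_preallocated_create(mem, SECP256K1_CONTEXT_NONE);
    assert(ctx != NULL);

    /* ... use ctx exactly like a heap-allocated context ... */

    rustsecp256k1_v0_8_0_context_preallocated_destroy(ctx);
    free(mem); /* the caller still owns the block after destroy */
    return 0;
}
```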
*/ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_signature* sig, - const rustsecp256k1_v0_7_0_ecdsa_recoverable_signature* sigin +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_signature* sig, + const rustsecp256k1_v0_8_0_ecdsa_recoverable_signature* sigin ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Serialize an ECDSA signature in compact format (64 bytes + recovery id). @@ -61,32 +61,32 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert( * recid: a pointer to an integer to hold the recovery id. * In: sig: a pointer to an initialized signature object. */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output64, int *recid, - const rustsecp256k1_v0_7_0_ecdsa_recoverable_signature* sig + const rustsecp256k1_v0_8_0_ecdsa_recoverable_signature* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); /** Create a recoverable ECDSA signature. * * Returns: 1: signature created * 0: the nonce generation function failed, or the secret key was invalid. - * Args: ctx: pointer to a context object, initialized for signing. + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_0_context_static). * Out: sig: pointer to an array where the signature will be placed. * In: msghash32: the 32-byte message hash being signed. * seckey: pointer to a 32-byte secret key. * noncefp: pointer to a nonce generation function. If NULL, - * rustsecp256k1_v0_7_0_nonce_function_default is used. + * rustsecp256k1_v0_8_0_nonce_function_default is used. * ndata: pointer to arbitrary data used by the nonce generation function - * (can be NULL for rustsecp256k1_v0_7_0_nonce_function_default). + * (can be NULL for rustsecp256k1_v0_8_0_nonce_function_default). */ -SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_sign_recoverable( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_ecdsa_recoverable_signature *sig, +SECP256K1_API int rustsecp256k1_v0_8_0_ecdsa_sign_recoverable( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_ecdsa_recoverable_signature *sig, const unsigned char *msghash32, const unsigned char *seckey, - rustsecp256k1_v0_7_0_nonce_function noncefp, + rustsecp256k1_v0_8_0_nonce_function noncefp, const void *ndata ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); @@ -94,15 +94,15 @@ SECP256K1_API int rustsecp256k1_v0_7_0_ecdsa_sign_recoverable( * * Returns: 1: public key successfully recovered (which guarantees a correct signature). * 0: otherwise. - * Args: ctx: pointer to a context object, initialized for verification. + * Args: ctx: pointer to a context object. * Out: pubkey: pointer to the recovered public key. * In: sig: pointer to initialized signature that supports pubkey recovery. * msghash32: the 32-byte message hash assumed to be signed. 
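A recoverable signature round trip ties the declarations above together with rustsecp256k1_v0_8_0_ecdsa_recover, declared just below: sign, serialize to 64 bytes plus a recovery id, then reconstruct the signer's public key from the signature and message hash alone. A sketch with placeholder key and hash:

```c
#include <assert.h>
#include "secp256k1.h"
#include "secp256k1_recovery.h"

int main(void) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);

    unsigned char seckey[32]  = {0}; seckey[31]  = 1; /* placeholder */
    unsigned char msghash[32] = {0}; msghash[31] = 9; /* placeholder hash */

    /* NULL noncefp selects ..._nonce_function_default. */
    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature rsig;
    assert(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &rsig, msghash, seckey, NULL, NULL));

    /* Serialize to 64 bytes plus a recovery id in [0, 3]. */
    unsigned char sig64[64];
    int recid;
    assert(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &rsig));

    /* Recover the signer's public key from (sig64, recid, msghash). */
    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature parsed;
    rustsecp256k1_v0_8_0_pubkey recovered;
    assert(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &parsed, sig64, recid));
    assert(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &recovered, &parsed, msghash));

    rustsecp256k1_v0_8_0_context_destroy(ctx);
    return 0;
}
```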
*/ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_ecdsa_recover( - const rustsecp256k1_v0_7_0_context* ctx, - rustsecp256k1_v0_7_0_pubkey *pubkey, - const rustsecp256k1_v0_7_0_ecdsa_recoverable_signature *sig, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_ecdsa_recover( + const rustsecp256k1_v0_8_0_context* ctx, + rustsecp256k1_v0_8_0_pubkey *pubkey, + const rustsecp256k1_v0_8_0_ecdsa_recoverable_signature *sig, const unsigned char *msghash32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); diff --git a/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h b/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h index b818051a2..b1545e4a1 100644 --- a/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h +++ b/secp256k1-sys/depend/secp256k1/include/secp256k1_schnorrsig.h @@ -15,7 +15,7 @@ extern "C" { /** A pointer to a function to deterministically generate a nonce. * - * Same as rustsecp256k1_v0_7_0_nonce function with the exception of accepting an + * Same as rustsecp256k1_v0_8_0_nonce function with the exception of accepting an * additional pubkey argument and not requiring an attempt argument. The pubkey * argument can protect signature schemes with key-prefixed challenge hash * inputs against reusing the nonce when signing with the wrong precomputed @@ -38,7 +38,7 @@ extern "C" { * Except for test cases, this function should compute some cryptographic hash of * the message, the key, the pubkey, the algorithm description, and data. */ -typedef int (*rustsecp256k1_v0_7_0_nonce_function_hardened)( +typedef int (*rustsecp256k1_v0_8_0_nonce_function_hardened)( unsigned char *nonce32, const unsigned char *msg, size_t msglen, @@ -61,7 +61,7 @@ typedef int (*rustsecp256k1_v0_7_0_nonce_function_hardened)( * Therefore, to create BIP-340 compliant signatures, algo must be set to * "BIP0340/nonce" and algolen to 13. */ -SECP256K1_API extern const rustsecp256k1_v0_7_0_nonce_function_hardened rustsecp256k1_v0_7_0_nonce_function_bip340; +SECP256K1_API extern const rustsecp256k1_v0_8_0_nonce_function_hardened rustsecp256k1_v0_8_0_nonce_function_bip340; /** Data structure that contains additional arguments for schnorrsig_sign_custom. * @@ -73,17 +73,17 @@ SECP256K1_API extern const rustsecp256k1_v0_7_0_nonce_function_hardened rustsecp * and has no other function than making sure the object is * initialized. * noncefp: pointer to a nonce generation function. If NULL, - * rustsecp256k1_v0_7_0_nonce_function_bip340 is used + * rustsecp256k1_v0_8_0_nonce_function_bip340 is used * ndata: pointer to arbitrary data used by the nonce generation function * (can be NULL). If it is non-NULL and - * rustsecp256k1_v0_7_0_nonce_function_bip340 is used, then ndata must be a + * rustsecp256k1_v0_8_0_nonce_function_bip340 is used, then ndata must be a * pointer to 32-byte auxiliary randomness as per BIP-340. */ typedef struct { unsigned char magic[4]; - rustsecp256k1_v0_7_0_nonce_function_hardened noncefp; + rustsecp256k1_v0_8_0_nonce_function_hardened noncefp; void* ndata; -} rustsecp256k1_v0_7_0_schnorrsig_extraparams; +} rustsecp256k1_v0_8_0_schnorrsig_extraparams; #define SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC { 0xda, 0x6f, 0xb3, 0x8c } #define SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT {\ @@ -95,18 +95,18 @@ typedef struct { /** Create a Schnorr signature. * * Does _not_ strictly follow BIP-340 because it does not verify the resulting - * signature. 
Instead, you can manually use rustsecp256k1_v0_7_0_schnorrsig_verify and + * signature. Instead, you can manually use rustsecp256k1_v0_8_0_schnorrsig_verify and * abort if it fails. * * This function only signs 32-byte messages. If you have messages of a * different size (or the same size but without a context-specific tag * prefix), it is recommended to create a 32-byte message hash with - * rustsecp256k1_v0_7_0_tagged_sha256 and then sign the hash. Tagged hashing allows + * rustsecp256k1_v0_8_0_tagged_sha256 and then sign the hash. Tagged hashing allows * providing an context-specific tag for domain separation. This prevents * signatures from being valid in multiple contexts by accident. * * Returns 1 on success, 0 on failure. - * Args: ctx: pointer to a context object, initialized for signing. + * Args: ctx: pointer to a context object (not rustsecp256k1_v0_8_0_context_static). * Out: sig64: pointer to a 64-byte array to store the serialized signature. * In: msg32: the 32-byte message being signed. * keypair: pointer to an initialized keypair. @@ -116,17 +116,28 @@ typedef struct { * BIP-340 "Default Signing" for a full explanation of this * argument and for guidance if randomness is expensive. */ -SECP256K1_API int rustsecp256k1_v0_7_0_schnorrsig_sign( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_schnorrsig_sign32( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, - const rustsecp256k1_v0_7_0_keypair *keypair, + const rustsecp256k1_v0_8_0_keypair *keypair, const unsigned char *aux_rand32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +/** Same as rustsecp256k1_v0_8_0_schnorrsig_sign32, but DEPRECATED. Will be removed in + * future versions. */ +SECP256K1_API int rustsecp256k1_v0_8_0_schnorrsig_sign( + const rustsecp256k1_v0_8_0_context* ctx, + unsigned char *sig64, + const unsigned char *msg32, + const rustsecp256k1_v0_8_0_keypair *keypair, + const unsigned char *aux_rand32 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) + SECP256K1_DEPRECATED("Use rustsecp256k1_v0_8_0_schnorrsig_sign32 instead"); + /** Create a Schnorr signature with a more flexible API. * - * Same arguments as rustsecp256k1_v0_7_0_schnorrsig_sign except that it allows signing + * Same arguments as rustsecp256k1_v0_8_0_schnorrsig_sign except that it allows signing * variable length messages and accepts a pointer to an extraparams object that * allows customizing signing by passing additional arguments. * @@ -137,31 +148,31 @@ SECP256K1_API int rustsecp256k1_v0_7_0_schnorrsig_sign( * msglen: length of the message * extraparams: pointer to a extraparams object (can be NULL) */ -SECP256K1_API int rustsecp256k1_v0_7_0_schnorrsig_sign_custom( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API int rustsecp256k1_v0_8_0_schnorrsig_sign_custom( + const rustsecp256k1_v0_8_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, - const rustsecp256k1_v0_7_0_keypair *keypair, - rustsecp256k1_v0_7_0_schnorrsig_extraparams *extraparams + const rustsecp256k1_v0_8_0_keypair *keypair, + rustsecp256k1_v0_8_0_schnorrsig_extraparams *extraparams ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); /** Verify a Schnorr signature. * * Returns: 1: correct signature * 0: incorrect signature - * Args: ctx: a secp256k1 context object, initialized for verification. 
+ * Args: ctx: a secp256k1 context object. * In: sig64: pointer to the 64-byte signature to verify. * msg: the message being verified. Can only be NULL if msglen is 0. * msglen: length of the message * pubkey: pointer to an x-only public key to verify with (cannot be NULL) */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_7_0_schnorrsig_verify( - const rustsecp256k1_v0_7_0_context* ctx, +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1_v0_8_0_schnorrsig_verify( + const rustsecp256k1_v0_8_0_context* ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, - const rustsecp256k1_v0_7_0_xonly_pubkey *pubkey + const rustsecp256k1_v0_8_0_xonly_pubkey *pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage b/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage index 7634dd92d..0dfcbbd59 100644 --- a/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage +++ b/secp256k1-sys/depend/secp256k1/sage/gen_exhaustive_groups.sage @@ -1,4 +1,4 @@ -load("rustsecp256k1_v0_7_0_params.sage") +load("rustsecp256k1_v0_8_0_params.sage") orders_done = set() results = {} @@ -95,13 +95,13 @@ for f in sorted(results.keys()): G = results[f]["G"] print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f)) first = False - print("static const rustsecp256k1_v0_7_0_ge rustsecp256k1_v0_7_0_ge_const_g = SECP256K1_GE_CONST(") + print("static const rustsecp256k1_v0_8_0_ge rustsecp256k1_v0_8_0_ge_const_g = SECP256K1_GE_CONST(") print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4))) print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4))) print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) print(");") - print("static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_fe_const_b = SECP256K1_FE_CONST(") + print("static const rustsecp256k1_v0_8_0_fe rustsecp256k1_v0_8_0_fe_const_b = SECP256K1_FE_CONST(") print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4))) print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) print(");") diff --git a/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage b/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage index 6a5776592..3e644cf49 100644 --- a/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage +++ b/secp256k1-sys/depend/secp256k1/sage/gen_split_lambda_constants.sage @@ -1,9 +1,9 @@ -""" Generates the constants used in rustsecp256k1_v0_7_0_scalar_split_lambda. +""" Generates the constants used in rustsecp256k1_v0_8_0_scalar_split_lambda. -See the comments for rustsecp256k1_v0_7_0_scalar_split_lambda in src/scalar_impl.h for detailed explanations. +See the comments for rustsecp256k1_v0_8_0_scalar_split_lambda in src/scalar_impl.h for detailed explanations. 
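The headline schnorrsig change above is the rename of rustsecp256k1_v0_7_0_schnorrsig_sign to rustsecp256k1_v0_8_0_schnorrsig_sign32, with the old name retained behind SECP256K1_DEPRECATED. A signing and verification sketch against those declarations, using placeholder inputs (the zeroed aux_rand stands in for fresh randomness as per BIP-340):

```c
#include <assert.h>
#include "secp256k1.h"
#include "secp256k1_extrakeys.h"
#include "secp256k1_schnorrsig.h"

int main(void) {
    rustsecp256k1_v0_8_0_context *ctx =
        rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);

    unsigned char seckey[32]   = {0}; seckey[31] = 1; /* placeholder */
    unsigned char msg32[32]    = {0};                 /* placeholder 32-byte message */
    unsigned char aux_rand[32] = {0};                 /* use fresh randomness in real code */

    rustsecp256k1_v0_8_0_keypair keypair;
    assert(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, seckey));

    /* schnorrsig_sign32 replaces the now-deprecated schnorrsig_sign. */
    unsigned char sig64[64];
    assert(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig64, msg32, &keypair, aux_rand));

    /* Verification takes the x-only form of the public key;
     * pk_parity may be NULL since it is documented as ignored. */
    rustsecp256k1_v0_8_0_xonly_pubkey xpk;
    assert(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xpk, NULL, &keypair));
    assert(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig64, msg32, 32, &xpk));

    rustsecp256k1_v0_8_0_context_destroy(ctx);
    return 0;
}
```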
""" -load("rustsecp256k1_v0_7_0_params.sage") +load("rustsecp256k1_v0_8_0_params.sage") def inf_norm(v): """Returns the infinity norm of a vector.""" @@ -24,17 +24,17 @@ def gauss_reduction(i1, i2): v2[1] -= m*v1[1] def find_split_constants_gauss(): - """Find constants for rustsecp256k1_v0_7_0_scalar_split_lamdba using gauss reduction.""" + """Find constants for rustsecp256k1_v0_8_0_scalar_split_lamdba using gauss reduction.""" (v11, v12), (v21, v22) = gauss_reduction([0, N], [1, int(LAMBDA)]) - # We use related vectors in rustsecp256k1_v0_7_0_scalar_split_lambda. + # We use related vectors in rustsecp256k1_v0_8_0_scalar_split_lambda. A1, B1 = -v21, -v11 A2, B2 = v22, -v21 return A1, B1, A2, B2 def find_split_constants_explicit_tof(): - """Find constants for rustsecp256k1_v0_7_0_scalar_split_lamdba using the trace of Frobenius. + """Find constants for rustsecp256k1_v0_8_0_scalar_split_lamdba using the trace of Frobenius. See Benjamin Smith: "Easy scalar decompositions for efficient scalar multiplication on elliptic curves and genus 2 Jacobians" (https://eprint.iacr.org/2013/672), Example 2 @@ -51,7 +51,7 @@ def find_split_constants_explicit_tof(): A2 = Integer((t + c)/2 - 1) B2 = Integer(1 - (t - c)/2) - # We use a negated b values in rustsecp256k1_v0_7_0_scalar_split_lambda. + # We use a negated b values in rustsecp256k1_v0_8_0_scalar_split_lambda. B1, B2 = -B1, -B2 return A1, B1, A2, B2 @@ -90,7 +90,7 @@ def rnddiv2(v): return v >> 1 def scalar_lambda_split(k): - """Equivalent to rustsecp256k1_v0_7_0_scalar_lambda_split().""" + """Equivalent to rustsecp256k1_v0_8_0_scalar_lambda_split().""" c1 = rnddiv2((k * G1) >> 383) c2 = rnddiv2((k * G2) >> 383) c1 = (c1 * -B1) % N diff --git a/secp256k1-sys/depend/secp256k1/sage/group_prover.sage b/secp256k1-sys/depend/secp256k1/sage/group_prover.sage index b200bfeae..9305c215d 100644 --- a/secp256k1-sys/depend/secp256k1/sage/group_prover.sage +++ b/secp256k1-sys/depend/secp256k1/sage/group_prover.sage @@ -164,6 +164,9 @@ class constraints: def negate(self): return constraints(zero=self.nonzero, nonzero=self.zero) + def map(self, fun): + return constraints(zero={fun(k): v for k, v in self.zero.items()}, nonzero={fun(k): v for k, v in self.nonzero.items()}) + def __add__(self, other): zero = self.zero.copy() zero.update(other.zero) @@ -177,6 +180,30 @@ class constraints: def __repr__(self): return "%s" % self +def normalize_factor(p): + """Normalizes the sign of primitive polynomials (as returned by factor()) + + This function ensures that the polynomial has a positive leading coefficient. + + This is necessary because recent sage versions (starting with v9.3 or v9.4, + we don't know) are inconsistent about the placement of the minus sign in + polynomial factorizations: + ``` + sage: R. = PolynomialRing(QQ,8,order='invlex') + sage: R((-2 * (bx - ax)) ^ 1).factor() + (-2) * (bx - ax) + sage: R((-2 * (bx - ax)) ^ 2).factor() + (4) * (-bx + ax)^2 + sage: R((-2 * (bx - ax)) ^ 3).factor() + (8) * (-bx + ax)^3 + ``` + """ + # Assert p is not 0 and that its non-zero coeffients are coprime. + # (We could just work with the primitive part p/p.content() but we want to be + # aware if factor() does not return a primitive part in future sage versions.) + assert p.content() == 1 + # Ensure that the first non-zero coefficient is positive. 
+ return p if p.lc() > 0 else -p def conflicts(R, con): """Check whether any of the passed non-zero assumptions is implied by the zero assumptions""" @@ -204,10 +231,10 @@ def get_nonzero_set(R, assume): nonzero = set() for nz in map(numerator, assume.nonzero): for (f,n) in nz.factor(): - nonzero.add(f) + nonzero.add(normalize_factor(f)) rnz = zero.reduce(nz) for (f,n) in rnz.factor(): - nonzero.add(f) + nonzero.add(normalize_factor(f)) return nonzero @@ -222,27 +249,27 @@ def prove_nonzero(R, exprs, assume): return (False, [exprs[expr]]) allexprs = reduce(lambda a,b: numerator(a)*numerator(b), exprs, 1) for (f, n) in allexprs.factor(): - if f not in nonzero: + if normalize_factor(f) not in nonzero: ok = False if ok: return (True, None) ok = True - for (f, n) in zero.reduce(numerator(allexprs)).factor(): - if f not in nonzero: + for (f, n) in zero.reduce(allexprs).factor(): + if normalize_factor(f) not in nonzero: ok = False if ok: return (True, None) ok = True for expr in exprs: for (f,n) in numerator(expr).factor(): - if f not in nonzero: + if normalize_factor(f) not in nonzero: ok = False if ok: return (True, None) ok = True for expr in exprs: for (f,n) in zero.reduce(numerator(expr)).factor(): - if f not in nonzero: + if normalize_factor(f) not in nonzero: expl.add(exprs[expr]) if expl: return (False, list(expl)) @@ -254,7 +281,7 @@ def prove_zero(R, exprs, assume): """Check whether all of the passed expressions are provably zero, given assumptions""" r, e = prove_nonzero(R, dict(map(lambda x: (fastfrac(R, x.bot, 1), exprs[x]), exprs)), assume) if not r: - return (False, map(lambda x: "Possibly zero denominator: %s" % x, e)) + return (False, list(map(lambda x: "Possibly zero denominator: %s" % x, e))) zero = R.ideal(list(map(numerator, assume.zero))) nonzero = prod(x for x in assume.nonzero) expl = [] @@ -279,8 +306,8 @@ def describe_extra(R, assume, assumeExtra): if base not in zero: add = [] for (f, n) in numerator(base).factor(): - if f not in nonzero: - add += ["%s" % f] + if normalize_factor(f) not in nonzero: + add += ["%s" % normalize_factor(f)] if add: ret.add((" * ".join(add)) + " = 0 [%s]" % assumeExtra.zero[base]) # Iterate over the extra nonzero expressions @@ -288,8 +315,8 @@ def describe_extra(R, assume, assumeExtra): nzr = zeroextra.reduce(numerator(nz)) if nzr not in zeroextra: for (f,n) in nzr.factor(): - if zeroextra.reduce(f) not in nonzero: - ret.add("%s != 0" % zeroextra.reduce(f)) + if normalize_factor(zeroextra.reduce(f)) not in nonzero: + ret.add("%s != 0" % normalize_factor(zeroextra.reduce(f))) return ", ".join(x for x in ret) @@ -299,22 +326,21 @@ def check_symbolic(R, assumeLaw, assumeAssert, assumeBranch, require): if conflicts(R, assume): # This formula does not apply - return None + return (True, None) describe = describe_extra(R, assumeLaw + assumeBranch, assumeAssert) + if describe != "": + describe = " (assuming " + describe + ")" ok, msg = prove_zero(R, require.zero, assume) if not ok: - return "FAIL, %s fails (assuming %s)" % (str(msg), describe) + return (False, "FAIL, %s fails%s" % (str(msg), describe)) res, expl = prove_nonzero(R, require.nonzero, assume) if not res: - return "FAIL, %s fails (assuming %s)" % (str(expl), describe) + return (False, "FAIL, %s fails%s" % (str(expl), describe)) - if describe != "": - return "OK (assuming %s)" % describe - else: - return "OK" + return (True, "OK%s" % describe) def concrete_verify(c): diff --git a/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage 
b/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage index 3f0e6bd70..a945a9d33 100644 --- a/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage +++ b/secp256k1-sys/depend/secp256k1/sage/prove_group_implementations.sage @@ -5,32 +5,27 @@ import sys load("group_prover.sage") load("weierstrass_prover.sage") -def formula_rustsecp256k1_v0_7_0_gej_double_var(a): - """libsecp256k1's rustsecp256k1_v0_7_0_gej_double_var, used by various addition functions""" +def formula_rustsecp256k1_v0_8_0_gej_double_var(a): + """libsecp256k1's rustsecp256k1_v0_8_0_gej_double_var, used by various addition functions""" rz = a.Z * a.Y - rz = rz * 2 - t1 = a.X^2 - t1 = t1 * 3 - t2 = t1^2 - t3 = a.Y^2 - t3 = t3 * 2 - t4 = t3^2 - t4 = t4 * 2 - t3 = t3 * a.X - rx = t3 - rx = rx * 4 - rx = -rx - rx = rx + t2 - t2 = -t2 - t3 = t3 * 6 - t3 = t3 + t2 - ry = t1 * t3 - t2 = -t4 - ry = ry + t2 + s = a.Y^2 + l = a.X^2 + l = l * 3 + l = l / 2 + t = -s + t = t * a.X + rx = l^2 + rx = rx + t + rx = rx + t + s = s^2 + t = t + rx + ry = t * l + ry = ry + s + ry = -ry return jacobianpoint(rx, ry, rz) -def formula_rustsecp256k1_v0_7_0_gej_add_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_7_0_gej_add_var""" +def formula_rustsecp256k1_v0_8_0_gej_add_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_8_0_gej_add_var""" if branch == 0: return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), b) if branch == 1: @@ -45,34 +40,31 @@ def formula_rustsecp256k1_v0_7_0_gej_add_var(branch, a, b): s2 = s2 * a.Z h = -u1 h = h + u2 - i = -s1 - i = i + s2 + i = -s2 + i = i + s1 if branch == 2: - r = formula_rustsecp256k1_v0_7_0_gej_double_var(a) + r = formula_rustsecp256k1_v0_8_0_gej_double_var(a) return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r) if branch == 3: return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 + t = h * b.Z + rz = a.Z * t h2 = h^2 + h2 = -h2 h3 = h2 * h - h = h * b.Z - rz = a.Z * h t = u1 * h2 - rx = t - rx = rx * 2 + rx = i^2 rx = rx + h3 - rx = -rx - rx = rx + i2 - ry = -rx - ry = ry + t - ry = ry * i + rx = rx + t + rx = rx + t + t = t + rx + ry = t * i h3 = h3 * s1 - h3 = -h3 ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_7_0_gej_add_ge_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_7_0_gej_add_ge_var, which assume bz==1""" +def formula_rustsecp256k1_v0_8_0_gej_add_ge_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_8_0_gej_add_ge_var, which assume bz==1""" if branch == 0: return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(nonzero={a.Infinity : 'a_infinite'}), b) if branch == 1: @@ -85,43 +77,41 @@ def formula_rustsecp256k1_v0_7_0_gej_add_ge_var(branch, a, b): s2 = s2 * a.Z h = -u1 h = h + u2 - i = -s1 - i = i + s2 + i = -s2 + i = i + s1 if (branch == 2): - r = formula_rustsecp256k1_v0_7_0_gej_double_var(a) + r = formula_rustsecp256k1_v0_8_0_gej_double_var(a) return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if (branch == 3): return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 - h2 = h^2 - h3 = h * h2 rz = a.Z * 
h + h2 = h^2 + h2 = -h2 + h3 = h2 * h t = u1 * h2 - rx = t - rx = rx * 2 + rx = i^2 rx = rx + h3 - rx = -rx - rx = rx + i2 - ry = -rx - ry = ry + t - ry = ry * i + rx = rx + t + rx = rx + t + t = t + rx + ry = t * i h3 = h3 * s1 - h3 = -h3 ry = ry + h3 return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_7_0_gej_add_zinv_var(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_7_0_gej_add_zinv_var""" +def formula_rustsecp256k1_v0_8_0_gej_add_zinv_var(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_8_0_gej_add_zinv_var""" bzinv = b.Z^(-1) if branch == 0: - return (constraints(), constraints(nonzero={b.Infinity : 'b_infinite'}), a) - if branch == 1: + rinf = b.Infinity bzinv2 = bzinv^2 bzinv3 = bzinv2 * bzinv rx = b.X * bzinv2 ry = b.Y * bzinv3 rz = 1 - return (constraints(), constraints(zero={b.Infinity : 'b_finite'}, nonzero={a.Infinity : 'a_infinite'}), jacobianpoint(rx, ry, rz)) + return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), jacobianpoint(rx, ry, rz, rinf)) + if branch == 1: + return (constraints(), constraints(zero={a.Infinity : 'a_finite'}, nonzero={b.Infinity : 'b_infinite'}), a) azz = a.Z * bzinv z12 = azz^2 u1 = a.X @@ -131,34 +121,30 @@ def formula_rustsecp256k1_v0_7_0_gej_add_zinv_var(branch, a, b): s2 = s2 * azz h = -u1 h = h + u2 - i = -s1 - i = i + s2 + i = -s2 + i = i + s1 if branch == 2: - r = formula_rustsecp256k1_v0_7_0_gej_double_var(a) + r = formula_rustsecp256k1_v0_8_0_gej_double_var(a) return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if branch == 3: return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 + rz = a.Z * h h2 = h^2 - h3 = h * h2 - rz = a.Z - rz = rz * h + h2 = -h2 + h3 = h2 * h t = u1 * h2 - rx = t - rx = rx * 2 + rx = i^2 rx = rx + h3 - rx = -rx - rx = rx + i2 - ry = -rx - ry = ry + t - ry = ry * i + rx = rx + t + rx = rx + t + t = t + rx + ry = t * i h3 = h3 * s1 - h3 = -h3 ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) -def formula_rustsecp256k1_v0_7_0_gej_add_ge(branch, a, b): - """libsecp256k1's rustsecp256k1_v0_7_0_gej_add_ge""" +def formula_rustsecp256k1_v0_8_0_gej_add_ge(branch, a, b): + """libsecp256k1's rustsecp256k1_v0_8_0_gej_add_ge""" zeroes = {} nonzeroes = {} a_infinity = False @@ -197,7 +183,8 @@ def formula_rustsecp256k1_v0_7_0_gej_add_ge(branch, a, b): rr_alt = rr m_alt = m n = m_alt^2 - q = n * t + q = -t + q = q * n n = n^2 if degenerate: n = m @@ -210,8 +197,6 @@ def formula_rustsecp256k1_v0_7_0_gej_add_ge(branch, a, b): zeroes.update({rz : 'r.z=0'}) else: nonzeroes.update({rz : 'r.z!=0'}) - rz = rz * 2 - q = -q t = t + q rx = t t = t * 2 @@ -219,8 +204,7 @@ def formula_rustsecp256k1_v0_7_0_gej_add_ge(branch, a, b): t = t * rr_alt t = t + n ry = -t - rx = rx * 4 - ry = ry * 4 + ry = ry / 2 if a_infinity: rx = b.X ry = b.Y @@ -229,8 +213,8 @@ def formula_rustsecp256k1_v0_7_0_gej_add_ge(branch, a, b): return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), point_at_infinity()) return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), jacobianpoint(rx, ry, rz)) -def 
formula_rustsecp256k1_v0_7_0_gej_add_ge_old(branch, a, b): - """libsecp256k1's old rustsecp256k1_v0_7_0_gej_add_ge, which fails when ay+by=0 but ax!=bx""" +def formula_rustsecp256k1_v0_8_0_gej_add_ge_old(branch, a, b): + """libsecp256k1's old rustsecp256k1_v0_8_0_gej_add_ge, which fails when ay+by=0 but ax!=bx""" a_infinity = (branch & 1) != 0 zero = {} nonzero = {} @@ -292,15 +276,18 @@ def formula_rustsecp256k1_v0_7_0_gej_add_ge_old(branch, a, b): return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zero, nonzero=nonzero), jacobianpoint(rx, ry, rz)) if __name__ == "__main__": - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_7_0_gej_add_var) - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_7_0_gej_add_ge_var) - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_7_0_gej_add_zinv_var) - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_7_0_gej_add_ge) - check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_7_0_gej_add_ge_old) + success = True + success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_8_0_gej_add_var) + success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_8_0_gej_add_ge_var) + success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_8_0_gej_add_zinv_var) + success = success & check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_8_0_gej_add_ge) + success = success & (not check_symbolic_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_8_0_gej_add_ge_old)) if len(sys.argv) >= 2 and sys.argv[1] == "--exhaustive": - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_7_0_gej_add_var, 43) - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_7_0_gej_add_ge_var, 43) - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_7_0_gej_add_zinv_var, 43) - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_7_0_gej_add_ge, 43) - check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_7_0_gej_add_ge_old [should fail]", 0, 7, 4, formula_rustsecp256k1_v0_7_0_gej_add_ge_old, 43) + success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_var", 0, 7, 5, formula_rustsecp256k1_v0_8_0_gej_add_var, 43) + success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_ge_var", 0, 7, 5, formula_rustsecp256k1_v0_8_0_gej_add_ge_var, 43) + success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_zinv_var", 0, 7, 5, formula_rustsecp256k1_v0_8_0_gej_add_zinv_var, 43) + success = success & check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_ge", 0, 7, 16, formula_rustsecp256k1_v0_8_0_gej_add_ge, 43) + success = success & (not check_exhaustive_jacobian_weierstrass("rustsecp256k1_v0_8_0_gej_add_ge_old [should fail]", 0, 7, 
4, formula_rustsecp256k1_v0_8_0_gej_add_ge_old, 43)) + + sys.exit(int(not success)) diff --git a/secp256k1-sys/depend/secp256k1/sage/weierstrass_prover.sage b/secp256k1-sys/depend/secp256k1/sage/weierstrass_prover.sage index b770c6daf..be9cfd4c7 100644 --- a/secp256k1-sys/depend/secp256k1/sage/weierstrass_prover.sage +++ b/secp256k1-sys/depend/secp256k1/sage/weierstrass_prover.sage @@ -184,6 +184,7 @@ def check_exhaustive_jacobian_weierstrass(name, A, B, branches, formula, p): if r: points.append(point) + ret = True for za in range(1, p): for zb in range(1, p): for pa in points: @@ -211,8 +212,11 @@ def check_exhaustive_jacobian_weierstrass(name, A, B, branches, formula, p): match = True r, e = concrete_verify(require) if not r: + ret = False print(" failure in branch %i for (%s,%s,%s,%s) + (%s,%s,%s,%s) = (%s,%s,%s,%s): %s" % (branch, pA.X, pA.Y, pA.Z, pA.Infinity, pB.X, pB.Y, pB.Z, pB.Infinity, pC.X, pC.Y, pC.Z, pC.Infinity, e)) + print() + return ret def check_symbolic_function(R, assumeAssert, assumeBranch, f, A, B, pa, pb, pA, pB, pC): @@ -244,15 +248,21 @@ def check_symbolic_jacobian_weierstrass(name, A, B, branches, formula): print("Formula " + name + ":") count = 0 + ret = True for branch in range(branches): assumeFormula, assumeBranch, pC = formula(branch, pA, pB) + assumeBranch = assumeBranch.map(lift) + assumeFormula = assumeFormula.map(lift) pC.X = lift(pC.X) pC.Y = lift(pC.Y) pC.Z = lift(pC.Z) pC.Infinity = lift(pC.Infinity) for key in laws_jacobian_weierstrass: - res[key].append((check_symbolic_function(R, assumeFormula, assumeBranch, laws_jacobian_weierstrass[key], A, B, pa, pb, pA, pB, pC), branch)) + success, msg = check_symbolic_function(R, assumeFormula, assumeBranch, laws_jacobian_weierstrass[key], A, B, pa, pb, pA, pB, pC) + if not success: + ret = False + res[key].append((msg, branch)) for key in res: print(" %s:" % key) @@ -262,3 +272,4 @@ def check_symbolic_jacobian_weierstrass(name, A, B, branches, formula): print(" branch %i: %s" % (x[1], x[0])) print() + return ret diff --git a/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s b/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s index a9cc9db13..7e97e819f 100644 --- a/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s +++ b/secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s @@ -27,8 +27,8 @@ Note: .set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff .align 2 - .global rustsecp256k1_v0_7_0_fe_mul_inner - .type rustsecp256k1_v0_7_0_fe_mul_inner, %function + .global rustsecp256k1_v0_8_0_fe_mul_inner + .type rustsecp256k1_v0_8_0_fe_mul_inner, %function @ Arguments: @ r0 r Restrict: can overlap with a, not with b @ r1 a @@ -36,7 +36,7 @@ Note: @ Stack (total 4+10*4 = 44) @ sp + #0 saved 'r' pointer @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -rustsecp256k1_v0_7_0_fe_mul_inner: +rustsecp256k1_v0_8_0_fe_mul_inner: stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} sub sp, sp, #48 @ frame=44 + alignment str r0, [sp, #0] @ save result address, we need it only at the end @@ -511,18 +511,18 @@ rustsecp256k1_v0_7_0_fe_mul_inner: add sp, sp, #48 ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .size rustsecp256k1_v0_7_0_fe_mul_inner, .-rustsecp256k1_v0_7_0_fe_mul_inner + .size rustsecp256k1_v0_8_0_fe_mul_inner, .-rustsecp256k1_v0_8_0_fe_mul_inner .align 2 - .global rustsecp256k1_v0_7_0_fe_sqr_inner - .type rustsecp256k1_v0_7_0_fe_sqr_inner, %function + .global rustsecp256k1_v0_8_0_fe_sqr_inner + .type rustsecp256k1_v0_8_0_fe_sqr_inner, %function @ Arguments: @ r0 r Can overlap with a @ r1 a @ Stack 
(total 4+10*4 = 44) @ sp + #0 saved 'r' pointer @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -rustsecp256k1_v0_7_0_fe_sqr_inner: +rustsecp256k1_v0_8_0_fe_sqr_inner: stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} sub sp, sp, #48 @ frame=44 + alignment str r0, [sp, #0] @ save result address, we need it only at the end @@ -909,5 +909,5 @@ rustsecp256k1_v0_7_0_fe_sqr_inner: add sp, sp, #48 ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .size rustsecp256k1_v0_7_0_fe_sqr_inner, .-rustsecp256k1_v0_7_0_fe_sqr_inner + .size rustsecp256k1_v0_8_0_fe_sqr_inner, .-rustsecp256k1_v0_8_0_fe_sqr_inner diff --git a/secp256k1-sys/depend/secp256k1/src/assumptions.h b/secp256k1-sys/depend/secp256k1/src/assumptions.h index a6be45137..7bec18897 100644 --- a/secp256k1-sys/depend/secp256k1/src/assumptions.h +++ b/secp256k1-sys/depend/secp256k1/src/assumptions.h @@ -10,13 +10,16 @@ #include #include "util.h" +#if defined(SECP256K1_INT128_NATIVE) +#include "int128_native.h" +#endif /* This library, like most software, relies on a number of compiler implementation defined (but not undefined) behaviours. Although the behaviours we require are essentially universal we test them specifically here to reduce the odds of experiencing an unwelcome surprise. */ -struct rustsecp256k1_v0_7_0_assumption_checker { +struct rustsecp256k1_v0_8_0_assumption_checker { /* This uses a trick to implement a static assertion in C89: a type with an array of negative size is not allowed. */ int dummy_array[( @@ -55,7 +58,7 @@ struct rustsecp256k1_v0_7_0_assumption_checker { /* To int64_t. */ ((int64_t)(uint64_t)0xB123C456D789E012ULL == (int64_t)-(int64_t)0x4EDC3BA928761FEEULL) && -#if defined(SECP256K1_WIDEMUL_INT128) +#if defined(SECP256K1_INT128_NATIVE) ((int64_t)(((uint128_t)0xA1234567B8901234ULL << 64) + 0xC5678901D2345678ULL) == (int64_t)-(int64_t)0x3A9876FE2DCBA988ULL) && (((int64_t)(int128_t)(((uint128_t)0xB1C2D3E4F5A6B7C8ULL << 64) + 0xD9E0F1A2B3C4D5E6ULL)) == (int64_t)(uint64_t)0xD9E0F1A2B3C4D5E6ULL) && (((int64_t)(int128_t)(((uint128_t)0xABCDEF0123456789ULL << 64) + 0x0123456789ABCDEFULL)) == (int64_t)(uint64_t)0x0123456789ABCDEFULL) && @@ -71,7 +74,7 @@ struct rustsecp256k1_v0_7_0_assumption_checker { ((((int16_t)0xE9AC) >> 4) == (int16_t)(uint16_t)0xFE9A) && ((((int32_t)0x937C918A) >> 9) == (int32_t)(uint32_t)0xFFC9BE48) && ((((int64_t)0xA8B72231DF9CF4B9ULL) >> 19) == (int64_t)(uint64_t)0xFFFFF516E4463BF3ULL) && -#if defined(SECP256K1_WIDEMUL_INT128) +#if defined(SECP256K1_INT128_NATIVE) ((((int128_t)(((uint128_t)0xCD833A65684A0DBCULL << 64) + 0xB349312F71EA7637ULL)) >> 39) == (int128_t)(((uint128_t)0xFFFFFFFFFF9B0674ULL << 64) + 0xCAD0941B79669262ULL)) && #endif 1) * 2 - 1]; diff --git a/secp256k1-sys/depend/secp256k1/src/basic-config.h b/secp256k1-sys/depend/secp256k1/src/basic-config.h deleted file mode 100644 index 6f7693cb8..000000000 --- a/secp256k1-sys/depend/secp256k1/src/basic-config.h +++ /dev/null @@ -1,17 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef SECP256K1_BASIC_CONFIG_H -#define SECP256K1_BASIC_CONFIG_H - -#ifdef USE_BASIC_CONFIG - -#define ECMULT_WINDOW_SIZE 15 -#define ECMULT_GEN_PREC_BITS 4 - -#endif /* USE_BASIC_CONFIG */ - -#endif /* SECP256K1_BASIC_CONFIG_H */ diff --git 
a/secp256k1-sys/depend/secp256k1/src/bench.c b/secp256k1-sys/depend/secp256k1/src/bench.c index 276e8464f..9829759ee 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench.c +++ b/secp256k1-sys/depend/secp256k1/src/bench.c @@ -57,7 +57,7 @@ void help(int default_iters) { } typedef struct { - rustsecp256k1_v0_7_0_context *ctx; + rustsecp256k1_v0_8_0_context *ctx; unsigned char msg[32]; unsigned char key[32]; unsigned char sig[72]; @@ -71,14 +71,14 @@ static void bench_verify(void* arg, int iters) { bench_verify_data* data = (bench_verify_data*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_pubkey pubkey; - rustsecp256k1_v0_7_0_ecdsa_signature sig; + rustsecp256k1_v0_8_0_pubkey pubkey; + rustsecp256k1_v0_8_0_ecdsa_signature sig; data->sig[data->siglen - 1] ^= (i & 0xFF); data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); data->sig[data->siglen - 1] ^= (i & 0xFF); data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); @@ -86,7 +86,7 @@ static void bench_verify(void* arg, int iters) { } typedef struct { - rustsecp256k1_v0_7_0_context* ctx; + rustsecp256k1_v0_8_0_context* ctx; unsigned char msg[32]; unsigned char key[32]; } bench_sign_data; @@ -111,9 +111,9 @@ static void bench_sign_run(void* arg, int iters) { for (i = 0; i < iters; i++) { size_t siglen = 74; int j; - rustsecp256k1_v0_7_0_ecdsa_signature signature; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); + rustsecp256k1_v0_8_0_ecdsa_signature signature; + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); for (j = 0; j < 32; j++) { data->msg[j] = sig[j]; data->key[j] = sig[j + 32]; @@ -135,8 +135,8 @@ static void bench_sign_run(void* arg, int iters) { int main(int argc, char** argv) { int i; - rustsecp256k1_v0_7_0_pubkey pubkey; - rustsecp256k1_v0_7_0_ecdsa_signature sig; + rustsecp256k1_v0_8_0_pubkey pubkey; + rustsecp256k1_v0_8_0_ecdsa_signature sig; bench_verify_data data; int d = argc == 1; @@ -164,7 +164,7 @@ int main(int argc, char** argv) { /* Check if the user tries to benchmark optional module without building it */ #ifndef ENABLE_MODULE_ECDH - if (have_flag(argc, argv, "ecdh")) { + if (have_flag(argc, argv, "ecdh")) { fprintf(stderr, "./bench: ECDH module not enabled.\n"); fprintf(stderr, "Use ./configure --enable-module-ecdh.\n\n"); return 1; @@ -172,7 +172,7 @@ int main(int argc, char** argv) { #endif #ifndef ENABLE_MODULE_RECOVERY - if (have_flag(argc, argv, "recover") || have_flag(argc, argv, "ecdsa_recover")) { + if (have_flag(argc, argv, "recover") || have_flag(argc, argv, "ecdsa_recover")) { fprintf(stderr, "./bench: Public key recovery 
module not enabled.\n"); fprintf(stderr, "Use ./configure --enable-module-recovery.\n\n"); return 1; @@ -180,15 +180,15 @@ int main(int argc, char** argv) { #endif #ifndef ENABLE_MODULE_SCHNORRSIG - if (have_flag(argc, argv, "schnorrsig") || have_flag(argc, argv, "schnorrsig_sign") || have_flag(argc, argv, "schnorrsig_verify")) { + if (have_flag(argc, argv, "schnorrsig") || have_flag(argc, argv, "schnorrsig_sign") || have_flag(argc, argv, "schnorrsig_verify")) { fprintf(stderr, "./bench: Schnorr signatures module not enabled.\n"); fprintf(stderr, "Use ./configure --enable-module-schnorrsig.\n\n"); return 1; } #endif - /* ECDSA verification benchmark */ - data.ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + /* ECDSA benchmark */ + data.ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); for (i = 0; i < 32; i++) { data.msg[i] = 1 + i; @@ -197,23 +197,18 @@ int main(int argc, char** argv) { data.key[i] = 33 + i; } data.siglen = 72; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(data.ctx, &pubkey, data.key)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(data.ctx, &pubkey, data.key)); data.pubkeylen = 33; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); print_output_table_header_row(); if (d || have_flag(argc, argv, "ecdsa") || have_flag(argc, argv, "verify") || have_flag(argc, argv, "ecdsa_verify")) run_benchmark("ecdsa_verify", bench_verify, NULL, NULL, &data, 10, iters); - rustsecp256k1_v0_7_0_context_destroy(data.ctx); - - /* ECDSA signing benchmark */ - data.ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN); - if (d || have_flag(argc, argv, "ecdsa") || have_flag(argc, argv, "sign") || have_flag(argc, argv, "ecdsa_sign")) run_benchmark("ecdsa_sign", bench_sign_run, bench_sign_setup, NULL, &data, 10, iters); - rustsecp256k1_v0_7_0_context_destroy(data.ctx); + rustsecp256k1_v0_8_0_context_destroy(data.ctx); #ifdef ENABLE_MODULE_ECDH /* ECDH benchmarks */ diff --git a/secp256k1-sys/depend/secp256k1/src/bench.h b/secp256k1-sys/depend/secp256k1/src/bench.h index aa275fe91..611ba11f0 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench.h +++ b/secp256k1-sys/depend/secp256k1/src/bench.h @@ -7,15 +7,31 @@ #ifndef SECP256K1_BENCH_H #define SECP256K1_BENCH_H +#include #include #include #include -#include "sys/time.h" + +#if (defined(_MSC_VER) && _MSC_VER >= 1900) +# include +#else +# include "sys/time.h" +#endif static int64_t gettime_i64(void) { +#if (defined(_MSC_VER) && _MSC_VER >= 1900) + /* C11 way to get wallclock time */ + struct timespec tv; + if (!timespec_get(&tv, TIME_UTC)) { + fputs("timespec_get failed!", stderr); + exit(1); + } + return (int64_t)tv.tv_nsec / 1000 + (int64_t)tv.tv_sec * 1000000LL; +#else struct timeval tv; gettimeofday(&tv, NULL); return (int64_t)tv.tv_usec + (int64_t)tv.tv_sec * 1000000LL; +#endif } #define FP_EXP (6) diff --git a/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c 
b/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c index 147786df4..1372fe1eb 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_ecmult.c @@ -35,14 +35,14 @@ void help(char **argv) { typedef struct { /* Setup once in advance */ - rustsecp256k1_v0_7_0_context* ctx; - rustsecp256k1_v0_7_0_scratch_space* scratch; - rustsecp256k1_v0_7_0_scalar* scalars; - rustsecp256k1_v0_7_0_ge* pubkeys; - rustsecp256k1_v0_7_0_gej* pubkeys_gej; - rustsecp256k1_v0_7_0_scalar* seckeys; - rustsecp256k1_v0_7_0_gej* expected_output; - rustsecp256k1_v0_7_0_ecmult_multi_func ecmult_multi; + rustsecp256k1_v0_8_0_context* ctx; + rustsecp256k1_v0_8_0_scratch_space* scratch; + rustsecp256k1_v0_8_0_scalar* scalars; + rustsecp256k1_v0_8_0_ge* pubkeys; + rustsecp256k1_v0_8_0_gej* pubkeys_gej; + rustsecp256k1_v0_8_0_scalar* seckeys; + rustsecp256k1_v0_8_0_gej* expected_output; + rustsecp256k1_v0_8_0_ecmult_multi_func ecmult_multi; /* Changes per benchmark */ size_t count; @@ -54,7 +54,7 @@ typedef struct { size_t offset2; /* Benchmark output. */ - rustsecp256k1_v0_7_0_gej* output; + rustsecp256k1_v0_8_0_gej* output; } bench_data; /* Hashes x into [0, POINTS) twice and store the result in offset1 and offset2. */ @@ -67,26 +67,24 @@ static void hash_into_offset(bench_data* data, size_t x) { * sum(outputs) ?= (sum(scalars_gen) + sum(seckeys)*sum(scalars))*G */ static void bench_ecmult_teardown_helper(bench_data* data, size_t* seckey_offset, size_t* scalar_offset, size_t* scalar_gen_offset, int iters) { int i; - rustsecp256k1_v0_7_0_gej sum_output, tmp; - rustsecp256k1_v0_7_0_scalar sum_scalars; + rustsecp256k1_v0_8_0_gej sum_output, tmp; + rustsecp256k1_v0_8_0_scalar sum_scalars; - rustsecp256k1_v0_7_0_gej_set_infinity(&sum_output); - rustsecp256k1_v0_7_0_scalar_clear(&sum_scalars); + rustsecp256k1_v0_8_0_gej_set_infinity(&sum_output); + rustsecp256k1_v0_8_0_scalar_clear(&sum_scalars); for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_7_0_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL); + rustsecp256k1_v0_8_0_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL); if (scalar_gen_offset != NULL) { - rustsecp256k1_v0_7_0_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]); + rustsecp256k1_v0_8_0_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]); } if (seckey_offset != NULL) { - rustsecp256k1_v0_7_0_scalar s = data->seckeys[(*seckey_offset+i) % POINTS]; - rustsecp256k1_v0_7_0_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]); - rustsecp256k1_v0_7_0_scalar_add(&sum_scalars, &sum_scalars, &s); + rustsecp256k1_v0_8_0_scalar s = data->seckeys[(*seckey_offset+i) % POINTS]; + rustsecp256k1_v0_8_0_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]); + rustsecp256k1_v0_8_0_scalar_add(&sum_scalars, &sum_scalars, &s); } } - rustsecp256k1_v0_7_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars); - rustsecp256k1_v0_7_0_gej_neg(&tmp, &tmp); - rustsecp256k1_v0_7_0_gej_add_var(&tmp, &tmp, &sum_output, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&tmp)); + rustsecp256k1_v0_8_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&tmp, &sum_output)); } static void bench_ecmult_setup(void* arg) { @@ -101,7 +99,7 @@ static void bench_ecmult_gen(void* arg, int iters) { int i; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_7_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], 
&data->scalars[(data->offset1+i) % POINTS]); + rustsecp256k1_v0_8_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]); } } @@ -115,7 +113,7 @@ static void bench_ecmult_const(void* arg, int iters) { int i; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_7_0_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], 256); + rustsecp256k1_v0_8_0_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], 256); } } @@ -129,7 +127,7 @@ static void bench_ecmult_1p(void* arg, int iters) { int i; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_7_0_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL); + rustsecp256k1_v0_8_0_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL); } } @@ -140,12 +138,12 @@ static void bench_ecmult_1p_teardown(void* arg, int iters) { static void bench_ecmult_0p_g(void* arg, int iters) { bench_data* data = (bench_data*)arg; - rustsecp256k1_v0_7_0_scalar zero; + rustsecp256k1_v0_8_0_scalar zero; int i; - rustsecp256k1_v0_7_0_scalar_set_int(&zero, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&zero, 0); for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_7_0_ecmult(&data->output[i], NULL, &zero, &data->scalars[(data->offset1+i) % POINTS]); + rustsecp256k1_v0_8_0_ecmult(&data->output[i], NULL, &zero, &data->scalars[(data->offset1+i) % POINTS]); } } @@ -159,7 +157,7 @@ static void bench_ecmult_1p_g(void* arg, int iters) { int i; for (i = 0; i < iters/2; ++i) { - rustsecp256k1_v0_7_0_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]); + rustsecp256k1_v0_8_0_ecmult(&data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]); } } @@ -185,12 +183,12 @@ static void run_ecmult_bench(bench_data* data, int iters) { run_benchmark(str, bench_ecmult_1p_g, bench_ecmult_setup, bench_ecmult_1p_g_teardown, data, 10, 2*iters); } -static int bench_ecmult_multi_callback(rustsecp256k1_v0_7_0_scalar* sc, rustsecp256k1_v0_7_0_ge* ge, size_t idx, void* arg) { +static int bench_ecmult_multi_callback(rustsecp256k1_v0_8_0_scalar* sc, rustsecp256k1_v0_8_0_ge* ge, size_t idx, void* arg) { bench_data* data = (bench_data*)arg; if (data->includes_g) ++idx; if (idx == 0) { *sc = data->scalars[data->offset1]; - *ge = rustsecp256k1_v0_7_0_ge_const_g; + *ge = rustsecp256k1_v0_8_0_ge_const_g; } else { *sc = data->scalars[(data->offset1 + idx) % POINTS]; *ge = data->pubkeys[(data->offset2 + idx - 1) % POINTS]; @@ -224,14 +222,14 @@ static void bench_ecmult_multi_teardown(void* arg, int iters) { iters = iters / data->count; /* Verify the results in teardown, to avoid doing comparisons while benchmarking. 
*/ for (iter = 0; iter < iters; ++iter) { - rustsecp256k1_v0_7_0_gej tmp; - rustsecp256k1_v0_7_0_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&tmp)); + rustsecp256k1_v0_8_0_gej tmp; + rustsecp256k1_v0_8_0_gej_add_var(&tmp, &data->output[iter], &data->expected_output[iter], NULL); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&tmp)); } } -static void generate_scalar(uint32_t num, rustsecp256k1_v0_7_0_scalar* scalar) { - rustsecp256k1_v0_7_0_sha256 sha256; +static void generate_scalar(uint32_t num, rustsecp256k1_v0_8_0_scalar* scalar) { + rustsecp256k1_v0_8_0_sha256 sha256; unsigned char c[10] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; unsigned char buf[32]; int overflow = 0; @@ -239,16 +237,16 @@ static void generate_scalar(uint32_t num, rustsecp256k1_v0_7_0_scalar* scalar) { c[7] = num >> 8; c[8] = num >> 16; c[9] = num >> 24; - rustsecp256k1_v0_7_0_sha256_initialize(&sha256); - rustsecp256k1_v0_7_0_sha256_write(&sha256, c, sizeof(c)); - rustsecp256k1_v0_7_0_sha256_finalize(&sha256, buf); - rustsecp256k1_v0_7_0_scalar_set_b32(scalar, buf, &overflow); + rustsecp256k1_v0_8_0_sha256_initialize(&sha256); + rustsecp256k1_v0_8_0_sha256_write(&sha256, c, sizeof(c)); + rustsecp256k1_v0_8_0_sha256_finalize(&sha256, buf); + rustsecp256k1_v0_8_0_scalar_set_b32(scalar, buf, &overflow); CHECK(!overflow); } static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_g, int num_iters) { char str[32]; - static const rustsecp256k1_v0_7_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_8_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); size_t iters = 1 + num_iters / count; size_t iter; @@ -258,15 +256,15 @@ static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_ /* Compute (the negation of) the expected results directly. */ hash_into_offset(data, data->count); for (iter = 0; iter < iters; ++iter) { - rustsecp256k1_v0_7_0_scalar tmp; - rustsecp256k1_v0_7_0_scalar total = data->scalars[(data->offset1++) % POINTS]; + rustsecp256k1_v0_8_0_scalar tmp; + rustsecp256k1_v0_8_0_scalar total = data->scalars[(data->offset1++) % POINTS]; size_t i = 0; for (i = 0; i + 1 < count; ++i) { - rustsecp256k1_v0_7_0_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]); - rustsecp256k1_v0_7_0_scalar_add(&total, &total, &tmp); + rustsecp256k1_v0_8_0_scalar_mul(&tmp, &data->seckeys[(data->offset2++) % POINTS], &data->scalars[(data->offset1++) % POINTS]); + rustsecp256k1_v0_8_0_scalar_add(&total, &total, &tmp); } - rustsecp256k1_v0_7_0_scalar_negate(&total, &total); - rustsecp256k1_v0_7_0_ecmult(&data->expected_output[iter], NULL, &zero, &total); + rustsecp256k1_v0_8_0_scalar_negate(&total, &total); + rustsecp256k1_v0_8_0_ecmult(&data->expected_output[iter], NULL, &zero, &total); } /* Run the benchmark. 
*/ @@ -285,7 +283,7 @@ int main(int argc, char **argv) { int iters = get_iters(10000); - data.ecmult_multi = rustsecp256k1_v0_7_0_ecmult_multi_var; + data.ecmult_multi = rustsecp256k1_v0_8_0_ecmult_multi_var; if (argc > 1) { if(have_flag(argc, argv, "-h") @@ -295,10 +293,10 @@ int main(int argc, char **argv) { return 0; } else if(have_flag(argc, argv, "pippenger_wnaf")) { printf("Using pippenger_wnaf:\n"); - data.ecmult_multi = rustsecp256k1_v0_7_0_ecmult_pippenger_batch_single; + data.ecmult_multi = rustsecp256k1_v0_8_0_ecmult_pippenger_batch_single; } else if(have_flag(argc, argv, "strauss_wnaf")) { printf("Using strauss_wnaf:\n"); - data.ecmult_multi = rustsecp256k1_v0_7_0_ecmult_strauss_batch_single; + data.ecmult_multi = rustsecp256k1_v0_8_0_ecmult_strauss_batch_single; } else if(have_flag(argc, argv, "simple")) { printf("Using simple algorithm:\n"); } else { @@ -308,33 +306,33 @@ int main(int argc, char **argv) { } } - data.ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - scratch_size = rustsecp256k1_v0_7_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; + data.ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); + scratch_size = rustsecp256k1_v0_8_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; if (!have_flag(argc, argv, "simple")) { - data.scratch = rustsecp256k1_v0_7_0_scratch_space_create(data.ctx, scratch_size); + data.scratch = rustsecp256k1_v0_8_0_scratch_space_create(data.ctx, scratch_size); } else { data.scratch = NULL; } /* Allocate stuff */ - data.scalars = malloc(sizeof(rustsecp256k1_v0_7_0_scalar) * POINTS); - data.seckeys = malloc(sizeof(rustsecp256k1_v0_7_0_scalar) * POINTS); - data.pubkeys = malloc(sizeof(rustsecp256k1_v0_7_0_ge) * POINTS); - data.pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_7_0_gej) * POINTS); - data.expected_output = malloc(sizeof(rustsecp256k1_v0_7_0_gej) * (iters + 1)); - data.output = malloc(sizeof(rustsecp256k1_v0_7_0_gej) * (iters + 1)); + data.scalars = malloc(sizeof(rustsecp256k1_v0_8_0_scalar) * POINTS); + data.seckeys = malloc(sizeof(rustsecp256k1_v0_8_0_scalar) * POINTS); + data.pubkeys = malloc(sizeof(rustsecp256k1_v0_8_0_ge) * POINTS); + data.pubkeys_gej = malloc(sizeof(rustsecp256k1_v0_8_0_gej) * POINTS); + data.expected_output = malloc(sizeof(rustsecp256k1_v0_8_0_gej) * (iters + 1)); + data.output = malloc(sizeof(rustsecp256k1_v0_8_0_gej) * (iters + 1)); /* Generate a set of scalars, and private/public keypairs. 
*/ - rustsecp256k1_v0_7_0_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1_v0_7_0_ge_const_g); - rustsecp256k1_v0_7_0_scalar_set_int(&data.seckeys[0], 1); + rustsecp256k1_v0_8_0_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1_v0_8_0_ge_const_g); + rustsecp256k1_v0_8_0_scalar_set_int(&data.seckeys[0], 1); for (i = 0; i < POINTS; ++i) { generate_scalar(i, &data.scalars[i]); if (i) { - rustsecp256k1_v0_7_0_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL); - rustsecp256k1_v0_7_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); + rustsecp256k1_v0_8_0_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL); + rustsecp256k1_v0_8_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); } } - rustsecp256k1_v0_7_0_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS); + rustsecp256k1_v0_8_0_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS); print_output_table_header_row(); @@ -358,9 +356,9 @@ int main(int argc, char **argv) { } if (data.scratch != NULL) { - rustsecp256k1_v0_7_0_scratch_space_destroy(data.ctx, data.scratch); + rustsecp256k1_v0_8_0_scratch_space_destroy(data.ctx, data.scratch); } - rustsecp256k1_v0_7_0_context_destroy(data.ctx); + rustsecp256k1_v0_8_0_context_destroy(data.ctx); free(data.scalars); free(data.pubkeys); free(data.pubkeys_gej); diff --git a/secp256k1-sys/depend/secp256k1/src/bench_internal.c b/secp256k1-sys/depend/secp256k1/src/bench_internal.c index 061a10ff2..197763fca 100644 --- a/secp256k1-sys/depend/secp256k1/src/bench_internal.c +++ b/secp256k1-sys/depend/secp256k1/src/bench_internal.c @@ -19,10 +19,10 @@ #include "bench.h" typedef struct { - rustsecp256k1_v0_7_0_scalar scalar[2]; - rustsecp256k1_v0_7_0_fe fe[4]; - rustsecp256k1_v0_7_0_ge ge[2]; - rustsecp256k1_v0_7_0_gej gej[2]; + rustsecp256k1_v0_8_0_scalar scalar[2]; + rustsecp256k1_v0_8_0_fe fe[4]; + rustsecp256k1_v0_8_0_ge ge[2]; + rustsecp256k1_v0_8_0_gej gej[2]; unsigned char data[64]; int wnaf[256]; } bench_inv; @@ -63,18 +63,18 @@ void bench_setup(void* arg) { } }; - rustsecp256k1_v0_7_0_scalar_set_b32(&data->scalar[0], init[0], NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&data->scalar[1], init[1], NULL); - rustsecp256k1_v0_7_0_fe_set_b32(&data->fe[0], init[0]); - rustsecp256k1_v0_7_0_fe_set_b32(&data->fe[1], init[1]); - rustsecp256k1_v0_7_0_fe_set_b32(&data->fe[2], init[2]); - rustsecp256k1_v0_7_0_fe_set_b32(&data->fe[3], init[3]); - CHECK(rustsecp256k1_v0_7_0_ge_set_xo_var(&data->ge[0], &data->fe[0], 0)); - CHECK(rustsecp256k1_v0_7_0_ge_set_xo_var(&data->ge[1], &data->fe[1], 1)); - rustsecp256k1_v0_7_0_gej_set_ge(&data->gej[0], &data->ge[0]); - rustsecp256k1_v0_7_0_gej_rescale(&data->gej[0], &data->fe[2]); - rustsecp256k1_v0_7_0_gej_set_ge(&data->gej[1], &data->ge[1]); - rustsecp256k1_v0_7_0_gej_rescale(&data->gej[1], &data->fe[3]); + rustsecp256k1_v0_8_0_scalar_set_b32(&data->scalar[0], init[0], NULL); + rustsecp256k1_v0_8_0_scalar_set_b32(&data->scalar[1], init[1], NULL); + rustsecp256k1_v0_8_0_fe_set_b32(&data->fe[0], init[0]); + rustsecp256k1_v0_8_0_fe_set_b32(&data->fe[1], init[1]); + rustsecp256k1_v0_8_0_fe_set_b32(&data->fe[2], init[2]); + rustsecp256k1_v0_8_0_fe_set_b32(&data->fe[3], init[3]); + CHECK(rustsecp256k1_v0_8_0_ge_set_xo_var(&data->ge[0], &data->fe[0], 0)); + CHECK(rustsecp256k1_v0_8_0_ge_set_xo_var(&data->ge[1], &data->fe[1], 1)); + rustsecp256k1_v0_8_0_gej_set_ge(&data->gej[0], &data->ge[0]); + rustsecp256k1_v0_8_0_gej_rescale(&data->gej[0], &data->fe[2]); + 
rustsecp256k1_v0_8_0_gej_set_ge(&data->gej[1], &data->ge[1]); + rustsecp256k1_v0_8_0_gej_rescale(&data->gej[1], &data->fe[3]); memcpy(data->data, init[0], 32); memcpy(data->data + 32, init[1], 32); } @@ -84,7 +84,7 @@ void bench_scalar_add(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - j += rustsecp256k1_v0_7_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + j += rustsecp256k1_v0_8_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -94,7 +94,7 @@ void bench_scalar_negate(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_scalar_negate(&data->scalar[0], &data->scalar[0]); + rustsecp256k1_v0_8_0_scalar_negate(&data->scalar[0], &data->scalar[0]); } } @@ -103,7 +103,7 @@ void bench_scalar_mul(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_8_0_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } } @@ -112,8 +112,8 @@ void bench_scalar_split(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]); - j += rustsecp256k1_v0_7_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_8_0_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]); + j += rustsecp256k1_v0_8_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -123,8 +123,8 @@ void bench_scalar_inverse(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_scalar_inverse(&data->scalar[0], &data->scalar[0]); - j += rustsecp256k1_v0_7_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_8_0_scalar_inverse(&data->scalar[0], &data->scalar[0]); + j += rustsecp256k1_v0_8_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } @@ -134,18 +134,27 @@ void bench_scalar_inverse_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_scalar_inverse_var(&data->scalar[0], &data->scalar[0]); - j += rustsecp256k1_v0_7_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + rustsecp256k1_v0_8_0_scalar_inverse_var(&data->scalar[0], &data->scalar[0]); + j += rustsecp256k1_v0_8_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(j <= iters); } +void bench_field_half(void* arg, int iters) { + int i; + bench_inv *data = (bench_inv*)arg; + + for (i = 0; i < iters; i++) { + rustsecp256k1_v0_8_0_fe_half(&data->fe[0]); + } +} + void bench_field_normalize(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_fe_normalize(&data->fe[0]); + rustsecp256k1_v0_8_0_fe_normalize(&data->fe[0]); } } @@ -154,7 +163,7 @@ void bench_field_normalize_weak(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_fe_normalize_weak(&data->fe[0]); + rustsecp256k1_v0_8_0_fe_normalize_weak(&data->fe[0]); } } @@ -163,7 +172,7 @@ void bench_field_mul(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]); 
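A note on the benchmark idiom visible in these hunks: each bench_* loop either feeds one call's result into the next iteration (data->scalar[0], data->fe[0]) or accumulates return values into j, which CHECK(j <= iters) consumes afterwards, so the compiler cannot eliminate the measured calls as dead code. A minimal self-contained sketch of that idiom, with a hypothetical do_op standing in for calls like rustsecp256k1_v0_8_0_fe_mul:

```
#include <stdio.h>
#include <time.h>

/* Hypothetical operation standing in for the benchmarked library call. */
static unsigned do_op(unsigned x) { return x * 2654435761u + 1; }

int main(void) {
    unsigned acc = 42;   /* each call's result feeds the next iteration */
    int i, iters = 1000000;
    clock_t begin, end;

    begin = clock();
    for (i = 0; i < iters; i++) {
        acc = do_op(acc);
    }
    end = clock();

    /* Printing acc afterwards keeps the loop from being optimized away. */
    printf("%d iters in %.0f us (acc=%u)\n", iters,
           1e6 * (double)(end - begin) / CLOCKS_PER_SEC, acc);
    return 0;
}
```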
+ rustsecp256k1_v0_8_0_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]); } } @@ -172,7 +181,7 @@ void bench_field_sqr(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_fe_sqr(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_8_0_fe_sqr(&data->fe[0], &data->fe[0]); } } @@ -181,8 +190,8 @@ void bench_field_inverse(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_fe_inv(&data->fe[0], &data->fe[0]); - rustsecp256k1_v0_7_0_fe_add(&data->fe[0], &data->fe[1]); + rustsecp256k1_v0_8_0_fe_inv(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_8_0_fe_add(&data->fe[0], &data->fe[1]); } } @@ -191,20 +200,20 @@ void bench_field_inverse_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_fe_inv_var(&data->fe[0], &data->fe[0]); - rustsecp256k1_v0_7_0_fe_add(&data->fe[0], &data->fe[1]); + rustsecp256k1_v0_8_0_fe_inv_var(&data->fe[0], &data->fe[0]); + rustsecp256k1_v0_8_0_fe_add(&data->fe[0], &data->fe[1]); } } void bench_field_sqrt(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_7_0_fe t; + rustsecp256k1_v0_8_0_fe t; for (i = 0; i < iters; i++) { t = data->fe[0]; - j += rustsecp256k1_v0_7_0_fe_sqrt(&data->fe[0], &t); - rustsecp256k1_v0_7_0_fe_add(&data->fe[0], &data->fe[1]); + j += rustsecp256k1_v0_8_0_fe_sqrt(&data->fe[0], &t); + rustsecp256k1_v0_8_0_fe_add(&data->fe[0], &data->fe[1]); } CHECK(j <= iters); } @@ -214,7 +223,7 @@ void bench_group_double_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_gej_double_var(&data->gej[0], &data->gej[0], NULL); + rustsecp256k1_v0_8_0_gej_double_var(&data->gej[0], &data->gej[0], NULL); } } @@ -223,7 +232,7 @@ void bench_group_add_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL); + rustsecp256k1_v0_8_0_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL); } } @@ -232,7 +241,7 @@ void bench_group_add_affine(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]); + rustsecp256k1_v0_8_0_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]); } } @@ -241,7 +250,16 @@ void bench_group_add_affine_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL); + rustsecp256k1_v0_8_0_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL); + } +} + +void bench_group_add_zinv_var(void* arg, int iters) { + int i; + bench_inv *data = (bench_inv*)arg; + + for (i = 0; i < iters; i++) { + rustsecp256k1_v0_8_0_gej_add_zinv_var(&data->gej[0], &data->gej[0], &data->ge[1], &data->gej[0].y); } } @@ -250,18 +268,18 @@ void bench_group_to_affine_var(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; ++i) { - rustsecp256k1_v0_7_0_ge_set_gej_var(&data->ge[1], &data->gej[0]); + rustsecp256k1_v0_8_0_ge_set_gej_var(&data->ge[1], &data->gej[0]); /* Use the output affine X/Y coordinates to vary the input X/Y/Z coordinates. Note that the resulting coordinates will generally not correspond to a point on the curve, but this is not a problem for the code being benchmarked here. 
Adding and normalizing have less overhead than EC operations (which could guarantee the point remains on the curve). */ - rustsecp256k1_v0_7_0_fe_add(&data->gej[0].x, &data->ge[1].y); - rustsecp256k1_v0_7_0_fe_add(&data->gej[0].y, &data->fe[2]); - rustsecp256k1_v0_7_0_fe_add(&data->gej[0].z, &data->ge[1].x); - rustsecp256k1_v0_7_0_fe_normalize_var(&data->gej[0].x); - rustsecp256k1_v0_7_0_fe_normalize_var(&data->gej[0].y); - rustsecp256k1_v0_7_0_fe_normalize_var(&data->gej[0].z); + rustsecp256k1_v0_8_0_fe_add(&data->gej[0].x, &data->ge[1].y); + rustsecp256k1_v0_8_0_fe_add(&data->gej[0].y, &data->fe[2]); + rustsecp256k1_v0_8_0_fe_add(&data->gej[0].z, &data->ge[1].x); + rustsecp256k1_v0_8_0_fe_normalize_var(&data->gej[0].x); + rustsecp256k1_v0_8_0_fe_normalize_var(&data->gej[0].y); + rustsecp256k1_v0_8_0_fe_normalize_var(&data->gej[0].z); } } @@ -270,8 +288,8 @@ void bench_ecmult_wnaf(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - bits += rustsecp256k1_v0_7_0_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A); - overflow += rustsecp256k1_v0_7_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + bits += rustsecp256k1_v0_8_0_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A); + overflow += rustsecp256k1_v0_8_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(overflow >= 0); CHECK(bits <= 256*iters); @@ -282,8 +300,8 @@ void bench_wnaf_const(void* arg, int iters) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { - bits += rustsecp256k1_v0_7_0_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256); - overflow += rustsecp256k1_v0_7_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); + bits += rustsecp256k1_v0_8_0_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256); + overflow += rustsecp256k1_v0_8_0_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(overflow >= 0); CHECK(bits <= 256*iters); @@ -293,51 +311,43 @@ void bench_wnaf_const(void* arg, int iters) { void bench_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_7_0_sha256 sha; + rustsecp256k1_v0_8_0_sha256 sha; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_sha256_initialize(&sha); - rustsecp256k1_v0_7_0_sha256_write(&sha, data->data, 32); - rustsecp256k1_v0_7_0_sha256_finalize(&sha, data->data); + rustsecp256k1_v0_8_0_sha256_initialize(&sha); + rustsecp256k1_v0_8_0_sha256_write(&sha, data->data, 32); + rustsecp256k1_v0_8_0_sha256_finalize(&sha, data->data); } } void bench_hmac_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_7_0_hmac_sha256 hmac; + rustsecp256k1_v0_8_0_hmac_sha256 hmac; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hmac, data->data, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, data->data, 32); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hmac, data->data); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hmac, data->data, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, data->data, 32); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hmac, data->data); } } void bench_rfc6979_hmac_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 rng; - - for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(&rng, data->data, 32); - } -} + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 rng; -void 
bench_context_verify(void* arg, int iters) { - int i; - (void)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_context_destroy(rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_VERIFY)); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, data->data, 32); } } -void bench_context_sign(void* arg, int iters) { +void bench_context(void* arg, int iters) { int i; (void)arg; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_context_destroy(rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN)); + rustsecp256k1_v0_8_0_context_destroy(rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE)); } } @@ -354,6 +364,7 @@ int main(int argc, char **argv) { if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, iters); + if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "half")) run_benchmark("field_half", bench_field_half, bench_setup, NULL, &data, 10, iters*100); if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, iters*100); if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, iters*100); if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "sqr")) run_benchmark("field_sqr", bench_field_sqr, bench_setup, NULL, &data, 10, iters*10); @@ -366,6 +377,7 @@ int main(int argc, char **argv) { if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, iters*10); if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, iters*10); if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, iters*10); + if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_zinv_var", bench_group_add_zinv_var, bench_setup, NULL, &data, 10, iters*10); if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters); @@ -375,8 +387,7 @@ int main(int argc, char **argv) { if (d || have_flag(argc, argv, "hash") || have_flag(argc, argv, "hmac")) run_benchmark("hash_hmac_sha256", bench_hmac_sha256, bench_setup, NULL, &data, 10, iters); if (d || have_flag(argc, argv, "hash") || have_flag(argc, argv, "rng6979")) run_benchmark("hash_rfc6979_hmac_sha256", bench_rfc6979_hmac_sha256, bench_setup, NULL, &data, 10, iters); - if (d || have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 1 + iters/1000); - if (d || have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) 
run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 1 + iters/100); + if (d || have_flag(argc, argv, "context")) run_benchmark("context_create", bench_context, bench_setup, NULL, &data, 10, iters); return 0; } diff --git a/secp256k1-sys/depend/secp256k1/src/ecdsa.h b/secp256k1-sys/depend/secp256k1/src/ecdsa.h index d8b6d1ee0..ba52241b8 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecdsa.h +++ b/secp256k1-sys/depend/secp256k1/src/ecdsa.h @@ -13,9 +13,9 @@ #include "group.h" #include "ecmult.h" -static int rustsecp256k1_v0_7_0_ecdsa_sig_parse(rustsecp256k1_v0_7_0_scalar *r, rustsecp256k1_v0_7_0_scalar *s, const unsigned char *sig, size_t size); -static int rustsecp256k1_v0_7_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *s); -static int rustsecp256k1_v0_7_0_ecdsa_sig_verify(const rustsecp256k1_v0_7_0_scalar* r, const rustsecp256k1_v0_7_0_scalar* s, const rustsecp256k1_v0_7_0_ge *pubkey, const rustsecp256k1_v0_7_0_scalar *message); -static int rustsecp256k1_v0_7_0_ecdsa_sig_sign(const rustsecp256k1_v0_7_0_ecmult_gen_context *ctx, rustsecp256k1_v0_7_0_scalar* r, rustsecp256k1_v0_7_0_scalar* s, const rustsecp256k1_v0_7_0_scalar *seckey, const rustsecp256k1_v0_7_0_scalar *message, const rustsecp256k1_v0_7_0_scalar *nonce, int *recid); +static int rustsecp256k1_v0_8_0_ecdsa_sig_parse(rustsecp256k1_v0_8_0_scalar *r, rustsecp256k1_v0_8_0_scalar *s, const unsigned char *sig, size_t size); +static int rustsecp256k1_v0_8_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *s); +static int rustsecp256k1_v0_8_0_ecdsa_sig_verify(const rustsecp256k1_v0_8_0_scalar* r, const rustsecp256k1_v0_8_0_scalar* s, const rustsecp256k1_v0_8_0_ge *pubkey, const rustsecp256k1_v0_8_0_scalar *message); +static int rustsecp256k1_v0_8_0_ecdsa_sig_sign(const rustsecp256k1_v0_8_0_ecmult_gen_context *ctx, rustsecp256k1_v0_8_0_scalar* r, rustsecp256k1_v0_8_0_scalar* s, const rustsecp256k1_v0_8_0_scalar *seckey, const rustsecp256k1_v0_8_0_scalar *message, const rustsecp256k1_v0_8_0_scalar *nonce, int *recid); #endif /* SECP256K1_ECDSA_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h b/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h index 446d87cfb..c3c3d5df3 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecdsa_impl.h @@ -28,7 +28,7 @@ * sage: '%x' % (EllipticCurve ([F (a), F (b)]).order()) * 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141' */ -static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_8_0_fe rustsecp256k1_v0_8_0_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL ); @@ -42,11 +42,11 @@ static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_ecdsa_const_order_as_f * sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order()) * '14551231950b75fc4402da1722fc9baee' */ -static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_8_0_fe rustsecp256k1_v0_8_0_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( 0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL ); -static int rustsecp256k1_v0_7_0_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) { 
+static int rustsecp256k1_v0_8_0_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) { size_t lenleft; unsigned char b1; VERIFY_CHECK(len != NULL); @@ -99,7 +99,7 @@ static int rustsecp256k1_v0_7_0_der_read_len(size_t *len, const unsigned char ** return 1; } -static int rustsecp256k1_v0_7_0_der_parse_integer(rustsecp256k1_v0_7_0_scalar *r, const unsigned char **sig, const unsigned char *sigend) { +static int rustsecp256k1_v0_8_0_der_parse_integer(rustsecp256k1_v0_8_0_scalar *r, const unsigned char **sig, const unsigned char *sigend) { int overflow = 0; unsigned char ra[32] = {0}; size_t rlen; @@ -109,7 +109,7 @@ static int rustsecp256k1_v0_7_0_der_parse_integer(rustsecp256k1_v0_7_0_scalar *r return 0; } (*sig)++; - if (rustsecp256k1_v0_7_0_der_read_len(&rlen, sig, sigend) == 0) { + if (rustsecp256k1_v0_8_0_der_read_len(&rlen, sig, sigend) == 0) { return 0; } if (rlen == 0 || rlen > (size_t)(sigend - *sig)) { @@ -141,23 +141,23 @@ static int rustsecp256k1_v0_7_0_der_parse_integer(rustsecp256k1_v0_7_0_scalar *r } if (!overflow) { if (rlen) memcpy(ra + 32 - rlen, *sig, rlen); - rustsecp256k1_v0_7_0_scalar_set_b32(r, ra, &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(r, ra, &overflow); } if (overflow) { - rustsecp256k1_v0_7_0_scalar_set_int(r, 0); + rustsecp256k1_v0_8_0_scalar_set_int(r, 0); } (*sig) += rlen; return 1; } -static int rustsecp256k1_v0_7_0_ecdsa_sig_parse(rustsecp256k1_v0_7_0_scalar *rr, rustsecp256k1_v0_7_0_scalar *rs, const unsigned char *sig, size_t size) { +static int rustsecp256k1_v0_8_0_ecdsa_sig_parse(rustsecp256k1_v0_8_0_scalar *rr, rustsecp256k1_v0_8_0_scalar *rs, const unsigned char *sig, size_t size) { const unsigned char *sigend = sig + size; size_t rlen; if (sig == sigend || *(sig++) != 0x30) { /* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). 
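     * A well-formed signature here is SEQUENCE { INTEGER r, INTEGER s }: the bytes
     * 0x30 <len> 0x02 <len(R)> <R> 0x02 <len(S)> <S>. Editor's worked example
     * (illustrative only, not from upstream): 30 06 02 01 01 02 01 01 is the
     * 8-byte encoding of r = 1, s = 1.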
*/ return 0; } - if (rustsecp256k1_v0_7_0_der_read_len(&rlen, &sig, sigend) == 0) { + if (rustsecp256k1_v0_8_0_der_read_len(&rlen, &sig, sigend) == 0) { return 0; } if (rlen != (size_t)(sigend - sig)) { @@ -165,10 +165,10 @@ static int rustsecp256k1_v0_7_0_ecdsa_sig_parse(rustsecp256k1_v0_7_0_scalar *rr, return 0; } - if (!rustsecp256k1_v0_7_0_der_parse_integer(rr, &sig, sigend)) { + if (!rustsecp256k1_v0_8_0_der_parse_integer(rr, &sig, sigend)) { return 0; } - if (!rustsecp256k1_v0_7_0_der_parse_integer(rs, &sig, sigend)) { + if (!rustsecp256k1_v0_8_0_der_parse_integer(rs, &sig, sigend)) { return 0; } @@ -180,12 +180,12 @@ static int rustsecp256k1_v0_7_0_ecdsa_sig_parse(rustsecp256k1_v0_7_0_scalar *rr, return 1; } -static int rustsecp256k1_v0_7_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_7_0_scalar* ar, const rustsecp256k1_v0_7_0_scalar* as) { +static int rustsecp256k1_v0_8_0_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const rustsecp256k1_v0_8_0_scalar* ar, const rustsecp256k1_v0_8_0_scalar* as) { unsigned char r[33] = {0}, s[33] = {0}; unsigned char *rp = r, *sp = s; size_t lenR = 33, lenS = 33; - rustsecp256k1_v0_7_0_scalar_get_b32(&r[1], ar); - rustsecp256k1_v0_7_0_scalar_get_b32(&s[1], as); + rustsecp256k1_v0_8_0_scalar_get_b32(&r[1], ar); + rustsecp256k1_v0_8_0_scalar_get_b32(&s[1], as); while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; } while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; } if (*size < 6+lenS+lenR) { @@ -204,42 +204,42 @@ static int rustsecp256k1_v0_7_0_ecdsa_sig_serialize(unsigned char *sig, size_t * return 1; } -static int rustsecp256k1_v0_7_0_ecdsa_sig_verify(const rustsecp256k1_v0_7_0_scalar *sigr, const rustsecp256k1_v0_7_0_scalar *sigs, const rustsecp256k1_v0_7_0_ge *pubkey, const rustsecp256k1_v0_7_0_scalar *message) { +static int rustsecp256k1_v0_8_0_ecdsa_sig_verify(const rustsecp256k1_v0_8_0_scalar *sigr, const rustsecp256k1_v0_8_0_scalar *sigs, const rustsecp256k1_v0_8_0_ge *pubkey, const rustsecp256k1_v0_8_0_scalar *message) { unsigned char c[32]; - rustsecp256k1_v0_7_0_scalar sn, u1, u2; + rustsecp256k1_v0_8_0_scalar sn, u1, u2; #if !defined(EXHAUSTIVE_TEST_ORDER) - rustsecp256k1_v0_7_0_fe xr; + rustsecp256k1_v0_8_0_fe xr; #endif - rustsecp256k1_v0_7_0_gej pubkeyj; - rustsecp256k1_v0_7_0_gej pr; + rustsecp256k1_v0_8_0_gej pubkeyj; + rustsecp256k1_v0_8_0_gej pr; - if (rustsecp256k1_v0_7_0_scalar_is_zero(sigr) || rustsecp256k1_v0_7_0_scalar_is_zero(sigs)) { + if (rustsecp256k1_v0_8_0_scalar_is_zero(sigr) || rustsecp256k1_v0_8_0_scalar_is_zero(sigs)) { return 0; } - rustsecp256k1_v0_7_0_scalar_inverse_var(&sn, sigs); - rustsecp256k1_v0_7_0_scalar_mul(&u1, &sn, message); - rustsecp256k1_v0_7_0_scalar_mul(&u2, &sn, sigr); - rustsecp256k1_v0_7_0_gej_set_ge(&pubkeyj, pubkey); - rustsecp256k1_v0_7_0_ecmult(&pr, &pubkeyj, &u2, &u1); - if (rustsecp256k1_v0_7_0_gej_is_infinity(&pr)) { + rustsecp256k1_v0_8_0_scalar_inverse_var(&sn, sigs); + rustsecp256k1_v0_8_0_scalar_mul(&u1, &sn, message); + rustsecp256k1_v0_8_0_scalar_mul(&u2, &sn, sigr); + rustsecp256k1_v0_8_0_gej_set_ge(&pubkeyj, pubkey); + rustsecp256k1_v0_8_0_ecmult(&pr, &pubkeyj, &u2, &u1); + if (rustsecp256k1_v0_8_0_gej_is_infinity(&pr)) { return 0; } #if defined(EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_7_0_scalar computed_r; - rustsecp256k1_v0_7_0_ge pr_ge; - rustsecp256k1_v0_7_0_ge_set_gej(&pr_ge, &pr); - rustsecp256k1_v0_7_0_fe_normalize(&pr_ge.x); + rustsecp256k1_v0_8_0_scalar computed_r; + rustsecp256k1_v0_8_0_ge pr_ge; + 
rustsecp256k1_v0_8_0_ge_set_gej(&pr_ge, &pr); + rustsecp256k1_v0_8_0_fe_normalize(&pr_ge.x); - rustsecp256k1_v0_7_0_fe_get_b32(c, &pr_ge.x); - rustsecp256k1_v0_7_0_scalar_set_b32(&computed_r, c, NULL); - return rustsecp256k1_v0_7_0_scalar_eq(sigr, &computed_r); + rustsecp256k1_v0_8_0_fe_get_b32(c, &pr_ge.x); + rustsecp256k1_v0_8_0_scalar_set_b32(&computed_r, c, NULL); + return rustsecp256k1_v0_8_0_scalar_eq(sigr, &computed_r); } #else - rustsecp256k1_v0_7_0_scalar_get_b32(c, sigr); - rustsecp256k1_v0_7_0_fe_set_b32(&xr, c); + rustsecp256k1_v0_8_0_scalar_get_b32(c, sigr); + rustsecp256k1_v0_8_0_fe_set_b32(&xr, c); /** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n) * in xr. Naively, we would extract the x coordinate from pr (requiring an inversion modulo p), @@ -255,18 +255,18 @@ static int rustsecp256k1_v0_7_0_ecdsa_sig_verify(const rustsecp256k1_v0_7_0_scal * <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x) * * Thus, we can avoid the inversion, but we have to check both cases separately. - * rustsecp256k1_v0_7_0_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. + * rustsecp256k1_v0_8_0_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. */ - if (rustsecp256k1_v0_7_0_gej_eq_x_var(&xr, &pr)) { + if (rustsecp256k1_v0_8_0_gej_eq_x_var(&xr, &pr)) { /* xr * pr.z^2 mod p == pr.x, so the signature is valid. */ return 1; } - if (rustsecp256k1_v0_7_0_fe_cmp_var(&xr, &rustsecp256k1_v0_7_0_ecdsa_const_p_minus_order) >= 0) { + if (rustsecp256k1_v0_8_0_fe_cmp_var(&xr, &rustsecp256k1_v0_8_0_ecdsa_const_p_minus_order) >= 0) { /* xr + n >= p, so we can skip testing the second case. */ return 0; } - rustsecp256k1_v0_7_0_fe_add(&xr, &rustsecp256k1_v0_7_0_ecdsa_const_order_as_fe); - if (rustsecp256k1_v0_7_0_gej_eq_x_var(&xr, &pr)) { + rustsecp256k1_v0_8_0_fe_add(&xr, &rustsecp256k1_v0_8_0_ecdsa_const_order_as_fe); + if (rustsecp256k1_v0_8_0_gej_eq_x_var(&xr, &pr)) { /* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid.
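      * Editor's toy numbers (illustrative, not from upstream): take p = 13 and
      * pr = (X = 10, z = 2). The affine x is X / z^2 = 10 * inv(4) = 10 * 10 = 9 (mod 13),
      * and a claimed xr = 9 passes the multiplication-only test 9 * z^2 = 36 == 10 = X (mod 13),
      * so no field inversion is ever performed.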
*/ return 1; } @@ -274,42 +274,42 @@ static int rustsecp256k1_v0_7_0_ecdsa_sig_verify(const rustsecp256k1_v0_7_0_scal #endif } -static int rustsecp256k1_v0_7_0_ecdsa_sig_sign(const rustsecp256k1_v0_7_0_ecmult_gen_context *ctx, rustsecp256k1_v0_7_0_scalar *sigr, rustsecp256k1_v0_7_0_scalar *sigs, const rustsecp256k1_v0_7_0_scalar *seckey, const rustsecp256k1_v0_7_0_scalar *message, const rustsecp256k1_v0_7_0_scalar *nonce, int *recid) { +static int rustsecp256k1_v0_8_0_ecdsa_sig_sign(const rustsecp256k1_v0_8_0_ecmult_gen_context *ctx, rustsecp256k1_v0_8_0_scalar *sigr, rustsecp256k1_v0_8_0_scalar *sigs, const rustsecp256k1_v0_8_0_scalar *seckey, const rustsecp256k1_v0_8_0_scalar *message, const rustsecp256k1_v0_8_0_scalar *nonce, int *recid) { unsigned char b[32]; - rustsecp256k1_v0_7_0_gej rp; - rustsecp256k1_v0_7_0_ge r; - rustsecp256k1_v0_7_0_scalar n; + rustsecp256k1_v0_8_0_gej rp; + rustsecp256k1_v0_8_0_ge r; + rustsecp256k1_v0_8_0_scalar n; int overflow = 0; int high; - rustsecp256k1_v0_7_0_ecmult_gen(ctx, &rp, nonce); - rustsecp256k1_v0_7_0_ge_set_gej(&r, &rp); - rustsecp256k1_v0_7_0_fe_normalize(&r.x); - rustsecp256k1_v0_7_0_fe_normalize(&r.y); - rustsecp256k1_v0_7_0_fe_get_b32(b, &r.x); - rustsecp256k1_v0_7_0_scalar_set_b32(sigr, b, &overflow); + rustsecp256k1_v0_8_0_ecmult_gen(ctx, &rp, nonce); + rustsecp256k1_v0_8_0_ge_set_gej(&r, &rp); + rustsecp256k1_v0_8_0_fe_normalize(&r.x); + rustsecp256k1_v0_8_0_fe_normalize(&r.y); + rustsecp256k1_v0_8_0_fe_get_b32(b, &r.x); + rustsecp256k1_v0_8_0_scalar_set_b32(sigr, b, &overflow); if (recid) { /* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log * of some P where P.x >= order, and only 1 in about 2^127 points meet this criteria. */ - *recid = (overflow << 1) | rustsecp256k1_v0_7_0_fe_is_odd(&r.y); - } - rustsecp256k1_v0_7_0_scalar_mul(&n, sigr, seckey); - rustsecp256k1_v0_7_0_scalar_add(&n, &n, message); - rustsecp256k1_v0_7_0_scalar_inverse(sigs, nonce); - rustsecp256k1_v0_7_0_scalar_mul(sigs, sigs, &n); - rustsecp256k1_v0_7_0_scalar_clear(&n); - rustsecp256k1_v0_7_0_gej_clear(&rp); - rustsecp256k1_v0_7_0_ge_clear(&r); - high = rustsecp256k1_v0_7_0_scalar_is_high(sigs); - rustsecp256k1_v0_7_0_scalar_cond_negate(sigs, high); + *recid = (overflow << 1) | rustsecp256k1_v0_8_0_fe_is_odd(&r.y); + } + rustsecp256k1_v0_8_0_scalar_mul(&n, sigr, seckey); + rustsecp256k1_v0_8_0_scalar_add(&n, &n, message); + rustsecp256k1_v0_8_0_scalar_inverse(sigs, nonce); + rustsecp256k1_v0_8_0_scalar_mul(sigs, sigs, &n); + rustsecp256k1_v0_8_0_scalar_clear(&n); + rustsecp256k1_v0_8_0_gej_clear(&rp); + rustsecp256k1_v0_8_0_ge_clear(&r); + high = rustsecp256k1_v0_8_0_scalar_is_high(sigs); + rustsecp256k1_v0_8_0_scalar_cond_negate(sigs, high); if (recid) { *recid ^= high; } /* P.x = order is on the curve, so technically sig->r could end up being zero, which would be an invalid signature. * This is cryptographically unreachable as hitting it requires finding the discrete log of P.x = N. 
*/ - return (int)(!rustsecp256k1_v0_7_0_scalar_is_zero(sigr)) & (int)(!rustsecp256k1_v0_7_0_scalar_is_zero(sigs)); + return (int)(!rustsecp256k1_v0_8_0_scalar_is_zero(sigr)) & (int)(!rustsecp256k1_v0_8_0_scalar_is_zero(sigs)); } #endif /* SECP256K1_ECDSA_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/eckey.h b/secp256k1-sys/depend/secp256k1/src/eckey.h index c4931ea5c..3bb258317 100644 --- a/secp256k1-sys/depend/secp256k1/src/eckey.h +++ b/secp256k1-sys/depend/secp256k1/src/eckey.h @@ -14,12 +14,12 @@ #include "ecmult.h" #include "ecmult_gen.h" -static int rustsecp256k1_v0_7_0_eckey_pubkey_parse(rustsecp256k1_v0_7_0_ge *elem, const unsigned char *pub, size_t size); -static int rustsecp256k1_v0_7_0_eckey_pubkey_serialize(rustsecp256k1_v0_7_0_ge *elem, unsigned char *pub, size_t *size, int compressed); +static int rustsecp256k1_v0_8_0_eckey_pubkey_parse(rustsecp256k1_v0_8_0_ge *elem, const unsigned char *pub, size_t size); +static int rustsecp256k1_v0_8_0_eckey_pubkey_serialize(rustsecp256k1_v0_8_0_ge *elem, unsigned char *pub, size_t *size, int compressed); -static int rustsecp256k1_v0_7_0_eckey_privkey_tweak_add(rustsecp256k1_v0_7_0_scalar *key, const rustsecp256k1_v0_7_0_scalar *tweak); -static int rustsecp256k1_v0_7_0_eckey_pubkey_tweak_add(rustsecp256k1_v0_7_0_ge *key, const rustsecp256k1_v0_7_0_scalar *tweak); -static int rustsecp256k1_v0_7_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_7_0_scalar *key, const rustsecp256k1_v0_7_0_scalar *tweak); -static int rustsecp256k1_v0_7_0_eckey_pubkey_tweak_mul(rustsecp256k1_v0_7_0_ge *key, const rustsecp256k1_v0_7_0_scalar *tweak); +static int rustsecp256k1_v0_8_0_eckey_privkey_tweak_add(rustsecp256k1_v0_8_0_scalar *key, const rustsecp256k1_v0_8_0_scalar *tweak); +static int rustsecp256k1_v0_8_0_eckey_pubkey_tweak_add(rustsecp256k1_v0_8_0_ge *key, const rustsecp256k1_v0_8_0_scalar *tweak); +static int rustsecp256k1_v0_8_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_8_0_scalar *key, const rustsecp256k1_v0_8_0_scalar *tweak); +static int rustsecp256k1_v0_8_0_eckey_pubkey_tweak_mul(rustsecp256k1_v0_8_0_ge *key, const rustsecp256k1_v0_8_0_scalar *tweak); #endif /* SECP256K1_ECKEY_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/eckey_impl.h b/secp256k1-sys/depend/secp256k1/src/eckey_impl.h index d572491d9..c242b8d17 100644 --- a/secp256k1-sys/depend/secp256k1/src/eckey_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/eckey_impl.h @@ -14,82 +14,82 @@ #include "group.h" #include "ecmult_gen.h" -static int rustsecp256k1_v0_7_0_eckey_pubkey_parse(rustsecp256k1_v0_7_0_ge *elem, const unsigned char *pub, size_t size) { +static int rustsecp256k1_v0_8_0_eckey_pubkey_parse(rustsecp256k1_v0_8_0_ge *elem, const unsigned char *pub, size_t size) { if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) { - rustsecp256k1_v0_7_0_fe x; - return rustsecp256k1_v0_7_0_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_7_0_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); + rustsecp256k1_v0_8_0_fe x; + return rustsecp256k1_v0_8_0_fe_set_b32(&x, pub+1) && rustsecp256k1_v0_8_0_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); } else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { - rustsecp256k1_v0_7_0_fe x, y; - if (!rustsecp256k1_v0_7_0_fe_set_b32(&x, pub+1) || !rustsecp256k1_v0_7_0_fe_set_b32(&y, pub+33)) { + rustsecp256k1_v0_8_0_fe x, y; + if (!rustsecp256k1_v0_8_0_fe_set_b32(&x, pub+1) || 
!rustsecp256k1_v0_8_0_fe_set_b32(&y, pub+33)) { return 0; } - rustsecp256k1_v0_7_0_ge_set_xy(elem, &x, &y); + rustsecp256k1_v0_8_0_ge_set_xy(elem, &x, &y); if ((pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD) && - rustsecp256k1_v0_7_0_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { + rustsecp256k1_v0_8_0_fe_is_odd(&y) != (pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { return 0; } - return rustsecp256k1_v0_7_0_ge_is_valid_var(elem); + return rustsecp256k1_v0_8_0_ge_is_valid_var(elem); } else { return 0; } } -static int rustsecp256k1_v0_7_0_eckey_pubkey_serialize(rustsecp256k1_v0_7_0_ge *elem, unsigned char *pub, size_t *size, int compressed) { - if (rustsecp256k1_v0_7_0_ge_is_infinity(elem)) { +static int rustsecp256k1_v0_8_0_eckey_pubkey_serialize(rustsecp256k1_v0_8_0_ge *elem, unsigned char *pub, size_t *size, int compressed) { + if (rustsecp256k1_v0_8_0_ge_is_infinity(elem)) { return 0; } - rustsecp256k1_v0_7_0_fe_normalize_var(&elem->x); - rustsecp256k1_v0_7_0_fe_normalize_var(&elem->y); - rustsecp256k1_v0_7_0_fe_get_b32(&pub[1], &elem->x); + rustsecp256k1_v0_8_0_fe_normalize_var(&elem->x); + rustsecp256k1_v0_8_0_fe_normalize_var(&elem->y); + rustsecp256k1_v0_8_0_fe_get_b32(&pub[1], &elem->x); if (compressed) { *size = 33; - pub[0] = rustsecp256k1_v0_7_0_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN; + pub[0] = rustsecp256k1_v0_8_0_fe_is_odd(&elem->y) ? SECP256K1_TAG_PUBKEY_ODD : SECP256K1_TAG_PUBKEY_EVEN; } else { *size = 65; pub[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; - rustsecp256k1_v0_7_0_fe_get_b32(&pub[33], &elem->y); + rustsecp256k1_v0_8_0_fe_get_b32(&pub[33], &elem->y); } return 1; } -static int rustsecp256k1_v0_7_0_eckey_privkey_tweak_add(rustsecp256k1_v0_7_0_scalar *key, const rustsecp256k1_v0_7_0_scalar *tweak) { - rustsecp256k1_v0_7_0_scalar_add(key, key, tweak); - return !rustsecp256k1_v0_7_0_scalar_is_zero(key); +static int rustsecp256k1_v0_8_0_eckey_privkey_tweak_add(rustsecp256k1_v0_8_0_scalar *key, const rustsecp256k1_v0_8_0_scalar *tweak) { + rustsecp256k1_v0_8_0_scalar_add(key, key, tweak); + return !rustsecp256k1_v0_8_0_scalar_is_zero(key); } -static int rustsecp256k1_v0_7_0_eckey_pubkey_tweak_add(rustsecp256k1_v0_7_0_ge *key, const rustsecp256k1_v0_7_0_scalar *tweak) { - rustsecp256k1_v0_7_0_gej pt; - rustsecp256k1_v0_7_0_scalar one; - rustsecp256k1_v0_7_0_gej_set_ge(&pt, key); - rustsecp256k1_v0_7_0_scalar_set_int(&one, 1); - rustsecp256k1_v0_7_0_ecmult(&pt, &pt, &one, tweak); +static int rustsecp256k1_v0_8_0_eckey_pubkey_tweak_add(rustsecp256k1_v0_8_0_ge *key, const rustsecp256k1_v0_8_0_scalar *tweak) { + rustsecp256k1_v0_8_0_gej pt; + rustsecp256k1_v0_8_0_scalar one; + rustsecp256k1_v0_8_0_gej_set_ge(&pt, key); + rustsecp256k1_v0_8_0_scalar_set_int(&one, 1); + rustsecp256k1_v0_8_0_ecmult(&pt, &pt, &one, tweak); - if (rustsecp256k1_v0_7_0_gej_is_infinity(&pt)) { + if (rustsecp256k1_v0_8_0_gej_is_infinity(&pt)) { return 0; } - rustsecp256k1_v0_7_0_ge_set_gej(key, &pt); + rustsecp256k1_v0_8_0_ge_set_gej(key, &pt); return 1; } -static int rustsecp256k1_v0_7_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_7_0_scalar *key, const rustsecp256k1_v0_7_0_scalar *tweak) { +static int rustsecp256k1_v0_8_0_eckey_privkey_tweak_mul(rustsecp256k1_v0_8_0_scalar *key, const rustsecp256k1_v0_8_0_scalar *tweak) { int ret; - ret = !rustsecp256k1_v0_7_0_scalar_is_zero(tweak); + ret = !rustsecp256k1_v0_8_0_scalar_is_zero(tweak); - rustsecp256k1_v0_7_0_scalar_mul(key, key, tweak); + 
rustsecp256k1_v0_8_0_scalar_mul(key, key, tweak); return ret; } -static int rustsecp256k1_v0_7_0_eckey_pubkey_tweak_mul(rustsecp256k1_v0_7_0_ge *key, const rustsecp256k1_v0_7_0_scalar *tweak) { - rustsecp256k1_v0_7_0_scalar zero; - rustsecp256k1_v0_7_0_gej pt; - if (rustsecp256k1_v0_7_0_scalar_is_zero(tweak)) { +static int rustsecp256k1_v0_8_0_eckey_pubkey_tweak_mul(rustsecp256k1_v0_8_0_ge *key, const rustsecp256k1_v0_8_0_scalar *tweak) { + rustsecp256k1_v0_8_0_scalar zero; + rustsecp256k1_v0_8_0_gej pt; + if (rustsecp256k1_v0_8_0_scalar_is_zero(tweak)) { return 0; } - rustsecp256k1_v0_7_0_scalar_set_int(&zero, 0); - rustsecp256k1_v0_7_0_gej_set_ge(&pt, key); - rustsecp256k1_v0_7_0_ecmult(&pt, &pt, tweak, &zero); - rustsecp256k1_v0_7_0_ge_set_gej(key, &pt); + rustsecp256k1_v0_8_0_scalar_set_int(&zero, 0); + rustsecp256k1_v0_8_0_gej_set_ge(&pt, key); + rustsecp256k1_v0_8_0_ecmult(&pt, &pt, tweak, &zero); + rustsecp256k1_v0_8_0_ge_set_gej(key, &pt); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult.h b/secp256k1-sys/depend/secp256k1/src/ecmult.h index 090c322f3..3615b422d 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult.h @@ -11,6 +11,17 @@ #include "scalar.h" #include "scratch.h" +#ifndef ECMULT_WINDOW_SIZE +# define ECMULT_WINDOW_SIZE 15 +# ifdef DEBUG_CONFIG +# pragma message DEBUG_CONFIG_MSG("ECMULT_WINDOW_SIZE undefined, assuming default value") +# endif +#endif + +#ifdef DEBUG_CONFIG +# pragma message DEBUG_CONFIG_DEF(ECMULT_WINDOW_SIZE) +#endif + /* No one will ever need more than a window size of 24. The code might * be correct for larger values of ECMULT_WINDOW_SIZE but this is not * tested. @@ -30,9 +41,9 @@ #define ECMULT_TABLE_SIZE(w) (1L << ((w)-2)) /** Double multiply: R = na*A + ng*G */ -static void rustsecp256k1_v0_7_0_ecmult(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_scalar *na, const rustsecp256k1_v0_7_0_scalar *ng); +static void rustsecp256k1_v0_8_0_ecmult(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_scalar *na, const rustsecp256k1_v0_8_0_scalar *ng); -typedef int (rustsecp256k1_v0_7_0_ecmult_multi_callback)(rustsecp256k1_v0_7_0_scalar *sc, rustsecp256k1_v0_7_0_ge *pt, size_t idx, void *data); +typedef int (rustsecp256k1_v0_8_0_ecmult_multi_callback)(rustsecp256k1_v0_8_0_scalar *sc, rustsecp256k1_v0_8_0_ge *pt, size_t idx, void *data); /** * Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai.
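/* Editor's sketch (hypothetical, all names below are invented): the callback
 * contract of rustsecp256k1_v0_8_0_ecmult_multi_var, modelled on a toy group
 * (the integers under addition, with generator G = 1) so that
 * R = inp_g_sc*G + sum_i ni*Ai can be checked by hand. This illustrates the
 * interface shape only, not the real Strauss/Pippenger implementation. */
#include <stdio.h>
#include <stddef.h>

/* Same shape as the real callback: fill in the idx-th scalar/point pair. */
typedef int (toy_multi_callback)(int *sc, int *pt, size_t idx, void *data);

static int toy_ecmult_multi(int *r, const int *inp_g_sc, toy_multi_callback cb, void *cbdata, size_t n) {
    size_t i;
    int sc, pt;
    *r = (inp_g_sc != NULL) ? *inp_g_sc * 1 : 0;  /* the inp_g_sc * G term, G = 1 */
    for (i = 0; i < n; i++) {
        if (!cb(&sc, &pt, i, cbdata)) {
            return 0;  /* mirrors the documented "callback returns 0" failure case */
        }
        *r += sc * pt;  /* accumulate ni * Ai */
    }
    return 1;
}

static int demo_cb(int *sc, int *pt, size_t idx, void *data) {
    int (*pairs)[2] = (int (*)[2])data;
    *sc = pairs[idx][0];
    *pt = pairs[idx][1];
    return 1;
}

int main(void) {
    int pairs[2][2] = { { 3, 5 }, { 2, 7 } };  /* n0*A0 = 3*5, n1*A1 = 2*7 */
    int g_sc = 4;
    int r;
    if (toy_ecmult_multi(&r, &g_sc, demo_cb, pairs, 2)) {
        printf("R = %d\n", r);  /* prints R = 33, i.e. 4*1 + 15 + 14 */
    }
    return 0;
}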
@@ -45,6 +56,6 @@ typedef int (rustsecp256k1_v0_7_0_ecmult_multi_callback)(rustsecp256k1_v0_7_0_sc * 0 if there is not enough scratch space for a single point or * callback returns 0 */ -static int rustsecp256k1_v0_7_0_ecmult_multi_var(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *inp_g_sc, rustsecp256k1_v0_7_0_ecmult_multi_callback cb, void *cbdata, size_t n); +static int rustsecp256k1_v0_8_0_ecmult_multi_var(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch *scratch, rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_scalar *inp_g_sc, rustsecp256k1_v0_8_0_ecmult_multi_callback cb, void *cbdata, size_t n); #endif /* SECP256K1_ECMULT_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table.h b/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table.h index 25786d479..d446df5ee 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table.h @@ -8,9 +8,9 @@ #define SECP256K1_ECMULT_COMPUTE_TABLE_H /* Construct table of all odd multiples of gen in range 1..(2**(window_g-1)-1). */ -static void rustsecp256k1_v0_7_0_ecmult_compute_table(rustsecp256k1_v0_7_0_ge_storage* table, int window_g, const rustsecp256k1_v0_7_0_gej* gen); +static void rustsecp256k1_v0_8_0_ecmult_compute_table(rustsecp256k1_v0_8_0_ge_storage* table, int window_g, const rustsecp256k1_v0_8_0_gej* gen); -/* Like rustsecp256k1_v0_7_0_ecmult_compute_table, but one for both gen and gen*2^128. */ -static void rustsecp256k1_v0_7_0_ecmult_compute_two_tables(rustsecp256k1_v0_7_0_ge_storage* table, rustsecp256k1_v0_7_0_ge_storage* table_128, int window_g, const rustsecp256k1_v0_7_0_ge* gen); +/* Like rustsecp256k1_v0_8_0_ecmult_compute_table, but one for both gen and gen*2^128. 
*/ +static void rustsecp256k1_v0_8_0_ecmult_compute_two_tables(rustsecp256k1_v0_8_0_ge_storage* table, rustsecp256k1_v0_8_0_ge_storage* table_128, int window_g, const rustsecp256k1_v0_8_0_ge* gen); #endif /* SECP256K1_ECMULT_COMPUTE_TABLE_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table_impl.h index 0a19e8a18..4529aa67b 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_compute_table_impl.h @@ -13,37 +13,37 @@ #include "ecmult.h" #include "util.h" -static void rustsecp256k1_v0_7_0_ecmult_compute_table(rustsecp256k1_v0_7_0_ge_storage* table, int window_g, const rustsecp256k1_v0_7_0_gej* gen) { - rustsecp256k1_v0_7_0_gej gj; - rustsecp256k1_v0_7_0_ge ge, dgen; +static void rustsecp256k1_v0_8_0_ecmult_compute_table(rustsecp256k1_v0_8_0_ge_storage* table, int window_g, const rustsecp256k1_v0_8_0_gej* gen) { + rustsecp256k1_v0_8_0_gej gj; + rustsecp256k1_v0_8_0_ge ge, dgen; int j; gj = *gen; - rustsecp256k1_v0_7_0_ge_set_gej_var(&ge, &gj); - rustsecp256k1_v0_7_0_ge_to_storage(&table[0], &ge); + rustsecp256k1_v0_8_0_ge_set_gej_var(&ge, &gj); + rustsecp256k1_v0_8_0_ge_to_storage(&table[0], &ge); - rustsecp256k1_v0_7_0_gej_double_var(&gj, gen, NULL); - rustsecp256k1_v0_7_0_ge_set_gej_var(&dgen, &gj); + rustsecp256k1_v0_8_0_gej_double_var(&gj, gen, NULL); + rustsecp256k1_v0_8_0_ge_set_gej_var(&dgen, &gj); for (j = 1; j < ECMULT_TABLE_SIZE(window_g); ++j) { - rustsecp256k1_v0_7_0_gej_set_ge(&gj, &ge); - rustsecp256k1_v0_7_0_gej_add_ge_var(&gj, &gj, &dgen, NULL); - rustsecp256k1_v0_7_0_ge_set_gej_var(&ge, &gj); - rustsecp256k1_v0_7_0_ge_to_storage(&table[j], &ge); + rustsecp256k1_v0_8_0_gej_set_ge(&gj, &ge); + rustsecp256k1_v0_8_0_gej_add_ge_var(&gj, &gj, &dgen, NULL); + rustsecp256k1_v0_8_0_ge_set_gej_var(&ge, &gj); + rustsecp256k1_v0_8_0_ge_to_storage(&table[j], &ge); } } -/* Like rustsecp256k1_v0_7_0_ecmult_compute_table, but one for both gen and gen*2^128. */ -static void rustsecp256k1_v0_7_0_ecmult_compute_two_tables(rustsecp256k1_v0_7_0_ge_storage* table, rustsecp256k1_v0_7_0_ge_storage* table_128, int window_g, const rustsecp256k1_v0_7_0_ge* gen) { - rustsecp256k1_v0_7_0_gej gj; +/* Like rustsecp256k1_v0_8_0_ecmult_compute_table, but one for both gen and gen*2^128. */ +static void rustsecp256k1_v0_8_0_ecmult_compute_two_tables(rustsecp256k1_v0_8_0_ge_storage* table, rustsecp256k1_v0_8_0_ge_storage* table_128, int window_g, const rustsecp256k1_v0_8_0_ge* gen) { + rustsecp256k1_v0_8_0_gej gj; int i; - rustsecp256k1_v0_7_0_gej_set_ge(&gj, gen); - rustsecp256k1_v0_7_0_ecmult_compute_table(table, window_g, &gj); + rustsecp256k1_v0_8_0_gej_set_ge(&gj, gen); + rustsecp256k1_v0_8_0_ecmult_compute_table(table, window_g, &gj); for (i = 0; i < 128; ++i) { - rustsecp256k1_v0_7_0_gej_double_var(&gj, &gj, NULL); + rustsecp256k1_v0_8_0_gej_double_var(&gj, &gj, NULL); } - rustsecp256k1_v0_7_0_ecmult_compute_table(table_128, window_g, &gj); + rustsecp256k1_v0_8_0_ecmult_compute_table(table_128, window_g, &gj); } #endif /* SECP256K1_ECMULT_COMPUTE_TABLE_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_const.h b/secp256k1-sys/depend/secp256k1/src/ecmult_const.h index 013bfa2cd..82421b544 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_const.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_const.h @@ -16,6 +16,6 @@ * one because we internally sometimes add 2 to the number during the WNAF conversion. * A must not be infinity. 
*/ -static void rustsecp256k1_v0_7_0_ecmult_const(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_ge *a, const rustsecp256k1_v0_7_0_scalar *q, int bits); +static void rustsecp256k1_v0_8_0_ecmult_const(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_ge *a, const rustsecp256k1_v0_8_0_scalar *q, int bits); #endif /* SECP256K1_ECMULT_CONST_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h index 38f2a683c..99b385107 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_const_impl.h @@ -18,14 +18,11 @@ * coordinates as ge_storage points in pre, and stores the global Z in globalz. * It only operates on tables sized for WINDOW_A wnaf multiples. */ -static void rustsecp256k1_v0_7_0_ecmult_odd_multiples_table_globalz_windowa(rustsecp256k1_v0_7_0_ge *pre, rustsecp256k1_v0_7_0_fe *globalz, const rustsecp256k1_v0_7_0_gej *a) { - rustsecp256k1_v0_7_0_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_7_0_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; - - /* Compute the odd multiples in Jacobian form. */ - rustsecp256k1_v0_7_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); - /* Bring them to the same Z denominator. */ - rustsecp256k1_v0_7_0_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); +static void rustsecp256k1_v0_8_0_ecmult_odd_multiples_table_globalz_windowa(rustsecp256k1_v0_8_0_ge *pre, rustsecp256k1_v0_8_0_fe *globalz, const rustsecp256k1_v0_8_0_gej *a) { + rustsecp256k1_v0_8_0_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; + + rustsecp256k1_v0_8_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr, globalz, a); + rustsecp256k1_v0_8_0_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr); } /* This is like `ECMULT_TABLE_GET_GE` but is constant time */ @@ -35,12 +32,12 @@ static void rustsecp256k1_v0_7_0_ecmult_odd_multiples_table_globalz_windowa(rust int mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \ int abs_n = ((n) + mask) ^ mask; \ int idx_n = abs_n >> 1; \ - rustsecp256k1_v0_7_0_fe neg_y; \ + rustsecp256k1_v0_8_0_fe neg_y; \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - VERIFY_SETUP(rustsecp256k1_v0_7_0_fe_clear(&(r)->x)); \ - VERIFY_SETUP(rustsecp256k1_v0_7_0_fe_clear(&(r)->y)); \ + VERIFY_SETUP(rustsecp256k1_v0_8_0_fe_clear(&(r)->x)); \ + VERIFY_SETUP(rustsecp256k1_v0_8_0_fe_clear(&(r)->y)); \ /* Unconditionally set r->x = (pre)[m].x. r->y = (pre)[m].y. because it's either the correct one \ * or will get replaced in the later iterations, this is needed to make sure `r` is initialized. */ \ (r)->x = (pre)[m].x; \ @@ -48,12 +45,12 @@ static void rustsecp256k1_v0_7_0_ecmult_odd_multiples_table_globalz_windowa(rust for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \ /* This loop is used to avoid secret data in array indices. See * the comment in ecmult_gen_impl.h for rationale. 
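         * (Editor's note) E.g. with secret idx_n = 2 in a 4-entry table, the loop \
         * still reads entries 1..3 unconditionally and the cmovs retain a value \
         * only when m == idx_n, so the sequence of memory accesses is the same \
         * for every possible secret index. \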
*/ \ - rustsecp256k1_v0_7_0_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ - rustsecp256k1_v0_7_0_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ + rustsecp256k1_v0_8_0_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ + rustsecp256k1_v0_8_0_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ } \ (r)->infinity = 0; \ - rustsecp256k1_v0_7_0_fe_negate(&neg_y, &(r)->y, 1); \ - rustsecp256k1_v0_7_0_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ + rustsecp256k1_v0_8_0_fe_negate(&neg_y, &(r)->y, 1); \ + rustsecp256k1_v0_8_0_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ } while(0) /** Convert a number to WNAF notation. @@ -69,7 +66,7 @@ static void rustsecp256k1_v0_7_0_ecmult_odd_multiples_table_globalz_windowa(rust * * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335 */ -static int rustsecp256k1_v0_7_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_7_0_scalar *scalar, int w, int size) { +static int rustsecp256k1_v0_8_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_8_0_scalar *scalar, int w, int size) { int global_sign; int skew; int word = 0; @@ -79,7 +76,7 @@ static int rustsecp256k1_v0_7_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_7_0 int u; int flip; - rustsecp256k1_v0_7_0_scalar s = *scalar; + rustsecp256k1_v0_8_0_scalar s = *scalar; VERIFY_CHECK(w > 0); VERIFY_CHECK(size > 0); @@ -96,18 +93,18 @@ static int rustsecp256k1_v0_7_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_7_0 * particular, to ensure that the outputs from the endomorphism-split fit into * 128 bits). If we negate, the parity of our number flips, affecting whether * we want to add to the scalar to ensure that it's odd. */ - flip = rustsecp256k1_v0_7_0_scalar_is_high(&s); - skew = flip ^ rustsecp256k1_v0_7_0_scalar_is_even(&s); - rustsecp256k1_v0_7_0_scalar_cadd_bit(&s, 0, skew); - global_sign = rustsecp256k1_v0_7_0_scalar_cond_negate(&s, flip); + flip = rustsecp256k1_v0_8_0_scalar_is_high(&s); + skew = flip ^ rustsecp256k1_v0_8_0_scalar_is_even(&s); + rustsecp256k1_v0_8_0_scalar_cadd_bit(&s, 0, skew); + global_sign = rustsecp256k1_v0_8_0_scalar_cond_negate(&s, flip); /* 4 */ - u_last = rustsecp256k1_v0_7_0_scalar_shr_int(&s, w); + u_last = rustsecp256k1_v0_8_0_scalar_shr_int(&s, w); do { int even; /* 4.1 4.4 */ - u = rustsecp256k1_v0_7_0_scalar_shr_int(&s, w); + u = rustsecp256k1_v0_8_0_scalar_shr_int(&s, w); /* 4.2 */ even = ((u & 1) == 0); /* In contrast to the original algorithm, u_last is always > 0 and @@ -128,21 +125,21 @@ static int rustsecp256k1_v0_7_0_wnaf_const(int *wnaf, const rustsecp256k1_v0_7_0 } while (word * w < size); wnaf[word] = u * global_sign; - VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(&s)); + VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(&s)); VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w)); return skew; } -static void rustsecp256k1_v0_7_0_ecmult_const(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_ge *a, const rustsecp256k1_v0_7_0_scalar *scalar, int size) { - rustsecp256k1_v0_7_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_7_0_ge tmpa; - rustsecp256k1_v0_7_0_fe Z; +static void rustsecp256k1_v0_8_0_ecmult_const(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_ge *a, const rustsecp256k1_v0_8_0_scalar *scalar, int size) { + rustsecp256k1_v0_8_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_8_0_ge tmpa; + rustsecp256k1_v0_8_0_fe Z; int skew_1; - rustsecp256k1_v0_7_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_8_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; int skew_lam; - 
rustsecp256k1_v0_7_0_scalar q_1, q_lam; + rustsecp256k1_v0_8_0_scalar q_1, q_lam; int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; int i; @@ -152,12 +149,12 @@ static void rustsecp256k1_v0_7_0_ecmult_const(rustsecp256k1_v0_7_0_gej *r, const if (size > 128) { rsize = 128; /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ - rustsecp256k1_v0_7_0_scalar_split_lambda(&q_1, &q_lam, scalar); - skew_1 = rustsecp256k1_v0_7_0_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); - skew_lam = rustsecp256k1_v0_7_0_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); + rustsecp256k1_v0_8_0_scalar_split_lambda(&q_1, &q_lam, scalar); + skew_1 = rustsecp256k1_v0_8_0_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); + skew_lam = rustsecp256k1_v0_8_0_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); } else { - skew_1 = rustsecp256k1_v0_7_0_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); + skew_1 = rustsecp256k1_v0_8_0_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); skew_lam = 0; } @@ -168,14 +165,14 @@ static void rustsecp256k1_v0_7_0_ecmult_const(rustsecp256k1_v0_7_0_gej *r, const * the Z coordinate of the result once at the end. */ VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_7_0_gej_set_ge(r, a); - rustsecp256k1_v0_7_0_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); + rustsecp256k1_v0_8_0_gej_set_ge(r, a); + rustsecp256k1_v0_8_0_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_7_0_fe_normalize_weak(&pre_a[i].y); + rustsecp256k1_v0_8_0_fe_normalize_weak(&pre_a[i].y); } if (size > 128) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_7_0_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); + rustsecp256k1_v0_8_0_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); } } @@ -186,49 +183,49 @@ static void rustsecp256k1_v0_7_0_ecmult_const(rustsecp256k1_v0_7_0_gej *r, const i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); - rustsecp256k1_v0_7_0_gej_set_ge(r, &tmpa); + rustsecp256k1_v0_8_0_gej_set_ge(r, &tmpa); if (size > 128) { i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); - rustsecp256k1_v0_7_0_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_8_0_gej_add_ge(r, r, &tmpa); } /* remaining loop iterations */ for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) { int n; int j; for (j = 0; j < WINDOW_A - 1; ++j) { - rustsecp256k1_v0_7_0_gej_double(r, r); + rustsecp256k1_v0_8_0_gej_double(r, r); } n = wnaf_1[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); VERIFY_CHECK(n != 0); - rustsecp256k1_v0_7_0_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_8_0_gej_add_ge(r, r, &tmpa); if (size > 128) { n = wnaf_lam[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); VERIFY_CHECK(n != 0); - rustsecp256k1_v0_7_0_gej_add_ge(r, r, &tmpa); + rustsecp256k1_v0_8_0_gej_add_ge(r, r, &tmpa); } } { /* Correct for wNAF skew */ - rustsecp256k1_v0_7_0_gej tmpj; + rustsecp256k1_v0_8_0_gej tmpj; - rustsecp256k1_v0_7_0_ge_neg(&tmpa, &pre_a[0]); - rustsecp256k1_v0_7_0_gej_add_ge(&tmpj, r, &tmpa); - rustsecp256k1_v0_7_0_gej_cmov(r, &tmpj, skew_1); + rustsecp256k1_v0_8_0_ge_neg(&tmpa, &pre_a[0]); + rustsecp256k1_v0_8_0_gej_add_ge(&tmpj, r, &tmpa); + rustsecp256k1_v0_8_0_gej_cmov(r, &tmpj, skew_1); if (size > 128) { - rustsecp256k1_v0_7_0_ge_neg(&tmpa, &pre_a_lam[0]); - rustsecp256k1_v0_7_0_gej_add_ge(&tmpj, r, &tmpa); - rustsecp256k1_v0_7_0_gej_cmov(r, &tmpj, 
skew_lam); + rustsecp256k1_v0_8_0_ge_neg(&tmpa, &pre_a_lam[0]); + rustsecp256k1_v0_8_0_gej_add_ge(&tmpj, r, &tmpa); + rustsecp256k1_v0_8_0_gej_cmov(r, &tmpj, skew_lam); } } - rustsecp256k1_v0_7_0_fe_mul(&r->z, &r->z, &Z); + rustsecp256k1_v0_8_0_fe_mul(&r->z, &r->z, &Z); } #endif /* SECP256K1_ECMULT_CONST_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h index d701a5b53..2892268c1 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen.h @@ -10,9 +10,21 @@ #include "scalar.h" #include "group.h" +#ifndef ECMULT_GEN_PREC_BITS +# define ECMULT_GEN_PREC_BITS 4 +# ifdef DEBUG_CONFIG +# pragma message DEBUG_CONFIG_MSG("ECMULT_GEN_PREC_BITS undefined, assuming default value") +# endif +#endif + +#ifdef DEBUG_CONFIG +# pragma message DEBUG_CONFIG_DEF(ECMULT_GEN_PREC_BITS) +#endif + #if ECMULT_GEN_PREC_BITS != 2 && ECMULT_GEN_PREC_BITS != 4 && ECMULT_GEN_PREC_BITS != 8 # error "Set ECMULT_GEN_PREC_BITS to 2, 4 or 8." #endif + #define ECMULT_GEN_PREC_G(bits) (1 << bits) #define ECMULT_GEN_PREC_N(bits) (256 / bits) @@ -21,16 +33,16 @@ typedef struct { int built; /* Blinding values used when computing (n-b)G + bG. */ - rustsecp256k1_v0_7_0_scalar blind; /* -b */ - rustsecp256k1_v0_7_0_gej initial; /* bG */ -} rustsecp256k1_v0_7_0_ecmult_gen_context; + rustsecp256k1_v0_8_0_scalar blind; /* -b */ + rustsecp256k1_v0_8_0_gej initial; /* bG */ +} rustsecp256k1_v0_8_0_ecmult_gen_context; -static void rustsecp256k1_v0_7_0_ecmult_gen_context_build(rustsecp256k1_v0_7_0_ecmult_gen_context* ctx); -static void rustsecp256k1_v0_7_0_ecmult_gen_context_clear(rustsecp256k1_v0_7_0_ecmult_gen_context* ctx); +static void rustsecp256k1_v0_8_0_ecmult_gen_context_build(rustsecp256k1_v0_8_0_ecmult_gen_context* ctx); +static void rustsecp256k1_v0_8_0_ecmult_gen_context_clear(rustsecp256k1_v0_8_0_ecmult_gen_context* ctx); /** Multiply with the generator: R = a*G */ -static void rustsecp256k1_v0_7_0_ecmult_gen(const rustsecp256k1_v0_7_0_ecmult_gen_context* ctx, rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *a); +static void rustsecp256k1_v0_8_0_ecmult_gen(const rustsecp256k1_v0_8_0_ecmult_gen_context* ctx, rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_scalar *a); -static void rustsecp256k1_v0_7_0_ecmult_gen_blind(rustsecp256k1_v0_7_0_ecmult_gen_context *ctx, const unsigned char *seed32); +static void rustsecp256k1_v0_8_0_ecmult_gen_blind(rustsecp256k1_v0_8_0_ecmult_gen_context *ctx, const unsigned char *seed32); #endif /* SECP256K1_ECMULT_GEN_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table.h index 0abdf1958..2b6ab8961 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table.h @@ -9,6 +9,6 @@ #include "ecmult_gen.h" -static void rustsecp256k1_v0_7_0_ecmult_gen_compute_table(rustsecp256k1_v0_7_0_ge_storage* table, const rustsecp256k1_v0_7_0_ge* gen, int bits); +static void rustsecp256k1_v0_8_0_ecmult_gen_compute_table(rustsecp256k1_v0_8_0_ge_storage* table, const rustsecp256k1_v0_8_0_ge* gen, int bits); #endif /* SECP256K1_ECMULT_GEN_COMPUTE_TABLE_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table_impl.h index a68895d29..a547e3828 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table_impl.h 
+++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_compute_table_impl.h @@ -13,66 +13,66 @@ #include "ecmult_gen.h" #include "util.h" -static void rustsecp256k1_v0_7_0_ecmult_gen_compute_table(rustsecp256k1_v0_7_0_ge_storage* table, const rustsecp256k1_v0_7_0_ge* gen, int bits) { +static void rustsecp256k1_v0_8_0_ecmult_gen_compute_table(rustsecp256k1_v0_8_0_ge_storage* table, const rustsecp256k1_v0_8_0_ge* gen, int bits) { int g = ECMULT_GEN_PREC_G(bits); int n = ECMULT_GEN_PREC_N(bits); - rustsecp256k1_v0_7_0_ge* prec = checked_malloc(&default_error_callback, n * g * sizeof(*prec)); - rustsecp256k1_v0_7_0_gej gj; - rustsecp256k1_v0_7_0_gej nums_gej; + rustsecp256k1_v0_8_0_ge* prec = checked_malloc(&default_error_callback, n * g * sizeof(*prec)); + rustsecp256k1_v0_8_0_gej gj; + rustsecp256k1_v0_8_0_gej nums_gej; int i, j; /* get the generator */ - rustsecp256k1_v0_7_0_gej_set_ge(&gj, gen); + rustsecp256k1_v0_8_0_gej_set_ge(&gj, gen); /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */ { static const unsigned char nums_b32[33] = "The scalar for this x is unknown"; - rustsecp256k1_v0_7_0_fe nums_x; - rustsecp256k1_v0_7_0_ge nums_ge; + rustsecp256k1_v0_8_0_fe nums_x; + rustsecp256k1_v0_8_0_ge nums_ge; int r; - r = rustsecp256k1_v0_7_0_fe_set_b32(&nums_x, nums_b32); + r = rustsecp256k1_v0_8_0_fe_set_b32(&nums_x, nums_b32); (void)r; VERIFY_CHECK(r); - r = rustsecp256k1_v0_7_0_ge_set_xo_var(&nums_ge, &nums_x, 0); + r = rustsecp256k1_v0_8_0_ge_set_xo_var(&nums_ge, &nums_x, 0); (void)r; VERIFY_CHECK(r); - rustsecp256k1_v0_7_0_gej_set_ge(&nums_gej, &nums_ge); + rustsecp256k1_v0_8_0_gej_set_ge(&nums_gej, &nums_ge); /* Add G to make the bits in x uniformly distributed. */ - rustsecp256k1_v0_7_0_gej_add_ge_var(&nums_gej, &nums_gej, gen, NULL); + rustsecp256k1_v0_8_0_gej_add_ge_var(&nums_gej, &nums_gej, gen, NULL); } /* compute prec. */ { - rustsecp256k1_v0_7_0_gej gbase; - rustsecp256k1_v0_7_0_gej numsbase; - rustsecp256k1_v0_7_0_gej* precj = checked_malloc(&default_error_callback, n * g * sizeof(*precj)); /* Jacobian versions of prec. */ + rustsecp256k1_v0_8_0_gej gbase; + rustsecp256k1_v0_8_0_gej numsbase; + rustsecp256k1_v0_8_0_gej* precj = checked_malloc(&default_error_callback, n * g * sizeof(*precj)); /* Jacobian versions of prec. */ gbase = gj; /* PREC_G^j * G */ numsbase = nums_gej; /* 2^j * nums. */ for (j = 0; j < n; j++) { /* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */ precj[j*g] = numsbase; for (i = 1; i < g; i++) { - rustsecp256k1_v0_7_0_gej_add_var(&precj[j*g + i], &precj[j*g + i - 1], &gbase, NULL); + rustsecp256k1_v0_8_0_gej_add_var(&precj[j*g + i], &precj[j*g + i - 1], &gbase, NULL); } /* Multiply gbase by PREC_G. */ for (i = 0; i < bits; i++) { - rustsecp256k1_v0_7_0_gej_double_var(&gbase, &gbase, NULL); + rustsecp256k1_v0_8_0_gej_double_var(&gbase, &gbase, NULL); } /* Multiply numbase by 2. */ - rustsecp256k1_v0_7_0_gej_double_var(&numsbase, &numsbase, NULL); + rustsecp256k1_v0_8_0_gej_double_var(&numsbase, &numsbase, NULL); if (j == n - 2) { /* In the last iteration, numsbase is (1 - 2^j) * nums instead. 
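             * (Editor's note) This choice makes the nums offsets cancel:
             *   sum(2^j, j=0..n-2) + (1 - 2^(n-1)) = (2^(n-1) - 1) + 1 - 2^(n-1) = 0,
             * so adding one precomputed entry from every row yields a*G exactly,
             * with no residual multiple of nums.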
*/ - rustsecp256k1_v0_7_0_gej_neg(&numsbase, &numsbase); - rustsecp256k1_v0_7_0_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); + rustsecp256k1_v0_8_0_gej_neg(&numsbase, &numsbase); + rustsecp256k1_v0_8_0_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); } } - rustsecp256k1_v0_7_0_ge_set_all_gej_var(prec, precj, n * g); + rustsecp256k1_v0_8_0_ge_set_all_gej_var(prec, precj, n * g); free(precj); } for (j = 0; j < n; j++) { for (i = 0; i < g; i++) { - rustsecp256k1_v0_7_0_ge_to_storage(&table[j*g + i], &prec[j*g + i]); + rustsecp256k1_v0_8_0_ge_to_storage(&table[j*g + i], &prec[j*g + i]); } } free(prec); diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h index 20d6edb52..c3dd2a1ee 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_gen_impl.h @@ -14,19 +14,19 @@ #include "hash_impl.h" #include "precomputed_ecmult_gen.h" -static void rustsecp256k1_v0_7_0_ecmult_gen_context_build(rustsecp256k1_v0_7_0_ecmult_gen_context *ctx) { - rustsecp256k1_v0_7_0_ecmult_gen_blind(ctx, NULL); +static void rustsecp256k1_v0_8_0_ecmult_gen_context_build(rustsecp256k1_v0_8_0_ecmult_gen_context *ctx) { + rustsecp256k1_v0_8_0_ecmult_gen_blind(ctx, NULL); ctx->built = 1; } -static int rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_7_0_ecmult_gen_context* ctx) { +static int rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(const rustsecp256k1_v0_8_0_ecmult_gen_context* ctx) { return ctx->built; } -static void rustsecp256k1_v0_7_0_ecmult_gen_context_clear(rustsecp256k1_v0_7_0_ecmult_gen_context *ctx) { +static void rustsecp256k1_v0_8_0_ecmult_gen_context_clear(rustsecp256k1_v0_8_0_ecmult_gen_context *ctx) { ctx->built = 0; - rustsecp256k1_v0_7_0_scalar_clear(&ctx->blind); - rustsecp256k1_v0_7_0_gej_clear(&ctx->initial); + rustsecp256k1_v0_8_0_scalar_clear(&ctx->blind); + rustsecp256k1_v0_8_0_gej_clear(&ctx->initial); } /* For accelerating the computation of a*G: @@ -40,25 +40,25 @@ static void rustsecp256k1_v0_7_0_ecmult_gen_context_clear(rustsecp256k1_v0_7_0_e * precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0 ... PREC_N-1). * None of the resulting prec group elements have a known scalar, and neither do any of * the intermediate sums while computing a*G. - * The prec values are stored in rustsecp256k1_v0_7_0_ecmult_gen_prec_table[i][n_i] = n_i * (PREC_G)^i * G + U_i. + * The prec values are stored in rustsecp256k1_v0_8_0_ecmult_gen_prec_table[i][n_i] = n_i * (PREC_G)^i * G + U_i. */ -static void rustsecp256k1_v0_7_0_ecmult_gen(const rustsecp256k1_v0_7_0_ecmult_gen_context *ctx, rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *gn) { +static void rustsecp256k1_v0_8_0_ecmult_gen(const rustsecp256k1_v0_8_0_ecmult_gen_context *ctx, rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_scalar *gn) { int bits = ECMULT_GEN_PREC_BITS; int g = ECMULT_GEN_PREC_G(bits); int n = ECMULT_GEN_PREC_N(bits); - rustsecp256k1_v0_7_0_ge add; - rustsecp256k1_v0_7_0_ge_storage adds; - rustsecp256k1_v0_7_0_scalar gnb; + rustsecp256k1_v0_8_0_ge add; + rustsecp256k1_v0_8_0_ge_storage adds; + rustsecp256k1_v0_8_0_scalar gnb; int i, j, n_i; memset(&adds, 0, sizeof(adds)); *r = ctx->initial; /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. 
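     * (Editor's toy example) With b = 5 and n = 11: ctx->blind = -5 and
     * ctx->initial = 5*G, so gnb = 11 - 5 = 6 and the loop below accumulates
     * 5*G + 6*G = 11*G, while the table indices n_i are taken from the blinded
     * value 6 rather than directly from the secret 11.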
*/ - rustsecp256k1_v0_7_0_scalar_add(&gnb, gn, &ctx->blind); + rustsecp256k1_v0_8_0_scalar_add(&gnb, gn, &ctx->blind); add.infinity = 0; for (i = 0; i < n; i++) { - n_i = rustsecp256k1_v0_7_0_scalar_get_bits(&gnb, i * bits, bits); + n_i = rustsecp256k1_v0_8_0_scalar_get_bits(&gnb, i * bits, bits); for (j = 0; j < g; j++) { /** This uses a conditional move to avoid any secret data in array indexes. * _Any_ use of secret indexes has been demonstrated to result in timing @@ -70,63 +70,64 @@ static void rustsecp256k1_v0_7_0_ecmult_gen(const rustsecp256k1_v0_7_0_ecmult_ge * by Dag Arne Osvik, Adi Shamir, and Eran Tromer * (https://www.tau.ac.il/~tromer/papers/cache.pdf) */ - rustsecp256k1_v0_7_0_ge_storage_cmov(&adds, &rustsecp256k1_v0_7_0_ecmult_gen_prec_table[i][j], j == n_i); + rustsecp256k1_v0_8_0_ge_storage_cmov(&adds, &rustsecp256k1_v0_8_0_ecmult_gen_prec_table[i][j], j == n_i); } - rustsecp256k1_v0_7_0_ge_from_storage(&add, &adds); - rustsecp256k1_v0_7_0_gej_add_ge(r, r, &add); + rustsecp256k1_v0_8_0_ge_from_storage(&add, &adds); + rustsecp256k1_v0_8_0_gej_add_ge(r, r, &add); } n_i = 0; - rustsecp256k1_v0_7_0_ge_clear(&add); - rustsecp256k1_v0_7_0_scalar_clear(&gnb); + rustsecp256k1_v0_8_0_ge_clear(&add); + rustsecp256k1_v0_8_0_scalar_clear(&gnb); } -/* Setup blinding values for rustsecp256k1_v0_7_0_ecmult_gen. */ -static void rustsecp256k1_v0_7_0_ecmult_gen_blind(rustsecp256k1_v0_7_0_ecmult_gen_context *ctx, const unsigned char *seed32) { - rustsecp256k1_v0_7_0_scalar b; - rustsecp256k1_v0_7_0_gej gb; - rustsecp256k1_v0_7_0_fe s; +/* Setup blinding values for rustsecp256k1_v0_8_0_ecmult_gen. */ +static void rustsecp256k1_v0_8_0_ecmult_gen_blind(rustsecp256k1_v0_8_0_ecmult_gen_context *ctx, const unsigned char *seed32) { + rustsecp256k1_v0_8_0_scalar b; + rustsecp256k1_v0_8_0_gej gb; + rustsecp256k1_v0_8_0_fe s; unsigned char nonce32[32]; - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 rng; int overflow; - unsigned char keydata[64] = {0}; + unsigned char keydata[64]; if (seed32 == NULL) { /* When seed is NULL, reset the initial point and blinding value. */ - rustsecp256k1_v0_7_0_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_7_0_ge_const_g); - rustsecp256k1_v0_7_0_gej_neg(&ctx->initial, &ctx->initial); - rustsecp256k1_v0_7_0_scalar_set_int(&ctx->blind, 1); + rustsecp256k1_v0_8_0_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_8_0_ge_const_g); + rustsecp256k1_v0_8_0_gej_neg(&ctx->initial, &ctx->initial); + rustsecp256k1_v0_8_0_scalar_set_int(&ctx->blind, 1); + return; } /* The prior blinding value (if not reset) is chained forward by including it in the hash. */ - rustsecp256k1_v0_7_0_scalar_get_b32(nonce32, &ctx->blind); + rustsecp256k1_v0_8_0_scalar_get_b32(keydata, &ctx->blind); /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data, * and guards against weak or adversarial seeds. This is a simpler and safer interface than * asking the caller for blinding values directly and expecting them to retry on failure. */ - memcpy(keydata, nonce32, 32); - if (seed32 != NULL) { - memcpy(keydata + 32, seed32, 32); - } - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32); + VERIFY_CHECK(seed32 != NULL); + memcpy(keydata + 32, seed32, 32); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(&rng, keydata, 64); memset(keydata, 0, sizeof(keydata)); /* Accept unobservably small non-uniformity. 
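     * (Editor's note) A uniform 32-byte string fails rustsecp256k1_v0_8_0_fe_set_b32
     * for fewer than 2^33 of the 2^256 possible values (p = 2^256 - 2^32 - 977),
     * i.e. with probability about 2^-223, and the zero check fires with
     * probability 2^-256.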
*/ - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - overflow = !rustsecp256k1_v0_7_0_fe_set_b32(&s, nonce32); - overflow |= rustsecp256k1_v0_7_0_fe_is_zero(&s); - rustsecp256k1_v0_7_0_fe_cmov(&s, &rustsecp256k1_v0_7_0_fe_one, overflow); - /* Randomize the projection to defend against multiplier sidechannels. */ - rustsecp256k1_v0_7_0_gej_rescale(&ctx->initial, &s); - rustsecp256k1_v0_7_0_fe_clear(&s); - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - rustsecp256k1_v0_7_0_scalar_set_b32(&b, nonce32, NULL); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + overflow = !rustsecp256k1_v0_8_0_fe_set_b32(&s, nonce32); + overflow |= rustsecp256k1_v0_8_0_fe_is_zero(&s); + rustsecp256k1_v0_8_0_fe_cmov(&s, &rustsecp256k1_v0_8_0_fe_one, overflow); + /* Randomize the projection to defend against multiplier sidechannels. + Do this before our own call to rustsecp256k1_v0_8_0_ecmult_gen below. */ + rustsecp256k1_v0_8_0_gej_rescale(&ctx->initial, &s); + rustsecp256k1_v0_8_0_fe_clear(&s); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_8_0_scalar_set_b32(&b, nonce32, NULL); /* A blinding value of 0 works, but would undermine the projection hardening. */ - rustsecp256k1_v0_7_0_scalar_cmov(&b, &rustsecp256k1_v0_7_0_scalar_one, rustsecp256k1_v0_7_0_scalar_is_zero(&b)); - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_8_0_scalar_cmov(&b, &rustsecp256k1_v0_8_0_scalar_one, rustsecp256k1_v0_8_0_scalar_is_zero(&b)); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(&rng); memset(nonce32, 0, 32); - rustsecp256k1_v0_7_0_ecmult_gen(ctx, &gb, &b); - rustsecp256k1_v0_7_0_scalar_negate(&b, &b); + /* The random projection in ctx->initial ensures that gb will have a random projection. */ + rustsecp256k1_v0_8_0_ecmult_gen(ctx, &gb, &b); + rustsecp256k1_v0_8_0_scalar_negate(&b, &b); ctx->blind = b; ctx->initial = gb; - rustsecp256k1_v0_7_0_scalar_clear(&b); - rustsecp256k1_v0_7_0_gej_clear(&gb); + rustsecp256k1_v0_8_0_scalar_clear(&b); + rustsecp256k1_v0_8_0_gej_clear(&gb); } #endif /* SECP256K1_ECMULT_GEN_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h b/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h index 671bb53b2..5c5d6d26a 100644 --- a/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/ecmult_impl.h @@ -33,8 +33,8 @@ /** Larger values for ECMULT_WINDOW_SIZE result in possibly better * performance at the cost of an exponentially larger precomputed * table. The exact table size is - * (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_7_0_ge_storage) bytes, - * where sizeof(rustsecp256k1_v0_7_0_ge_storage) is typically 64 bytes but can + * (1 << (WINDOW_G - 2)) * sizeof(rustsecp256k1_v0_8_0_ge_storage) bytes, + * where sizeof(rustsecp256k1_v0_8_0_ge_storage) is typically 64 bytes but can * be larger due to platform-specific padding and alignment. * Two tables of this size are used (due to the endomorphism * optimization). @@ -47,7 +47,7 @@ /* The number of objects allocated on the scratch space for ecmult_multi algorithms */ #define PIPPENGER_SCRATCH_OBJECTS 6 -#define STRAUSS_SCRATCH_OBJECTS 7 +#define STRAUSS_SCRATCH_OBJECTS 5 #define PIPPENGER_MAX_BUCKET_WINDOW 12 @@ -56,71 +56,98 @@ #define ECMULT_MAX_POINTS_PER_BATCH 5000000 -/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain - * the values [1*a,3*a,...,(2*n-1)*a], so it space for n values. zr[0] will - * contain prej[0].z / a.z. 
The other zr[i] values = prej[i].z / prej[i-1].z. - * Prej's Z values are undefined, except for the last value. +/** Fill a table 'pre_a' with precomputed odd multiples of a. + * pre_a will contain [1*a,3*a,...,(2*n-1)*a], so it needs space for n group elements. + * zr needs space for n field elements. + * + * Although pre_a is an array of _ge rather than _gej, it actually represents elements + * in Jacobian coordinates with their z coordinates omitted. The omitted z-coordinates + * can be recovered using z and zr. Using the notation z(b) to represent the omitted + * z coordinate of b: + * - z(pre_a[n-1]) = 'z' + * - z(pre_a[i-1]) = z(pre_a[i]) / zr[i] for n > i > 0 + * + * Lastly the zr[0] value, which isn't used above, is set so that: + * - a.z = z(pre_a[0]) / zr[0] */ -static void rustsecp256k1_v0_7_0_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_7_0_gej *prej, rustsecp256k1_v0_7_0_fe *zr, const rustsecp256k1_v0_7_0_gej *a) { - rustsecp256k1_v0_7_0_gej d; - rustsecp256k1_v0_7_0_ge a_ge, d_ge; +static void rustsecp256k1_v0_8_0_ecmult_odd_multiples_table(int n, rustsecp256k1_v0_8_0_ge *pre_a, rustsecp256k1_v0_8_0_fe *zr, rustsecp256k1_v0_8_0_fe *z, const rustsecp256k1_v0_8_0_gej *a) { + rustsecp256k1_v0_8_0_gej d, ai; + rustsecp256k1_v0_8_0_ge d_ge; int i; VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_7_0_gej_double_var(&d, a, NULL); + rustsecp256k1_v0_8_0_gej_double_var(&d, a, NULL); /* - * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate - * of 'd', and scale the 1P starting value's x/y coordinates without changing its z. + * Perform the additions using an isomorphic curve Y^2 = X^3 + 7*C^6 where C := d.z. + * The isomorphism, phi, maps a secp256k1 point (x, y) to the point (x*C^2, y*C^3) on the other curve. + * In Jacobian coordinates phi maps (x, y, z) to (x*C^2, y*C^3, z) or, equivalently to (x, y, z/C). + * + * phi(x, y, z) = (x*C^2, y*C^3, z) = (x, y, z/C) + * d_ge := phi(d) = (d.x, d.y, 1) + * ai := phi(a) = (a.x*C^2, a.y*C^3, a.z) + * + * The group addition functions work correctly on these isomorphic curves. + * In particular phi(d) is easy to represent in affine coordinates under this isomorphism. + * This lets us use the faster rustsecp256k1_v0_8_0_gej_add_ge_var group addition function that we wouldn't be able to use otherwise. */ - d_ge.x = d.x; - d_ge.y = d.y; - d_ge.infinity = 0; - - rustsecp256k1_v0_7_0_ge_set_gej_zinv(&a_ge, a, &d.z); - prej[0].x = a_ge.x; - prej[0].y = a_ge.y; - prej[0].z = a->z; - prej[0].infinity = 0; + rustsecp256k1_v0_8_0_ge_set_xy(&d_ge, &d.x, &d.y); + rustsecp256k1_v0_8_0_ge_set_gej_zinv(&pre_a[0], a, &d.z); + rustsecp256k1_v0_8_0_gej_set_ge(&ai, &pre_a[0]); + ai.z = a->z; + /* pre_a[0] is the point (a.x*C^2, a.y*C^3, a.z*C) which is equivalent to a. + * Set zr[0] to C, which is the ratio between the omitted z(pre_a[0]) value and a.z. + */ zr[0] = d.z; + for (i = 1; i < n; i++) { - rustsecp256k1_v0_7_0_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); + rustsecp256k1_v0_8_0_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]); + rustsecp256k1_v0_8_0_ge_set_xy(&pre_a[i], &ai.x, &ai.y); } - /* - * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only - * the final point's z coordinate is actually used though, so just update that. + /* Multiply the last z-coordinate by C to undo the isomorphism. + * Since the z-coordinates of the pre_a values are implied by the zr array of z-coordinate ratios, + * undoing the isomorphism here undoes the isomorphism for all pre_a values.
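     * (Editor's check) phi really is a curve isomorphism: if y^2 = x^3 + 7, then
     * (y*C^3)^2 = C^6 * y^2 = C^6 * (x^3 + 7) = (x*C^2)^3 + 7*C^6, so phi(x, y)
     * lands on Y^2 = X^3 + 7*C^6 as claimed above.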
*/ - rustsecp256k1_v0_7_0_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); + rustsecp256k1_v0_8_0_fe_mul(z, &ai.z, &d.z); } -/** The following two macro retrieves a particular odd multiple from a table - * of precomputed multiples. */ -#define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \ +#define SECP256K1_ECMULT_TABLE_VERIFY(n,w) \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ - VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - if ((n) > 0) { \ - *(r) = (pre)[((n)-1)/2]; \ - } else { \ - *(r) = (pre)[(-(n)-1)/2]; \ - rustsecp256k1_v0_7_0_fe_negate(&((r)->y), &((r)->y), 1); \ - } \ -} while(0) - -#define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \ - VERIFY_CHECK(((n) & 1) == 1); \ - VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ - VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - if ((n) > 0) { \ - rustsecp256k1_v0_7_0_ge_from_storage((r), &(pre)[((n)-1)/2]); \ - } else { \ - rustsecp256k1_v0_7_0_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ - rustsecp256k1_v0_7_0_fe_negate(&((r)->y), &((r)->y), 1); \ - } \ -} while(0) + VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); + +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_ecmult_table_get_ge(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge *pre, int n, int w) { + SECP256K1_ECMULT_TABLE_VERIFY(n,w) + if (n > 0) { + *r = pre[(n-1)/2]; + } else { + *r = pre[(-n-1)/2]; + rustsecp256k1_v0_8_0_fe_negate(&(r->y), &(r->y), 1); + } +} + +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_ecmult_table_get_ge_lambda(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge *pre, const rustsecp256k1_v0_8_0_fe *x, int n, int w) { + SECP256K1_ECMULT_TABLE_VERIFY(n,w) + if (n > 0) { + rustsecp256k1_v0_8_0_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y); + } else { + rustsecp256k1_v0_8_0_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y); + rustsecp256k1_v0_8_0_fe_negate(&(r->y), &(r->y), 1); + } +} + +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_ecmult_table_get_ge_storage(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge_storage *pre, int n, int w) { + SECP256K1_ECMULT_TABLE_VERIFY(n,w) + if (n > 0) { + rustsecp256k1_v0_8_0_ge_from_storage(r, &pre[(n-1)/2]); + } else { + rustsecp256k1_v0_8_0_ge_from_storage(r, &pre[(-n-1)/2]); + rustsecp256k1_v0_8_0_fe_negate(&(r->y), &(r->y), 1); + } +} /** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits), * with the following guarantees: @@ -129,8 +156,8 @@ static void rustsecp256k1_v0_7_0_ecmult_odd_multiples_table(int n, rustsecp256k1 * - the number of set values in wnaf is returned. This number is at most 256, and at most one more * than the number of bits in the (absolute value) of the input. 
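 * (Editor's worked example) For w = 3 the scalar 23 becomes wnaf = [-1, 0, 0, 3],
 * i.e. 23 = 3*2^3 - 1: every nonzero entry is odd with absolute value at most
 * 2^(w-1) - 1 = 3, and the function returns 4, one past the last nonzero digit.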
*/ -static int rustsecp256k1_v0_7_0_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_7_0_scalar *a, int w) { - rustsecp256k1_v0_7_0_scalar s; +static int rustsecp256k1_v0_8_0_ecmult_wnaf(int *wnaf, int len, const rustsecp256k1_v0_8_0_scalar *a, int w) { + rustsecp256k1_v0_8_0_scalar s; int last_set_bit = -1; int bit = 0; int sign = 1; @@ -144,15 +171,15 @@ static int rustsecp256k1_v0_7_0_ecmult_wnaf(int *wnaf, int len, const rustsecp25 memset(wnaf, 0, len * sizeof(wnaf[0])); s = *a; - if (rustsecp256k1_v0_7_0_scalar_get_bits(&s, 255, 1)) { - rustsecp256k1_v0_7_0_scalar_negate(&s, &s); + if (rustsecp256k1_v0_8_0_scalar_get_bits(&s, 255, 1)) { + rustsecp256k1_v0_8_0_scalar_negate(&s, &s); sign = -1; } while (bit < len) { int now; int word; - if (rustsecp256k1_v0_7_0_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { + if (rustsecp256k1_v0_8_0_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { bit++; continue; } @@ -162,7 +189,7 @@ static int rustsecp256k1_v0_7_0_ecmult_wnaf(int *wnaf, int len, const rustsecp25 now = len - bit; } - word = rustsecp256k1_v0_7_0_scalar_get_bits_var(&s, bit, now) + carry; + word = rustsecp256k1_v0_8_0_scalar_get_bits_var(&s, bit, now) + carry; carry = (word >> (w-1)) & 1; word -= carry << w; @@ -173,36 +200,39 @@ static int rustsecp256k1_v0_7_0_ecmult_wnaf(int *wnaf, int len, const rustsecp25 bit += now; } #ifdef VERIFY - CHECK(carry == 0); - while (bit < 256) { - CHECK(rustsecp256k1_v0_7_0_scalar_get_bits(&s, bit++, 1) == 0); + { + int verify_bit = bit; + + VERIFY_CHECK(carry == 0); + + while (verify_bit < 256) { + VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_get_bits(&s, verify_bit, 1) == 0); + verify_bit++; + } } #endif return last_set_bit + 1; } -struct rustsecp256k1_v0_7_0_strauss_point_state { - rustsecp256k1_v0_7_0_scalar na_1, na_lam; +struct rustsecp256k1_v0_8_0_strauss_point_state { int wnaf_na_1[129]; int wnaf_na_lam[129]; int bits_na_1; int bits_na_lam; - size_t input_pos; }; -struct rustsecp256k1_v0_7_0_strauss_state { - rustsecp256k1_v0_7_0_gej* prej; - rustsecp256k1_v0_7_0_fe* zr; - rustsecp256k1_v0_7_0_ge* pre_a; - rustsecp256k1_v0_7_0_ge* pre_a_lam; - struct rustsecp256k1_v0_7_0_strauss_point_state* ps; +struct rustsecp256k1_v0_8_0_strauss_state { + /* aux is used to hold z-ratios, and then used to hold pre_a[i].x * BETA values. */ + rustsecp256k1_v0_8_0_fe* aux; + rustsecp256k1_v0_8_0_ge* pre_a; + struct rustsecp256k1_v0_8_0_strauss_point_state* ps; }; -static void rustsecp256k1_v0_7_0_ecmult_strauss_wnaf(const struct rustsecp256k1_v0_7_0_strauss_state *state, rustsecp256k1_v0_7_0_gej *r, size_t num, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_scalar *na, const rustsecp256k1_v0_7_0_scalar *ng) { - rustsecp256k1_v0_7_0_ge tmpa; - rustsecp256k1_v0_7_0_fe Z; +static void rustsecp256k1_v0_8_0_ecmult_strauss_wnaf(const struct rustsecp256k1_v0_8_0_strauss_state *state, rustsecp256k1_v0_8_0_gej *r, size_t num, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_scalar *na, const rustsecp256k1_v0_8_0_scalar *ng) { + rustsecp256k1_v0_8_0_ge tmpa; + rustsecp256k1_v0_8_0_fe Z; /* Split G factors. 
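Aside, not part of the vendored diff: a textbook wNAF of a small non-negative integer, as a simplified stand-in for the 256-bit rustsecp256k1_v0_8_0_ecmult_wnaf above (which additionally negates high inputs). Digit positions carry weight 2^i; nonzero digits are odd, bounded by 2^(w-1), and separated by at least w-1 zeros:

```
#include <assert.h>
static void small_wnaf(int v, int w, int *digits, int len) {
    int i;
    for (i = 0; i < len; i++) digits[i] = 0;
    i = 0;
    while (v != 0 && i < len) {
        if (v & 1) {
            int d = v & ((1 << w) - 1);            /* low w bits */
            if (d >= (1 << (w - 1))) d -= 1 << w;  /* map into (-2^(w-1), 2^(w-1)) */
            digits[i] = d;
            v -= d;                                /* clears the low w bits */
        }
        v >>= 1;
        i++;
    }
}
int main(void) {
    int d[8];
    small_wnaf(23, 4, d, 8);
    assert(d[0] == 7 && d[4] == 1); /* 23 = 7*2^0 + 1*2^4 */
    return 0;
}
```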
*/ - rustsecp256k1_v0_7_0_scalar ng_1, ng_128; + rustsecp256k1_v0_8_0_scalar ng_1, ng_128; int wnaf_ng_1[129]; int bits_ng_1 = 0; int wnaf_ng_128[129]; @@ -212,17 +242,19 @@ static void rustsecp256k1_v0_7_0_ecmult_strauss_wnaf(const struct rustsecp256k1_ size_t np; size_t no = 0; + rustsecp256k1_v0_8_0_fe_set_int(&Z, 1); for (np = 0; np < num; ++np) { - if (rustsecp256k1_v0_7_0_scalar_is_zero(&na[np]) || rustsecp256k1_v0_7_0_gej_is_infinity(&a[np])) { + rustsecp256k1_v0_8_0_gej tmp; + rustsecp256k1_v0_8_0_scalar na_1, na_lam; + if (rustsecp256k1_v0_8_0_scalar_is_zero(&na[np]) || rustsecp256k1_v0_8_0_gej_is_infinity(&a[np])) { continue; } - state->ps[no].input_pos = np; /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ - rustsecp256k1_v0_7_0_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]); + rustsecp256k1_v0_8_0_scalar_split_lambda(&na_1, &na_lam, &na[np]); /* build wnaf representation for na_1 and na_lam. */ - state->ps[no].bits_na_1 = rustsecp256k1_v0_7_0_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &state->ps[no].na_1, WINDOW_A); - state->ps[no].bits_na_lam = rustsecp256k1_v0_7_0_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &state->ps[no].na_lam, WINDOW_A); + state->ps[no].bits_na_1 = rustsecp256k1_v0_8_0_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A); + state->ps[no].bits_na_lam = rustsecp256k1_v0_8_0_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A); VERIFY_CHECK(state->ps[no].bits_na_1 <= 129); VERIFY_CHECK(state->ps[no].bits_na_lam <= 129); if (state->ps[no].bits_na_1 > bits) { @@ -231,50 +263,46 @@ static void rustsecp256k1_v0_7_0_ecmult_strauss_wnaf(const struct rustsecp256k1_ if (state->ps[no].bits_na_lam > bits) { bits = state->ps[no].bits_na_lam; } - ++no; - } - /* Calculate odd multiples of a. - * All multiples are brought to the same Z 'denominator', which is stored - * in Z. Due to secp256k1' isomorphism we can do all operations pretending - * that the Z coordinate was 1, use affine addition formulae, and correct - * the Z coordinate of the result once at the end. - * The exception is the precomputed G table points, which are actually - * affine. Compared to the base used for other points, they have a Z ratio - * of 1/Z, so we can use rustsecp256k1_v0_7_0_gej_add_zinv_var, which uses the same - * isomorphism to efficiently add with a known Z inverse. - */ - if (no > 0) { - /* Compute the odd multiples in Jacobian form. */ - rustsecp256k1_v0_7_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]); - for (np = 1; np < no; ++np) { - rustsecp256k1_v0_7_0_gej tmp = a[state->ps[np].input_pos]; + /* Calculate odd multiples of a. + * All multiples are brought to the same Z 'denominator', which is stored + * in Z. Due to secp256k1' isomorphism we can do all operations pretending + * that the Z coordinate was 1, use affine addition formulae, and correct + * the Z coordinate of the result once at the end. + * The exception is the precomputed G table points, which are actually + * affine. Compared to the base used for other points, they have a Z ratio + * of 1/Z, so we can use rustsecp256k1_v0_8_0_gej_add_zinv_var, which uses the same + * isomorphism to efficiently add with a known Z inverse. 
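Aside, not part of the vendored diff: the scalar_split_lambda call above relies on secp256k1's GLV endomorphism. There is a scalar lambda (a cube root of unity modulo the group order) with lambda*(x, y) = (beta*x, y), so na*A = na_1*A + na_lam*(lambda*A) with ~128-bit halves that share all doubling steps. A toy illustration of the algebraic shape only, with a made-up order n = 13 and lambda = 3 rather than the real 256-bit constants:

```
#include <assert.h>
enum { N = 13, LAMBDA = 3 }; /* toy stand-ins, NOT the secp256k1 constants */
int main(void) {
    int na = 11, na_1 = 2, na_lam = 3;               /* one valid split of 11 */
    assert((na_1 + na_lam * LAMBDA) % N == na % N);  /* na = na_1 + na_lam*lambda */
    assert((LAMBDA * LAMBDA * LAMBDA) % N == 1);     /* lambda is a cube root of unity */
    assert((LAMBDA * LAMBDA + LAMBDA + 1) % N == 0); /* lambda^2 + lambda + 1 = 0 */
    return 0;
}
```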
+ */ + tmp = a[np]; + if (no) { #ifdef VERIFY - rustsecp256k1_v0_7_0_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); + rustsecp256k1_v0_8_0_fe_normalize_var(&Z); #endif - rustsecp256k1_v0_7_0_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); - rustsecp256k1_v0_7_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp); - rustsecp256k1_v0_7_0_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z)); + rustsecp256k1_v0_8_0_gej_rescale(&tmp, &Z); } - /* Bring them to the same Z denominator. */ - rustsecp256k1_v0_7_0_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr); - } else { - rustsecp256k1_v0_7_0_fe_set_int(&Z, 1); + rustsecp256k1_v0_8_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &Z, &tmp); + if (no) rustsecp256k1_v0_8_0_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z)); + + ++no; } + /* Bring them to the same Z denominator. */ + rustsecp256k1_v0_8_0_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, state->aux); + for (np = 0; np < no; ++np) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - rustsecp256k1_v0_7_0_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]); + rustsecp256k1_v0_8_0_fe_mul(&state->aux[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i].x, &rustsecp256k1_v0_8_0_const_beta); } } if (ng) { /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */ - rustsecp256k1_v0_7_0_scalar_split_128(&ng_1, &ng_128, ng); + rustsecp256k1_v0_8_0_scalar_split_128(&ng_1, &ng_128, ng); /* Build wnaf representation for ng_1 and ng_128 */ - bits_ng_1 = rustsecp256k1_v0_7_0_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); - bits_ng_128 = rustsecp256k1_v0_7_0_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); + bits_ng_1 = rustsecp256k1_v0_8_0_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); + bits_ng_128 = rustsecp256k1_v0_8_0_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); if (bits_ng_1 > bits) { bits = bits_ng_1; } @@ -283,65 +311,61 @@ static void rustsecp256k1_v0_7_0_ecmult_strauss_wnaf(const struct rustsecp256k1_ } } - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); for (i = bits - 1; i >= 0; i--) { int n; - rustsecp256k1_v0_7_0_gej_double_var(r, r, NULL); + rustsecp256k1_v0_8_0_gej_double_var(r, r, NULL); for (np = 0; np < no; ++np) { if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) { - ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - rustsecp256k1_v0_7_0_gej_add_ge_var(r, r, &tmpa, NULL); + rustsecp256k1_v0_8_0_ecmult_table_get_ge(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); + rustsecp256k1_v0_8_0_gej_add_ge_var(r, r, &tmpa, NULL); } if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) { - ECMULT_TABLE_GET_GE(&tmpa, state->pre_a_lam + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - rustsecp256k1_v0_7_0_gej_add_ge_var(r, r, &tmpa, NULL); + 
rustsecp256k1_v0_8_0_ecmult_table_get_ge_lambda(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); + rustsecp256k1_v0_8_0_gej_add_ge_var(r, r, &tmpa, NULL); } } if (i < bits_ng_1 && (n = wnaf_ng_1[i])) { - ECMULT_TABLE_GET_GE_STORAGE(&tmpa, rustsecp256k1_v0_7_0_pre_g, n, WINDOW_G); - rustsecp256k1_v0_7_0_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_8_0_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_8_0_pre_g, n, WINDOW_G); + rustsecp256k1_v0_8_0_gej_add_zinv_var(r, r, &tmpa, &Z); } if (i < bits_ng_128 && (n = wnaf_ng_128[i])) { - ECMULT_TABLE_GET_GE_STORAGE(&tmpa, rustsecp256k1_v0_7_0_pre_g_128, n, WINDOW_G); - rustsecp256k1_v0_7_0_gej_add_zinv_var(r, r, &tmpa, &Z); + rustsecp256k1_v0_8_0_ecmult_table_get_ge_storage(&tmpa, rustsecp256k1_v0_8_0_pre_g_128, n, WINDOW_G); + rustsecp256k1_v0_8_0_gej_add_zinv_var(r, r, &tmpa, &Z); } } if (!r->infinity) { - rustsecp256k1_v0_7_0_fe_mul(&r->z, &r->z, &Z); + rustsecp256k1_v0_8_0_fe_mul(&r->z, &r->z, &Z); } } -static void rustsecp256k1_v0_7_0_ecmult(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_scalar *na, const rustsecp256k1_v0_7_0_scalar *ng) { - rustsecp256k1_v0_7_0_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_7_0_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; - rustsecp256k1_v0_7_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - struct rustsecp256k1_v0_7_0_strauss_point_state ps[1]; - rustsecp256k1_v0_7_0_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; - struct rustsecp256k1_v0_7_0_strauss_state state; +static void rustsecp256k1_v0_8_0_ecmult(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_scalar *na, const rustsecp256k1_v0_8_0_scalar *ng) { + rustsecp256k1_v0_8_0_fe aux[ECMULT_TABLE_SIZE(WINDOW_A)]; + rustsecp256k1_v0_8_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; + struct rustsecp256k1_v0_8_0_strauss_point_state ps[1]; + struct rustsecp256k1_v0_8_0_strauss_state state; - state.prej = prej; - state.zr = zr; + state.aux = aux; state.pre_a = pre_a; - state.pre_a_lam = pre_a_lam; state.ps = ps; - rustsecp256k1_v0_7_0_ecmult_strauss_wnaf(&state, r, 1, a, na, ng); + rustsecp256k1_v0_8_0_ecmult_strauss_wnaf(&state, r, 1, a, na, ng); } -static size_t rustsecp256k1_v0_7_0_strauss_scratch_size(size_t n_points) { - static const size_t point_size = (2 * sizeof(rustsecp256k1_v0_7_0_ge) + sizeof(rustsecp256k1_v0_7_0_gej) + sizeof(rustsecp256k1_v0_7_0_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_7_0_strauss_point_state) + sizeof(rustsecp256k1_v0_7_0_gej) + sizeof(rustsecp256k1_v0_7_0_scalar); +static size_t rustsecp256k1_v0_8_0_strauss_scratch_size(size_t n_points) { + static const size_t point_size = (sizeof(rustsecp256k1_v0_8_0_ge) + sizeof(rustsecp256k1_v0_8_0_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct rustsecp256k1_v0_8_0_strauss_point_state) + sizeof(rustsecp256k1_v0_8_0_gej) + sizeof(rustsecp256k1_v0_8_0_scalar); return n_points*point_size; } -static int rustsecp256k1_v0_7_0_ecmult_strauss_batch(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *inp_g_sc, rustsecp256k1_v0_7_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { - rustsecp256k1_v0_7_0_gej* points; - rustsecp256k1_v0_7_0_scalar* scalars; - struct rustsecp256k1_v0_7_0_strauss_state state; +static int rustsecp256k1_v0_8_0_ecmult_strauss_batch(const rustsecp256k1_v0_8_0_callback* 
error_callback, rustsecp256k1_v0_8_0_scratch *scratch, rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_scalar *inp_g_sc, rustsecp256k1_v0_8_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { + rustsecp256k1_v0_8_0_gej* points; + rustsecp256k1_v0_8_0_scalar* scalars; + struct rustsecp256k1_v0_8_0_strauss_state state; size_t i; - const size_t scratch_checkpoint = rustsecp256k1_v0_7_0_scratch_checkpoint(error_callback, scratch); + const size_t scratch_checkpoint = rustsecp256k1_v0_8_0_scratch_checkpoint(error_callback, scratch); - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } @@ -349,39 +373,37 @@ static int rustsecp256k1_v0_7_0_ecmult_strauss_batch(const rustsecp256k1_v0_7_0_ /* We allocate STRAUSS_SCRATCH_OBJECTS objects on the scratch space. If these * allocations change, make sure to update the STRAUSS_SCRATCH_OBJECTS * constant and strauss_scratch_size accordingly. */ - points = (rustsecp256k1_v0_7_0_gej*)rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_7_0_gej)); - scalars = (rustsecp256k1_v0_7_0_scalar*)rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_7_0_scalar)); - state.prej = (rustsecp256k1_v0_7_0_gej*)rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_7_0_gej)); - state.zr = (rustsecp256k1_v0_7_0_fe*)rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_7_0_fe)); - state.pre_a = (rustsecp256k1_v0_7_0_ge*)rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_7_0_ge)); - state.pre_a_lam = (rustsecp256k1_v0_7_0_ge*)rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_7_0_ge)); - state.ps = (struct rustsecp256k1_v0_7_0_strauss_point_state*)rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_7_0_strauss_point_state)); - - if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL || state.pre_a_lam == NULL || state.ps == NULL) { - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + points = (rustsecp256k1_v0_8_0_gej*)rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_8_0_gej)); + scalars = (rustsecp256k1_v0_8_0_scalar*)rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, n_points * sizeof(rustsecp256k1_v0_8_0_scalar)); + state.aux = (rustsecp256k1_v0_8_0_fe*)rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_8_0_fe)); + state.pre_a = (rustsecp256k1_v0_8_0_ge*)rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(rustsecp256k1_v0_8_0_ge)); + state.ps = (struct rustsecp256k1_v0_8_0_strauss_point_state*)rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, n_points * sizeof(struct rustsecp256k1_v0_8_0_strauss_point_state)); + + if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) { + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } for (i = 0; i < n_points; i++) { - 
rustsecp256k1_v0_7_0_ge point; + rustsecp256k1_v0_8_0_ge point; if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) { - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } - rustsecp256k1_v0_7_0_gej_set_ge(&points[i], &point); + rustsecp256k1_v0_8_0_gej_set_ge(&points[i], &point); } - rustsecp256k1_v0_7_0_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc); - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_8_0_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc); + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 1; } -/* Wrapper for rustsecp256k1_v0_7_0_ecmult_multi_func interface */ -static int rustsecp256k1_v0_7_0_ecmult_strauss_batch_single(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *inp_g_sc, rustsecp256k1_v0_7_0_ecmult_multi_callback cb, void *cbdata, size_t n) { - return rustsecp256k1_v0_7_0_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0); +/* Wrapper for rustsecp256k1_v0_8_0_ecmult_multi_func interface */ +static int rustsecp256k1_v0_8_0_ecmult_strauss_batch_single(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch *scratch, rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_scalar *inp_g_sc, rustsecp256k1_v0_8_0_ecmult_multi_callback cb, void *cbdata, size_t n) { + return rustsecp256k1_v0_8_0_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0); } -static size_t rustsecp256k1_v0_7_0_strauss_max_points(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch *scratch) { - return rustsecp256k1_v0_7_0_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_7_0_strauss_scratch_size(1); +static size_t rustsecp256k1_v0_8_0_strauss_max_points(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch *scratch) { + return rustsecp256k1_v0_8_0_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / rustsecp256k1_v0_8_0_strauss_scratch_size(1); } /** Convert a number to WNAF notation. @@ -391,25 +413,25 @@ static size_t rustsecp256k1_v0_7_0_strauss_max_points(const rustsecp256k1_v0_7_0 * - the number of words set is always WNAF_SIZE(w) * - the returned skew is 0 or 1 */ -static int rustsecp256k1_v0_7_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_7_0_scalar *s, int w) { +static int rustsecp256k1_v0_8_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_8_0_scalar *s, int w) { int skew = 0; int pos; int max_pos; int last_w; - const rustsecp256k1_v0_7_0_scalar *work = s; + const rustsecp256k1_v0_8_0_scalar *work = s; - if (rustsecp256k1_v0_7_0_scalar_is_zero(s)) { + if (rustsecp256k1_v0_8_0_scalar_is_zero(s)) { for (pos = 0; pos < WNAF_SIZE(w); pos++) { wnaf[pos] = 0; } return 0; } - if (rustsecp256k1_v0_7_0_scalar_is_even(s)) { + if (rustsecp256k1_v0_8_0_scalar_is_even(s)) { skew = 1; } - wnaf[0] = rustsecp256k1_v0_7_0_scalar_get_bits_var(work, 0, w) + skew; + wnaf[0] = rustsecp256k1_v0_8_0_scalar_get_bits_var(work, 0, w) + skew; /* Compute last window size. 
Relevant when window size doesn't divide the * number of bits in the scalar */ last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w; @@ -417,7 +439,7 @@ static int rustsecp256k1_v0_7_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_7_0 /* Store the position of the first nonzero word in max_pos to allow * skipping leading zeros when calculating the wnaf. */ for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) { - int val = rustsecp256k1_v0_7_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); + int val = rustsecp256k1_v0_8_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if(val != 0) { break; } @@ -427,7 +449,7 @@ static int rustsecp256k1_v0_7_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_7_0 pos = 1; while (pos <= max_pos) { - int val = rustsecp256k1_v0_7_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); + int val = rustsecp256k1_v0_8_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if ((val & 1) == 0) { wnaf[pos - 1] -= (1 << w); wnaf[pos] = (val + 1); @@ -453,14 +475,14 @@ static int rustsecp256k1_v0_7_0_wnaf_fixed(int *wnaf, const rustsecp256k1_v0_7_0 return skew; } -struct rustsecp256k1_v0_7_0_pippenger_point_state { +struct rustsecp256k1_v0_8_0_pippenger_point_state { int skew_na; size_t input_pos; }; -struct rustsecp256k1_v0_7_0_pippenger_state { +struct rustsecp256k1_v0_8_0_pippenger_state { int *wnaf_na; - struct rustsecp256k1_v0_7_0_pippenger_point_state* ps; + struct rustsecp256k1_v0_8_0_pippenger_point_state* ps; }; /* @@ -470,7 +492,7 @@ struct rustsecp256k1_v0_7_0_pippenger_state { * to the point's wnaf[i]. Second, the buckets are added together such that * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ... */ -static int rustsecp256k1_v0_7_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_7_0_gej *buckets, int bucket_window, struct rustsecp256k1_v0_7_0_pippenger_state *state, rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *sc, const rustsecp256k1_v0_7_0_ge *pt, size_t num) { +static int rustsecp256k1_v0_8_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_8_0_gej *buckets, int bucket_window, struct rustsecp256k1_v0_8_0_pippenger_state *state, rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_scalar *sc, const rustsecp256k1_v0_8_0_ge *pt, size_t num) { size_t n_wnaf = WNAF_SIZE(bucket_window+1); size_t np; size_t no = 0; @@ -478,55 +500,55 @@ static int rustsecp256k1_v0_7_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_7_0_gej * int j; for (np = 0; np < num; ++np) { - if (rustsecp256k1_v0_7_0_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_7_0_ge_is_infinity(&pt[np])) { + if (rustsecp256k1_v0_8_0_scalar_is_zero(&sc[np]) || rustsecp256k1_v0_8_0_ge_is_infinity(&pt[np])) { continue; } state->ps[no].input_pos = np; - state->ps[no].skew_na = rustsecp256k1_v0_7_0_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1); + state->ps[no].skew_na = rustsecp256k1_v0_8_0_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1); no++; } - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); if (no == 0) { return 1; } for (i = n_wnaf - 1; i >= 0; i--) { - rustsecp256k1_v0_7_0_gej running_sum; + rustsecp256k1_v0_8_0_gej running_sum; for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) { - rustsecp256k1_v0_7_0_gej_set_infinity(&buckets[j]); + rustsecp256k1_v0_8_0_gej_set_infinity(&buckets[j]); } for (np = 0; np < no; ++np) { int n = state->wnaf_na[np*n_wnaf + i]; - struct rustsecp256k1_v0_7_0_pippenger_point_state point_state = state->ps[np]; - rustsecp256k1_v0_7_0_ge 
tmp; + struct rustsecp256k1_v0_8_0_pippenger_point_state point_state = state->ps[np]; + rustsecp256k1_v0_8_0_ge tmp; int idx; if (i == 0) { /* correct for wnaf skew */ int skew = point_state.skew_na; if (skew) { - rustsecp256k1_v0_7_0_ge_neg(&tmp, &pt[point_state.input_pos]); - rustsecp256k1_v0_7_0_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL); + rustsecp256k1_v0_8_0_ge_neg(&tmp, &pt[point_state.input_pos]); + rustsecp256k1_v0_8_0_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL); } } if (n > 0) { idx = (n - 1)/2; - rustsecp256k1_v0_7_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL); + rustsecp256k1_v0_8_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL); } else if (n < 0) { idx = -(n + 1)/2; - rustsecp256k1_v0_7_0_ge_neg(&tmp, &pt[point_state.input_pos]); - rustsecp256k1_v0_7_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL); + rustsecp256k1_v0_8_0_ge_neg(&tmp, &pt[point_state.input_pos]); + rustsecp256k1_v0_8_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL); } } for(j = 0; j < bucket_window; j++) { - rustsecp256k1_v0_7_0_gej_double_var(r, r, NULL); + rustsecp256k1_v0_8_0_gej_double_var(r, r, NULL); } - rustsecp256k1_v0_7_0_gej_set_infinity(&running_sum); + rustsecp256k1_v0_8_0_gej_set_infinity(&running_sum); /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ... * = bucket[0] + bucket[1] + bucket[2] + bucket[3] + ... * + 2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...) @@ -536,13 +558,13 @@ static int rustsecp256k1_v0_7_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_7_0_gej * * The doubling is done implicitly by deferring the final window doubling (of 'r'). */ for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) { - rustsecp256k1_v0_7_0_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL); - rustsecp256k1_v0_7_0_gej_add_var(r, r, &running_sum, NULL); + rustsecp256k1_v0_8_0_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL); + rustsecp256k1_v0_8_0_gej_add_var(r, r, &running_sum, NULL); } - rustsecp256k1_v0_7_0_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL); - rustsecp256k1_v0_7_0_gej_double_var(r, r, NULL); - rustsecp256k1_v0_7_0_gej_add_var(r, r, &running_sum, NULL); + rustsecp256k1_v0_8_0_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL); + rustsecp256k1_v0_8_0_gej_double_var(r, r, NULL); + rustsecp256k1_v0_8_0_gej_add_var(r, r, &running_sum, NULL); } return 1; } @@ -551,7 +573,7 @@ static int rustsecp256k1_v0_7_0_ecmult_pippenger_wnaf(rustsecp256k1_v0_7_0_gej * * Returns optimal bucket_window (number of bits of a scalar represented by a * set of buckets) for a given number of points. */ -static int rustsecp256k1_v0_7_0_pippenger_bucket_window(size_t n) { +static int rustsecp256k1_v0_8_0_pippenger_bucket_window(size_t n) { if (n <= 1) { return 1; } else if (n <= 4) { @@ -580,7 +602,7 @@ static int rustsecp256k1_v0_7_0_pippenger_bucket_window(size_t n) { /** * Returns the maximum optimal number of points for a bucket_window. 
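Aside, not part of the vendored diff: the bucket accumulation above computes bucket[0] + 3*bucket[1] + 5*bucket[2] + ... using only additions of a running suffix sum plus one deferred doubling. The same arithmetic on plain integers, as a standalone check:

```
#include <assert.h>
/* Mirrors the gej loop: suffix sums accumulate the odd weights,
 * and one final doubling supplies the factor of 2. */
static int weighted_bucket_sum(const int *b, int m) {
    int r = 0, running = 0, j;
    for (j = m - 1; j > 0; j--) {
        running += b[j]; /* running = b[j] + ... + b[m-1] */
        r += running;    /* r accumulates the suffix sums */
    }
    running += b[0];
    return 2 * r + running;
}
int main(void) {
    int b[3] = {4, 5, 6};
    assert(weighted_bucket_sum(b, 3) == 1*4 + 3*5 + 5*6);
    return 0;
}
```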
*/ -static size_t rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(int bucket_window) { +static size_t rustsecp256k1_v0_8_0_pippenger_bucket_window_inv(int bucket_window) { switch(bucket_window) { case 1: return 1; case 2: return 4; @@ -599,18 +621,18 @@ static size_t rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(int bucket_window } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_ecmult_endo_split(rustsecp256k1_v0_7_0_scalar *s1, rustsecp256k1_v0_7_0_scalar *s2, rustsecp256k1_v0_7_0_ge *p1, rustsecp256k1_v0_7_0_ge *p2) { - rustsecp256k1_v0_7_0_scalar tmp = *s1; - rustsecp256k1_v0_7_0_scalar_split_lambda(s1, s2, &tmp); - rustsecp256k1_v0_7_0_ge_mul_lambda(p2, p1); +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_ecmult_endo_split(rustsecp256k1_v0_8_0_scalar *s1, rustsecp256k1_v0_8_0_scalar *s2, rustsecp256k1_v0_8_0_ge *p1, rustsecp256k1_v0_8_0_ge *p2) { + rustsecp256k1_v0_8_0_scalar tmp = *s1; + rustsecp256k1_v0_8_0_scalar_split_lambda(s1, s2, &tmp); + rustsecp256k1_v0_8_0_ge_mul_lambda(p2, p1); - if (rustsecp256k1_v0_7_0_scalar_is_high(s1)) { - rustsecp256k1_v0_7_0_scalar_negate(s1, s1); - rustsecp256k1_v0_7_0_ge_neg(p1, p1); + if (rustsecp256k1_v0_8_0_scalar_is_high(s1)) { + rustsecp256k1_v0_8_0_scalar_negate(s1, s1); + rustsecp256k1_v0_8_0_ge_neg(p1, p1); } - if (rustsecp256k1_v0_7_0_scalar_is_high(s2)) { - rustsecp256k1_v0_7_0_scalar_negate(s2, s2); - rustsecp256k1_v0_7_0_ge_neg(p2, p2); + if (rustsecp256k1_v0_8_0_scalar_is_high(s2)) { + rustsecp256k1_v0_8_0_scalar_negate(s2, s2); + rustsecp256k1_v0_8_0_ge_neg(p2, p2); } } @@ -618,91 +640,91 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_ecmult_endo_split(rustsecp256k * Returns the scratch size required for a given number of points (excluding * base point G) without considering alignment. 
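Aside, not part of the vendored diff: why ecmult_endo_split may negate a half. In a group of order n, s*P equals (n - s)*(-P), so a "high" scalar folds into a small one at the cost of negating the point, keeping both wNAF inputs ~128 bits. Modeled with integers mod a toy n and ordinary multiplication standing in for scalar multiplication:

```
#include <assert.h>
int main(void) {
    int n = 101, s = 90, p = 7;        /* s is "high" (close to n) */
    int lhs = (s * p) % n;             /* s * P */
    int rhs = ((n - s) * (n - p)) % n; /* (n - s) * (-P), with -P represented as n - p */
    assert(lhs == rhs);
    return 0;
}
```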
*/ -static size_t rustsecp256k1_v0_7_0_pippenger_scratch_size(size_t n_points, int bucket_window) { +static size_t rustsecp256k1_v0_8_0_pippenger_scratch_size(size_t n_points, int bucket_window) { size_t entries = 2*n_points + 2; - size_t entry_size = sizeof(rustsecp256k1_v0_7_0_ge) + sizeof(rustsecp256k1_v0_7_0_scalar) + sizeof(struct rustsecp256k1_v0_7_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); - return (sizeof(rustsecp256k1_v0_7_0_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_7_0_pippenger_state) + entries * entry_size; + size_t entry_size = sizeof(rustsecp256k1_v0_8_0_ge) + sizeof(rustsecp256k1_v0_8_0_scalar) + sizeof(struct rustsecp256k1_v0_8_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); + return (sizeof(rustsecp256k1_v0_8_0_gej) << bucket_window) + sizeof(struct rustsecp256k1_v0_8_0_pippenger_state) + entries * entry_size; } -static int rustsecp256k1_v0_7_0_ecmult_pippenger_batch(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *inp_g_sc, rustsecp256k1_v0_7_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { - const size_t scratch_checkpoint = rustsecp256k1_v0_7_0_scratch_checkpoint(error_callback, scratch); +static int rustsecp256k1_v0_8_0_ecmult_pippenger_batch(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch *scratch, rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_scalar *inp_g_sc, rustsecp256k1_v0_8_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { + const size_t scratch_checkpoint = rustsecp256k1_v0_8_0_scratch_checkpoint(error_callback, scratch); /* Use 2(n+1) with the endomorphism, when calculating batch * sizes. The reason for +1 is that we add the G scalar to the list of * other scalars. */ size_t entries = 2*n_points + 2; - rustsecp256k1_v0_7_0_ge *points; - rustsecp256k1_v0_7_0_scalar *scalars; - rustsecp256k1_v0_7_0_gej *buckets; - struct rustsecp256k1_v0_7_0_pippenger_state *state_space; + rustsecp256k1_v0_8_0_ge *points; + rustsecp256k1_v0_8_0_scalar *scalars; + rustsecp256k1_v0_8_0_gej *buckets; + struct rustsecp256k1_v0_8_0_pippenger_state *state_space; size_t idx = 0; size_t point_idx = 0; int i, j; int bucket_window; - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } - bucket_window = rustsecp256k1_v0_7_0_pippenger_bucket_window(n_points); + bucket_window = rustsecp256k1_v0_8_0_pippenger_bucket_window(n_points); /* We allocate PIPPENGER_SCRATCH_OBJECTS objects on the scratch space. If * these allocations change, make sure to update the * PIPPENGER_SCRATCH_OBJECTS constant and pippenger_scratch_size * accordingly. 
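Aside, not part of the vendored diff: the entries = 2*n_points + 2 term used for scratch sizing follows from the endomorphism split. Each input point contributes two table entries after splitting, and the optional G scalar, when present, is split the same way, which accounts for the +2. A trivial restatement:

```
#include <assert.h>
#include <stddef.h>
int main(void) {
    size_t n_points = 3;
    size_t entries = 2 * n_points + 2; /* 2 per split point + 2 for the split G scalar */
    assert(entries == 8);
    return 0;
}
```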
 */ - points = (rustsecp256k1_v0_7_0_ge *) rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, entries * sizeof(*points)); - scalars = (rustsecp256k1_v0_7_0_scalar *) rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars)); - state_space = (struct rustsecp256k1_v0_7_0_pippenger_state *) rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, sizeof(*state_space)); + points = (rustsecp256k1_v0_8_0_ge *) rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, entries * sizeof(*points)); + scalars = (rustsecp256k1_v0_8_0_scalar *) rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars)); + state_space = (struct rustsecp256k1_v0_8_0_pippenger_state *) rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, sizeof(*state_space)); if (points == NULL || scalars == NULL || state_space == NULL) { - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } - state_space->ps = (struct rustsecp256k1_v0_7_0_pippenger_point_state *) rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps)); - state_space->wnaf_na = (int *) rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); - buckets = (rustsecp256k1_v0_7_0_gej *) rustsecp256k1_v0_7_0_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets)); + state_space->ps = (struct rustsecp256k1_v0_8_0_pippenger_point_state *) rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps)); + state_space->wnaf_na = (int *) rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); + buckets = (rustsecp256k1_v0_8_0_gej *) rustsecp256k1_v0_8_0_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets)); if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) { - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } if (inp_g_sc != NULL) { scalars[0] = *inp_g_sc; - points[0] = rustsecp256k1_v0_7_0_ge_const_g; + points[0] = rustsecp256k1_v0_8_0_ge_const_g; idx++; - rustsecp256k1_v0_7_0_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]); + rustsecp256k1_v0_8_0_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]); idx++; } while (point_idx < n_points) { if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) { - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } idx++; - rustsecp256k1_v0_7_0_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]); + rustsecp256k1_v0_8_0_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]); idx++; point_idx++; } - rustsecp256k1_v0_7_0_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx); + rustsecp256k1_v0_8_0_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx); /* Clear data */ for(i = 0; (size_t)i < idx; i++) { - rustsecp256k1_v0_7_0_scalar_clear(&scalars[i]); + rustsecp256k1_v0_8_0_scalar_clear(&scalars[i]); state_space->ps[i].skew_na = 0; for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) { state_space->wnaf_na[i *
WNAF_SIZE(bucket_window+1) + j] = 0; } } for(i = 0; i < 1< max_alloc) { break; } @@ -746,34 +768,34 @@ static size_t rustsecp256k1_v0_7_0_pippenger_max_points(const rustsecp256k1_v0_7 /* Computes ecmult_multi by simply multiplying and adding each point. Does not * require a scratch space */ -static int rustsecp256k1_v0_7_0_ecmult_multi_simple_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *inp_g_sc, rustsecp256k1_v0_7_0_ecmult_multi_callback cb, void *cbdata, size_t n_points) { +static int rustsecp256k1_v0_8_0_ecmult_multi_simple_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_scalar *inp_g_sc, rustsecp256k1_v0_8_0_ecmult_multi_callback cb, void *cbdata, size_t n_points) { size_t point_idx; - rustsecp256k1_v0_7_0_scalar szero; - rustsecp256k1_v0_7_0_gej tmpj; + rustsecp256k1_v0_8_0_scalar szero; + rustsecp256k1_v0_8_0_gej tmpj; - rustsecp256k1_v0_7_0_scalar_set_int(&szero, 0); - rustsecp256k1_v0_7_0_gej_set_infinity(r); - rustsecp256k1_v0_7_0_gej_set_infinity(&tmpj); + rustsecp256k1_v0_8_0_scalar_set_int(&szero, 0); + rustsecp256k1_v0_8_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(&tmpj); /* r = inp_g_sc*G */ - rustsecp256k1_v0_7_0_ecmult(r, &tmpj, &szero, inp_g_sc); + rustsecp256k1_v0_8_0_ecmult(r, &tmpj, &szero, inp_g_sc); for (point_idx = 0; point_idx < n_points; point_idx++) { - rustsecp256k1_v0_7_0_ge point; - rustsecp256k1_v0_7_0_gej pointj; - rustsecp256k1_v0_7_0_scalar scalar; + rustsecp256k1_v0_8_0_ge point; + rustsecp256k1_v0_8_0_gej pointj; + rustsecp256k1_v0_8_0_scalar scalar; if (!cb(&scalar, &point, point_idx, cbdata)) { return 0; } /* r += scalar*point */ - rustsecp256k1_v0_7_0_gej_set_ge(&pointj, &point); - rustsecp256k1_v0_7_0_ecmult(&tmpj, &pointj, &scalar, NULL); - rustsecp256k1_v0_7_0_gej_add_var(r, r, &tmpj, NULL); + rustsecp256k1_v0_8_0_gej_set_ge(&pointj, &point); + rustsecp256k1_v0_8_0_ecmult(&tmpj, &pointj, &scalar, NULL); + rustsecp256k1_v0_8_0_gej_add_var(r, r, &tmpj, NULL); } return 1; } /* Compute the number of batches and the batch size given the maximum batch size and the * total number of points */ -static int rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) { +static int rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) { if (max_n_batch_points == 0) { return 0; } @@ -791,50 +813,50 @@ static int rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(size_t *n_batches return 1; } -typedef int (*rustsecp256k1_v0_7_0_ecmult_multi_func)(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch*, rustsecp256k1_v0_7_0_gej*, const rustsecp256k1_v0_7_0_scalar*, rustsecp256k1_v0_7_0_ecmult_multi_callback cb, void*, size_t); -static int rustsecp256k1_v0_7_0_ecmult_multi_var(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_scalar *inp_g_sc, rustsecp256k1_v0_7_0_ecmult_multi_callback cb, void *cbdata, size_t n) { +typedef int (*rustsecp256k1_v0_8_0_ecmult_multi_func)(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch*, rustsecp256k1_v0_8_0_gej*, const rustsecp256k1_v0_8_0_scalar*, rustsecp256k1_v0_8_0_ecmult_multi_callback cb, void*, size_t); +static int rustsecp256k1_v0_8_0_ecmult_multi_var(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch *scratch, rustsecp256k1_v0_8_0_gej 
*r, const rustsecp256k1_v0_8_0_scalar *inp_g_sc, rustsecp256k1_v0_8_0_ecmult_multi_callback cb, void *cbdata, size_t n) { size_t i; - int (*f)(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch*, rustsecp256k1_v0_7_0_gej*, const rustsecp256k1_v0_7_0_scalar*, rustsecp256k1_v0_7_0_ecmult_multi_callback cb, void*, size_t, size_t); + int (*f)(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch*, rustsecp256k1_v0_8_0_gej*, const rustsecp256k1_v0_8_0_scalar*, rustsecp256k1_v0_8_0_ecmult_multi_callback cb, void*, size_t, size_t); size_t n_batches; size_t n_batch_points; - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); if (inp_g_sc == NULL && n == 0) { return 1; } else if (n == 0) { - rustsecp256k1_v0_7_0_scalar szero; - rustsecp256k1_v0_7_0_scalar_set_int(&szero, 0); - rustsecp256k1_v0_7_0_ecmult(r, r, &szero, inp_g_sc); + rustsecp256k1_v0_8_0_scalar szero; + rustsecp256k1_v0_8_0_scalar_set_int(&szero, 0); + rustsecp256k1_v0_8_0_ecmult(r, r, &szero, inp_g_sc); return 1; } if (scratch == NULL) { - return rustsecp256k1_v0_7_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); + return rustsecp256k1_v0_8_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); } /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm. * As a first step check if there's enough space for Pippenger's algo (which requires less space * than Strauss' algo) and if not, use the simple algorithm. */ - if (!rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_7_0_pippenger_max_points(error_callback, scratch), n)) { - return rustsecp256k1_v0_7_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); + if (!rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_8_0_pippenger_max_points(error_callback, scratch), n)) { + return rustsecp256k1_v0_8_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); } if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) { - f = rustsecp256k1_v0_7_0_ecmult_pippenger_batch; + f = rustsecp256k1_v0_8_0_ecmult_pippenger_batch; } else { - if (!rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_7_0_strauss_max_points(error_callback, scratch), n)) { - return rustsecp256k1_v0_7_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); + if (!rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, rustsecp256k1_v0_8_0_strauss_max_points(error_callback, scratch), n)) { + return rustsecp256k1_v0_8_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n); } - f = rustsecp256k1_v0_7_0_ecmult_strauss_batch; + f = rustsecp256k1_v0_8_0_ecmult_strauss_batch; } for(i = 0; i < n_batches; i++) { size_t nbp = n < n_batch_points ? n : n_batch_points; size_t offset = n_batch_points*i; - rustsecp256k1_v0_7_0_gej tmp; + rustsecp256k1_v0_8_0_gej tmp; if (!f(error_callback, scratch, &tmp, i == 0 ? 
inp_g_sc : NULL, cb, cbdata, nbp, offset)) { return 0; } - rustsecp256k1_v0_7_0_gej_add_var(r, r, &tmp, NULL); + rustsecp256k1_v0_8_0_gej_add_var(r, r, &tmp, NULL); n -= nbp; } return 1; diff --git a/secp256k1-sys/depend/secp256k1/src/field.h b/secp256k1-sys/depend/secp256k1/src/field.h index 92037489b..f1b8d18dd 100644 --- a/secp256k1-sys/depend/secp256k1/src/field.h +++ b/secp256k1-sys/depend/secp256k1/src/field.h @@ -32,96 +32,111 @@ #error "Please select wide multiplication implementation" #endif +static const rustsecp256k1_v0_8_0_fe rustsecp256k1_v0_8_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); +static const rustsecp256k1_v0_8_0_fe rustsecp256k1_v0_8_0_const_beta = SECP256K1_FE_CONST( + 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, + 0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul +); + /** Normalize a field element. This brings the field element to a canonical representation, reduces * its magnitude to 1, and reduces it modulo field size `p`. */ -static void rustsecp256k1_v0_7_0_fe_normalize(rustsecp256k1_v0_7_0_fe *r); +static void rustsecp256k1_v0_8_0_fe_normalize(rustsecp256k1_v0_8_0_fe *r); /** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */ -static void rustsecp256k1_v0_7_0_fe_normalize_weak(rustsecp256k1_v0_7_0_fe *r); +static void rustsecp256k1_v0_8_0_fe_normalize_weak(rustsecp256k1_v0_8_0_fe *r); /** Normalize a field element, without constant-time guarantee. */ -static void rustsecp256k1_v0_7_0_fe_normalize_var(rustsecp256k1_v0_7_0_fe *r); +static void rustsecp256k1_v0_8_0_fe_normalize_var(rustsecp256k1_v0_8_0_fe *r); /** Verify whether a field element represents zero i.e. would normalize to a zero value. */ -static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero(const rustsecp256k1_v0_7_0_fe *r); +static int rustsecp256k1_v0_8_0_fe_normalizes_to_zero(const rustsecp256k1_v0_8_0_fe *r); /** Verify whether a field element represents zero i.e. would normalize to a zero value, * without constant-time guarantee. */ -static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_7_0_fe *r); +static int rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_8_0_fe *r); /** Set a field element equal to a small (not greater than 0x7FFF), non-negative integer. * Resulting field element is normalized; it has magnitude 0 if a == 0, and magnitude 1 otherwise. */ -static void rustsecp256k1_v0_7_0_fe_set_int(rustsecp256k1_v0_7_0_fe *r, int a); +static void rustsecp256k1_v0_8_0_fe_set_int(rustsecp256k1_v0_8_0_fe *r, int a); /** Sets a field element equal to zero, initializing all fields. */ -static void rustsecp256k1_v0_7_0_fe_clear(rustsecp256k1_v0_7_0_fe *a); +static void rustsecp256k1_v0_8_0_fe_clear(rustsecp256k1_v0_8_0_fe *a); /** Verify whether a field element is zero. Requires the input to be normalized. */ -static int rustsecp256k1_v0_7_0_fe_is_zero(const rustsecp256k1_v0_7_0_fe *a); +static int rustsecp256k1_v0_8_0_fe_is_zero(const rustsecp256k1_v0_8_0_fe *a); /** Check the "oddness" of a field element. Requires the input to be normalized. */ -static int rustsecp256k1_v0_7_0_fe_is_odd(const rustsecp256k1_v0_7_0_fe *a); +static int rustsecp256k1_v0_8_0_fe_is_odd(const rustsecp256k1_v0_8_0_fe *a); /** Compare two field elements. Requires magnitude-1 inputs. 
*/ -static int rustsecp256k1_v0_7_0_fe_equal(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b); +static int rustsecp256k1_v0_8_0_fe_equal(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *b); -/** Same as rustsecp256k1_v0_7_0_fe_equal, but may be variable time. */ -static int rustsecp256k1_v0_7_0_fe_equal_var(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b); +/** Same as rustsecp256k1_v0_8_0_fe_equal, but may be variable time. */ +static int rustsecp256k1_v0_8_0_fe_equal_var(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *b); /** Compare two field elements. Requires both inputs to be normalized */ -static int rustsecp256k1_v0_7_0_fe_cmp_var(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b); +static int rustsecp256k1_v0_8_0_fe_cmp_var(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *b); /** Set a field element equal to 32-byte big endian value. If successful, the resulting field element is normalized. */ -static int rustsecp256k1_v0_7_0_fe_set_b32(rustsecp256k1_v0_7_0_fe *r, const unsigned char *a); +static int rustsecp256k1_v0_8_0_fe_set_b32(rustsecp256k1_v0_8_0_fe *r, const unsigned char *a); /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */ -static void rustsecp256k1_v0_7_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_7_0_fe *a); +static void rustsecp256k1_v0_8_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_8_0_fe *a); /** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input * as an argument. The magnitude of the output is one higher. */ -static void rustsecp256k1_v0_7_0_fe_negate(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, int m); +static void rustsecp256k1_v0_8_0_fe_negate(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, int m); /** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that * small integer. */ -static void rustsecp256k1_v0_7_0_fe_mul_int(rustsecp256k1_v0_7_0_fe *r, int a); +static void rustsecp256k1_v0_8_0_fe_mul_int(rustsecp256k1_v0_8_0_fe *r, int a); /** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. */ -static void rustsecp256k1_v0_7_0_fe_add(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a); +static void rustsecp256k1_v0_8_0_fe_add(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a); /** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8. * The output magnitude is 1 (but not guaranteed to be normalized). */ -static void rustsecp256k1_v0_7_0_fe_mul(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe * SECP256K1_RESTRICT b); +static void rustsecp256k1_v0_8_0_fe_mul(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe * SECP256K1_RESTRICT b); /** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8. * The output magnitude is 1 (but not guaranteed to be normalized). */ -static void rustsecp256k1_v0_7_0_fe_sqr(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a); +static void rustsecp256k1_v0_8_0_fe_sqr(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a); /** If a has a square root, it is computed in r and 1 is returned. If a does not * have a square root, the root of its negation is computed and 0 is returned. 
* The input's magnitude can be at most 8. The output magnitude is 1 (but not * guaranteed to be normalized). The result in r will always be a square * itself. */ -static int rustsecp256k1_v0_7_0_fe_sqrt(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a); +static int rustsecp256k1_v0_8_0_fe_sqrt(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a); /** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be * at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */ -static void rustsecp256k1_v0_7_0_fe_inv(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a); +static void rustsecp256k1_v0_8_0_fe_inv(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a); -/** Potentially faster version of rustsecp256k1_v0_7_0_fe_inv, without constant-time guarantee. */ -static void rustsecp256k1_v0_7_0_fe_inv_var(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a); +/** Potentially faster version of rustsecp256k1_v0_8_0_fe_inv, without constant-time guarantee. */ +static void rustsecp256k1_v0_8_0_fe_inv_var(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a); /** Convert a field element to the storage type. */ -static void rustsecp256k1_v0_7_0_fe_to_storage(rustsecp256k1_v0_7_0_fe_storage *r, const rustsecp256k1_v0_7_0_fe *a); +static void rustsecp256k1_v0_8_0_fe_to_storage(rustsecp256k1_v0_8_0_fe_storage *r, const rustsecp256k1_v0_8_0_fe *a); /** Convert a field element back from the storage type. */ -static void rustsecp256k1_v0_7_0_fe_from_storage(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe_storage *a); +static void rustsecp256k1_v0_8_0_fe_from_storage(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe_storage *a); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_7_0_fe_storage_cmov(rustsecp256k1_v0_7_0_fe_storage *r, const rustsecp256k1_v0_7_0_fe_storage *a, int flag); +static void rustsecp256k1_v0_8_0_fe_storage_cmov(rustsecp256k1_v0_8_0_fe_storage *r, const rustsecp256k1_v0_8_0_fe_storage *a, int flag); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_7_0_fe_cmov(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, int flag); +static void rustsecp256k1_v0_8_0_fe_cmov(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, int flag); + +/** Halves the value of a field element modulo the field prime. Constant-time. + * For an input magnitude 'm', the output magnitude is set to 'floor(m/2) + 1'. + * The output is not guaranteed to be normalized, regardless of the input. */ +static void rustsecp256k1_v0_8_0_fe_half(rustsecp256k1_v0_8_0_fe *r); + +/** Sets each limb of 'r' to its upper bound at magnitude 'm'. The output will also have its + * magnitude set to 'm' and is normalized if (and only if) 'm' is zero. 
*/ +static void rustsecp256k1_v0_8_0_fe_get_bounds(rustsecp256k1_v0_8_0_fe *r, int m); #endif /* SECP256K1_FIELD_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/field_10x26.h b/secp256k1-sys/depend/secp256k1/src/field_10x26.h index a90951d56..9f6ef3bec 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_10x26.h +++ b/secp256k1-sys/depend/secp256k1/src/field_10x26.h @@ -18,7 +18,7 @@ typedef struct { int magnitude; int normalized; #endif -} rustsecp256k1_v0_7_0_fe; +} rustsecp256k1_v0_8_0_fe; /* Unpacks a constant into a overlapping multi-limbed FE element. */ #define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ @@ -42,7 +42,7 @@ typedef struct { typedef struct { uint32_t n[8]; -} rustsecp256k1_v0_7_0_fe_storage; +} rustsecp256k1_v0_8_0_fe_storage; #define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }} #define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4],d.n[3], d.n[2], d.n[1], d.n[0] diff --git a/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h b/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h index bce82c407..0a66b32a8 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_10x26_impl.h @@ -21,7 +21,7 @@ */ #ifdef VERIFY -static void rustsecp256k1_v0_7_0_fe_verify(const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_verify(const rustsecp256k1_v0_8_0_fe *a) { const uint32_t *d = a->n; int m = a->normalized ? 1 : 2 * a->magnitude, r = 1; r &= (d[0] <= 0x3FFFFFFUL * m); @@ -49,7 +49,27 @@ static void rustsecp256k1_v0_7_0_fe_verify(const rustsecp256k1_v0_7_0_fe *a) { } #endif -static void rustsecp256k1_v0_7_0_fe_normalize(rustsecp256k1_v0_7_0_fe *r) { +static void rustsecp256k1_v0_8_0_fe_get_bounds(rustsecp256k1_v0_8_0_fe *r, int m) { + VERIFY_CHECK(m >= 0); + VERIFY_CHECK(m <= 2048); + r->n[0] = 0x3FFFFFFUL * 2 * m; + r->n[1] = 0x3FFFFFFUL * 2 * m; + r->n[2] = 0x3FFFFFFUL * 2 * m; + r->n[3] = 0x3FFFFFFUL * 2 * m; + r->n[4] = 0x3FFFFFFUL * 2 * m; + r->n[5] = 0x3FFFFFFUL * 2 * m; + r->n[6] = 0x3FFFFFFUL * 2 * m; + r->n[7] = 0x3FFFFFFUL * 2 * m; + r->n[8] = 0x3FFFFFFUL * 2 * m; + r->n[9] = 0x03FFFFFUL * 2 * m; +#ifdef VERIFY + r->magnitude = m; + r->normalized = (m == 0); + rustsecp256k1_v0_8_0_fe_verify(r); +#endif +} + +static void rustsecp256k1_v0_8_0_fe_normalize(rustsecp256k1_v0_8_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -100,11 +120,11 @@ static void rustsecp256k1_v0_7_0_fe_normalize(rustsecp256k1_v0_7_0_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_normalize_weak(rustsecp256k1_v0_7_0_fe *r) { +static void rustsecp256k1_v0_8_0_fe_normalize_weak(rustsecp256k1_v0_8_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -131,11 +151,11 @@ static void rustsecp256k1_v0_7_0_fe_normalize_weak(rustsecp256k1_v0_7_0_fe *r) { #ifdef VERIFY r->magnitude = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_normalize_var(rustsecp256k1_v0_7_0_fe *r) { +static void rustsecp256k1_v0_8_0_fe_normalize_var(rustsecp256k1_v0_8_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], 
t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -187,11 +207,11 @@ static void rustsecp256k1_v0_7_0_fe_normalize_var(rustsecp256k1_v0_7_0_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero(const rustsecp256k1_v0_7_0_fe *r) { +static int rustsecp256k1_v0_8_0_fe_normalizes_to_zero(const rustsecp256k1_v0_8_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ -220,7 +240,7 @@ static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero(const rustsecp256k1_v0_7_0 return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_7_0_fe *r) { +static int rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_8_0_fe *r) { uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; uint32_t z0, z1; uint32_t x; @@ -272,35 +292,35 @@ static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0 return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_set_int(rustsecp256k1_v0_7_0_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_set_int(rustsecp256k1_v0_8_0_fe *r, int a) { VERIFY_CHECK(0 <= a && a <= 0x7FFF); r->n[0] = a; r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0; #ifdef VERIFY r->magnitude = (a != 0); r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_fe_is_zero(const rustsecp256k1_v0_7_0_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_fe_is_zero(const rustsecp256k1_v0_8_0_fe *a) { const uint32_t *t = a->n; #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_fe_is_odd(const rustsecp256k1_v0_7_0_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_fe_is_odd(const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif return a->n[0] & 1; } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_clear(rustsecp256k1_v0_7_0_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_clear(rustsecp256k1_v0_8_0_fe *a) { int i; #ifdef VERIFY a->magnitude = 0; @@ -311,13 +331,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_clear(rustsecp256k1_v0_7_0_ } } -static int rustsecp256k1_v0_7_0_fe_cmp_var(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b) { +static int rustsecp256k1_v0_8_0_fe_cmp_var(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *b) { int i; #ifdef VERIFY VERIFY_CHECK(a->normalized); VERIFY_CHECK(b->normalized); - rustsecp256k1_v0_7_0_fe_verify(a); - rustsecp256k1_v0_7_0_fe_verify(b); + rustsecp256k1_v0_8_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(b); #endif for (i = 9; i >= 0; i--) { if (a->n[i] > b->n[i]) { @@ -330,7 +350,7 @@ static int rustsecp256k1_v0_7_0_fe_cmp_var(const rustsecp256k1_v0_7_0_fe *a, con return 0; } -static int rustsecp256k1_v0_7_0_fe_set_b32(rustsecp256k1_v0_7_0_fe *r, const unsigned char *a) { +static int 
rustsecp256k1_v0_8_0_fe_set_b32(rustsecp256k1_v0_8_0_fe *r, const unsigned char *a) { int ret; r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24); r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22); @@ -348,7 +368,7 @@ static int rustsecp256k1_v0_7_0_fe_set_b32(rustsecp256k1_v0_7_0_fe *r, const uns r->magnitude = 1; if (ret) { r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); } else { r->normalized = 0; } @@ -357,10 +377,10 @@ static int rustsecp256k1_v0_7_0_fe_set_b32(rustsecp256k1_v0_7_0_fe *r, const uns } /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */ -static void rustsecp256k1_v0_7_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif r[0] = (a->n[9] >> 14) & 0xff; r[1] = (a->n[9] >> 6) & 0xff; @@ -396,10 +416,10 @@ static void rustsecp256k1_v0_7_0_fe_get_b32(unsigned char *r, const rustsecp256k r[31] = a->n[0] & 0xff; } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_negate(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, int m) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_negate(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, int m) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= m); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); VERIFY_CHECK(0x3FFFC2FUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m); VERIFY_CHECK(0x3FFFFBFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m); VERIFY_CHECK(0x3FFFFFFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m); @@ -418,11 +438,11 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_negate(rustsecp256k1_v0_7_0 #ifdef VERIFY r->magnitude = m + 1; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_int(rustsecp256k1_v0_7_0_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_mul_int(rustsecp256k1_v0_8_0_fe *r, int a) { r->n[0] *= a; r->n[1] *= a; r->n[2] *= a; @@ -436,13 +456,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_int(rustsecp256k1_v0_7_ #ifdef VERIFY r->magnitude *= a; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_add(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_add(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif r->n[0] += a->n[0]; r->n[1] += a->n[1]; @@ -457,15 +477,15 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_add(rustsecp256k1_v0_7_0_fe #ifdef VERIFY r->magnitude += a->magnitude; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } #if defined(USE_EXTERNAL_ASM) /* External assembler implementation */ -void rustsecp256k1_v0_7_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); -void rustsecp256k1_v0_7_0_fe_sqr_inner(uint32_t *r, const uint32_t *a); +void rustsecp256k1_v0_8_0_fe_mul_inner(uint32_t *r, const uint32_t *a, 
const uint32_t * SECP256K1_RESTRICT b); +void rustsecp256k1_v0_8_0_fe_sqr_inner(uint32_t *r, const uint32_t *a); #else @@ -475,7 +495,7 @@ void rustsecp256k1_v0_7_0_fe_sqr_inner(uint32_t *r, const uint32_t *a); #define VERIFY_BITS(x, n) do { } while(0) #endif -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { uint64_t c, d; uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7; @@ -805,7 +825,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_inner(uint32_t *r, cons /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_sqr_inner(uint32_t *r, const uint32_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_sqr_inner(uint32_t *r, const uint32_t *a) { uint64_t c, d; uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7; @@ -1080,37 +1100,37 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_sqr_inner(uint32_t *r, cons } #endif -static void rustsecp256k1_v0_7_0_fe_mul(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe * SECP256K1_RESTRICT b) { +static void rustsecp256k1_v0_8_0_fe_mul(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe * SECP256K1_RESTRICT b) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); VERIFY_CHECK(b->magnitude <= 8); - rustsecp256k1_v0_7_0_fe_verify(a); - rustsecp256k1_v0_7_0_fe_verify(b); + rustsecp256k1_v0_8_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(b); VERIFY_CHECK(r != b); VERIFY_CHECK(a != b); #endif - rustsecp256k1_v0_7_0_fe_mul_inner(r->n, a->n, b->n); + rustsecp256k1_v0_8_0_fe_mul_inner(r->n, a->n, b->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_sqr(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_sqr(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif - rustsecp256k1_v0_7_0_fe_sqr_inner(r->n, a->n); + rustsecp256k1_v0_8_0_fe_sqr_inner(r->n, a->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_cmov(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_fe_cmov(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r->n, sizeof(r->n)); mask0 = flag + ~((uint32_t)0); @@ -1133,7 +1153,83 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_cmov(rustsecp256k1_v0_7_0_f #endif } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_storage_cmov(rustsecp256k1_v0_7_0_fe_storage *r, const rustsecp256k1_v0_7_0_fe_storage *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_fe_half(rustsecp256k1_v0_8_0_fe *r) { + uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], + t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; + 
uint32_t one = (uint32_t)1; + uint32_t mask = -(t0 & one) >> 6; + +#ifdef VERIFY + rustsecp256k1_v0_8_0_fe_verify(r); + VERIFY_CHECK(r->magnitude < 32); +#endif + + /* Bounds analysis (over the rationals). + * + * Let m = r->magnitude + * C = 0x3FFFFFFUL * 2 + * D = 0x03FFFFFUL * 2 + * + * Initial bounds: t0..t8 <= C * m + * t9 <= D * m + */ + + t0 += 0x3FFFC2FUL & mask; + t1 += 0x3FFFFBFUL & mask; + t2 += mask; + t3 += mask; + t4 += mask; + t5 += mask; + t6 += mask; + t7 += mask; + t8 += mask; + t9 += mask >> 4; + + VERIFY_CHECK((t0 & one) == 0); + + /* t0..t8: added <= C/2 + * t9: added <= D/2 + * + * Current bounds: t0..t8 <= C * (m + 1/2) + * t9 <= D * (m + 1/2) + */ + + r->n[0] = (t0 >> 1) + ((t1 & one) << 25); + r->n[1] = (t1 >> 1) + ((t2 & one) << 25); + r->n[2] = (t2 >> 1) + ((t3 & one) << 25); + r->n[3] = (t3 >> 1) + ((t4 & one) << 25); + r->n[4] = (t4 >> 1) + ((t5 & one) << 25); + r->n[5] = (t5 >> 1) + ((t6 & one) << 25); + r->n[6] = (t6 >> 1) + ((t7 & one) << 25); + r->n[7] = (t7 >> 1) + ((t8 & one) << 25); + r->n[8] = (t8 >> 1) + ((t9 & one) << 25); + r->n[9] = (t9 >> 1); + + /* t0..t8: shifted right and added <= C/4 + 1/2 + * t9: shifted right + * + * Current bounds: t0..t8 <= C * (m/2 + 1/2) + * t9 <= D * (m/2 + 1/4) + */ + +#ifdef VERIFY + /* Therefore the output magnitude (M) has to be set such that: + * t0..t8: C * M >= C * (m/2 + 1/2) + * t9: D * M >= D * (m/2 + 1/4) + * + * It suffices for all limbs that, for any input magnitude m: + * M >= m/2 + 1/2 + * + * and since we want the smallest such integer value for M: + * M == floor(m/2) + 1 + */ + r->magnitude = (r->magnitude >> 1) + 1; + r->normalized = 0; + rustsecp256k1_v0_8_0_fe_verify(r); +#endif +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_fe_storage_cmov(rustsecp256k1_v0_8_0_fe_storage *r, const rustsecp256k1_v0_8_0_fe_storage *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r->n, sizeof(r->n)); mask0 = flag + ~((uint32_t)0); @@ -1148,7 +1244,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_storage_cmov(rustsecp256k1_ r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1); } -static void rustsecp256k1_v0_7_0_fe_to_storage(rustsecp256k1_v0_7_0_fe_storage *r, const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_to_storage(rustsecp256k1_v0_8_0_fe_storage *r, const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); #endif @@ -1162,7 +1258,7 @@ static void rustsecp256k1_v0_7_0_fe_to_storage(rustsecp256k1_v0_7_0_fe_storage * r->n[7] = a->n[8] >> 16 | a->n[9] << 10; } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_from_storage(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe_storage *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_fe_from_storage(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe_storage *a) { r->n[0] = a->n[0] & 0x3FFFFFFUL; r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL); r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL); @@ -1176,16 +1272,16 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_from_storage(rustsecp256k1_ #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_from_signed30(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_modinv32_signed30 *a) { +static void rustsecp256k1_v0_8_0_fe_from_signed30(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_modinv32_signed30 *a) { const uint32_t M26 = UINT32_MAX >> 6; const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = 
a->v[2], a3 = a->v[3], a4 = a->v[4], a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8]; - /* The output from rustsecp256k1_v0_7_0_modinv32{_var} should be normalized to range [0,modulus), and + /* The output from rustsecp256k1_v0_8_0_modinv32{_var} should be normalized to range [0,modulus), and * have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8). */ VERIFY_CHECK(a0 >> 30 == 0); @@ -1212,11 +1308,11 @@ static void rustsecp256k1_v0_7_0_fe_from_signed30(rustsecp256k1_v0_7_0_fe *r, co #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_to_signed30(rustsecp256k1_v0_7_0_modinv32_signed30 *r, const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_to_signed30(rustsecp256k1_v0_8_0_modinv32_signed30 *r, const rustsecp256k1_v0_8_0_fe *a) { const uint32_t M30 = UINT32_MAX >> 2; const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4], a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9]; @@ -1237,35 +1333,35 @@ static void rustsecp256k1_v0_7_0_fe_to_signed30(rustsecp256k1_v0_7_0_modinv32_si r->v[8] = a9 >> 6; } -static const rustsecp256k1_v0_7_0_modinv32_modinfo rustsecp256k1_v0_7_0_const_modinfo_fe = { +static const rustsecp256k1_v0_8_0_modinv32_modinfo rustsecp256k1_v0_8_0_const_modinfo_fe = { {{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}}, 0x2DDACACFL }; -static void rustsecp256k1_v0_7_0_fe_inv(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *x) { - rustsecp256k1_v0_7_0_fe tmp; - rustsecp256k1_v0_7_0_modinv32_signed30 s; +static void rustsecp256k1_v0_8_0_fe_inv(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *x) { + rustsecp256k1_v0_8_0_fe tmp; + rustsecp256k1_v0_8_0_modinv32_signed30 s; tmp = *x; - rustsecp256k1_v0_7_0_fe_normalize(&tmp); - rustsecp256k1_v0_7_0_fe_to_signed30(&s, &tmp); - rustsecp256k1_v0_7_0_modinv32(&s, &rustsecp256k1_v0_7_0_const_modinfo_fe); - rustsecp256k1_v0_7_0_fe_from_signed30(r, &s); + rustsecp256k1_v0_8_0_fe_normalize(&tmp); + rustsecp256k1_v0_8_0_fe_to_signed30(&s, &tmp); + rustsecp256k1_v0_8_0_modinv32(&s, &rustsecp256k1_v0_8_0_const_modinfo_fe); + rustsecp256k1_v0_8_0_fe_from_signed30(r, &s); - VERIFY_CHECK(rustsecp256k1_v0_7_0_fe_normalizes_to_zero(r) == rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&tmp)); + VERIFY_CHECK(rustsecp256k1_v0_8_0_fe_normalizes_to_zero(r) == rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&tmp)); } -static void rustsecp256k1_v0_7_0_fe_inv_var(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *x) { - rustsecp256k1_v0_7_0_fe tmp; - rustsecp256k1_v0_7_0_modinv32_signed30 s; +static void rustsecp256k1_v0_8_0_fe_inv_var(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *x) { + rustsecp256k1_v0_8_0_fe tmp; + rustsecp256k1_v0_8_0_modinv32_signed30 s; tmp = *x; - rustsecp256k1_v0_7_0_fe_normalize_var(&tmp); - rustsecp256k1_v0_7_0_fe_to_signed30(&s, &tmp); - rustsecp256k1_v0_7_0_modinv32_var(&s, &rustsecp256k1_v0_7_0_const_modinfo_fe); - rustsecp256k1_v0_7_0_fe_from_signed30(r, &s); + rustsecp256k1_v0_8_0_fe_normalize_var(&tmp); + rustsecp256k1_v0_8_0_fe_to_signed30(&s, &tmp); + rustsecp256k1_v0_8_0_modinv32_var(&s, &rustsecp256k1_v0_8_0_const_modinfo_fe); + rustsecp256k1_v0_8_0_fe_from_signed30(r, &s); - VERIFY_CHECK(rustsecp256k1_v0_7_0_fe_normalizes_to_zero(r) == rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&tmp)); + VERIFY_CHECK(rustsecp256k1_v0_8_0_fe_normalizes_to_zero(r) == 
rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&tmp)); } #endif /* SECP256K1_FIELD_REPR_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52.h b/secp256k1-sys/depend/secp256k1/src/field_5x52.h index 8971268b2..94bebc35f 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52.h @@ -18,7 +18,7 @@ typedef struct { int magnitude; int normalized; #endif -} rustsecp256k1_v0_7_0_fe; +} rustsecp256k1_v0_8_0_fe; /* Unpacks a constant into a overlapping multi-limbed FE element. */ #define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ @@ -37,7 +37,7 @@ typedef struct { typedef struct { uint64_t n[4]; -} rustsecp256k1_v0_7_0_fe_storage; +} rustsecp256k1_v0_8_0_fe_storage; #define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \ (d0) | (((uint64_t)(d1)) << 32), \ diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h index d45652070..b1ede21fe 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_asm_impl.h @@ -14,7 +14,7 @@ #ifndef SECP256K1_FIELD_INNER5X52_IMPL_H #define SECP256K1_FIELD_INNER5X52_IMPL_H -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { /** * Registers: rdx:rax = multiplication accumulator * r9:r8 = c @@ -284,7 +284,7 @@ __asm__ __volatile__( ); } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { /** * Registers: rdx:rax = multiplication accumulator * r9:r8 = c diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h index a800ef2d3..fd459cfe1 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_impl.h @@ -37,7 +37,7 @@ */ #ifdef VERIFY -static void rustsecp256k1_v0_7_0_fe_verify(const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_verify(const rustsecp256k1_v0_8_0_fe *a) { const uint64_t *d = a->n; int m = a->normalized ? 1 : 2 * a->magnitude, r = 1; /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. 
*/ @@ -58,7 +58,22 @@ static void rustsecp256k1_v0_7_0_fe_verify(const rustsecp256k1_v0_7_0_fe *a) { } #endif -static void rustsecp256k1_v0_7_0_fe_normalize(rustsecp256k1_v0_7_0_fe *r) { +static void rustsecp256k1_v0_8_0_fe_get_bounds(rustsecp256k1_v0_8_0_fe *r, int m) { + VERIFY_CHECK(m >= 0); + VERIFY_CHECK(m <= 2048); + r->n[0] = 0xFFFFFFFFFFFFFULL * 2 * m; + r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * m; + r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * m; + r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * m; + r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * m; +#ifdef VERIFY + r->magnitude = m; + r->normalized = (m == 0); + rustsecp256k1_v0_8_0_fe_verify(r); +#endif +} + +static void rustsecp256k1_v0_8_0_fe_normalize(rustsecp256k1_v0_8_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -97,11 +112,11 @@ static void rustsecp256k1_v0_7_0_fe_normalize(rustsecp256k1_v0_7_0_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_normalize_weak(rustsecp256k1_v0_7_0_fe *r) { +static void rustsecp256k1_v0_8_0_fe_normalize_weak(rustsecp256k1_v0_8_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -121,11 +136,11 @@ static void rustsecp256k1_v0_7_0_fe_normalize_weak(rustsecp256k1_v0_7_0_fe *r) { #ifdef VERIFY r->magnitude = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_normalize_var(rustsecp256k1_v0_7_0_fe *r) { +static void rustsecp256k1_v0_8_0_fe_normalize_var(rustsecp256k1_v0_8_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* Reduce t4 at the start so there will be at most a single carry from the first pass */ @@ -165,11 +180,11 @@ static void rustsecp256k1_v0_7_0_fe_normalize_var(rustsecp256k1_v0_7_0_fe *r) { #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero(const rustsecp256k1_v0_7_0_fe *r) { +static int rustsecp256k1_v0_8_0_fe_normalizes_to_zero(const rustsecp256k1_v0_8_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ @@ -192,7 +207,7 @@ static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero(const rustsecp256k1_v0_7_0 return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_7_0_fe *r) { +static int rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0_8_0_fe *r) { uint64_t t0, t1, t2, t3, t4; uint64_t z0, z1; uint64_t x; @@ -233,35 +248,35 @@ static int rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(const rustsecp256k1_v0 return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_set_int(rustsecp256k1_v0_7_0_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_set_int(rustsecp256k1_v0_8_0_fe *r, int a) { VERIFY_CHECK(0 <= a && a <= 0x7FFF); r->n[0] = a; r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; #ifdef VERIFY r->magnitude = (a != 0); r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } 
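The new `rustsecp256k1_v0_8_0_fe_get_bounds` (added above for the 10x26 representation and here for 5x52) fills an element with the largest limb values permitted at magnitude `m`, marking it normalized only for `m == 0`. A standalone sanity check, not part of the vendored diff and assuming a compiler with GCC/Clang-style `unsigned __int128`, of the headroom behind the `m <= 2048` cap in the 5x52 case: at `m == 2048` the largest bound written is `(2^52 - 1) * 2 * 2048 = 2^64 - 2^12`, which still fits the 64-bit limbs.

```
/* Sketch: confirm the worst-case 5x52 limb bound at the maximum permitted
 * magnitude (m = 2048) does not overflow uint64_t. Requires a compiler
 * providing unsigned __int128 (GCC/Clang). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint64_t limb_max = 0xFFFFFFFFFFFFFULL; /* 2^52 - 1; fe_verify allows n[0..3] <= limb_max * 2 * magnitude */
    unsigned __int128 bound = (unsigned __int128)limb_max * 2 * 2048;
    assert(bound <= UINT64_MAX); /* no truncation when fe_get_bounds stores it */
    printf("worst-case limb bound: 0x%llx\n", (unsigned long long)(uint64_t)bound);
    return 0;
}
```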
-SECP256K1_INLINE static int rustsecp256k1_v0_7_0_fe_is_zero(const rustsecp256k1_v0_7_0_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_fe_is_zero(const rustsecp256k1_v0_8_0_fe *a) { const uint64_t *t = a->n; #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_fe_is_odd(const rustsecp256k1_v0_7_0_fe *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_fe_is_odd(const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif return a->n[0] & 1; } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_clear(rustsecp256k1_v0_7_0_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_clear(rustsecp256k1_v0_8_0_fe *a) { int i; #ifdef VERIFY a->magnitude = 0; @@ -272,13 +287,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_clear(rustsecp256k1_v0_7_0_ } } -static int rustsecp256k1_v0_7_0_fe_cmp_var(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b) { +static int rustsecp256k1_v0_8_0_fe_cmp_var(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *b) { int i; #ifdef VERIFY VERIFY_CHECK(a->normalized); VERIFY_CHECK(b->normalized); - rustsecp256k1_v0_7_0_fe_verify(a); - rustsecp256k1_v0_7_0_fe_verify(b); + rustsecp256k1_v0_8_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(b); #endif for (i = 4; i >= 0; i--) { if (a->n[i] > b->n[i]) { @@ -291,7 +306,7 @@ static int rustsecp256k1_v0_7_0_fe_cmp_var(const rustsecp256k1_v0_7_0_fe *a, con return 0; } -static int rustsecp256k1_v0_7_0_fe_set_b32(rustsecp256k1_v0_7_0_fe *r, const unsigned char *a) { +static int rustsecp256k1_v0_8_0_fe_set_b32(rustsecp256k1_v0_8_0_fe *r, const unsigned char *a) { int ret; r->n[0] = (uint64_t)a[31] | ((uint64_t)a[30] << 8) @@ -332,7 +347,7 @@ static int rustsecp256k1_v0_7_0_fe_set_b32(rustsecp256k1_v0_7_0_fe *r, const uns r->magnitude = 1; if (ret) { r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); } else { r->normalized = 0; } @@ -341,10 +356,10 @@ static int rustsecp256k1_v0_7_0_fe_set_b32(rustsecp256k1_v0_7_0_fe *r, const uns } /** Convert a field element to a 32-byte big endian value. 
Requires the input to be normalized */ -static void rustsecp256k1_v0_7_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_get_b32(unsigned char *r, const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif r[0] = (a->n[4] >> 40) & 0xFF; r[1] = (a->n[4] >> 32) & 0xFF; @@ -380,10 +395,10 @@ static void rustsecp256k1_v0_7_0_fe_get_b32(unsigned char *r, const rustsecp256k r[31] = a->n[0] & 0xFF; } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_negate(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, int m) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_negate(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, int m) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= m); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); VERIFY_CHECK(0xFFFFEFFFFFC2FULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m); VERIFY_CHECK(0xFFFFFFFFFFFFFULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m); VERIFY_CHECK(0x0FFFFFFFFFFFFULL * 2 * (m + 1) >= 0x0FFFFFFFFFFFFULL * 2 * m); @@ -396,11 +411,11 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_negate(rustsecp256k1_v0_7_0 #ifdef VERIFY r->magnitude = m + 1; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_int(rustsecp256k1_v0_7_0_fe *r, int a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_mul_int(rustsecp256k1_v0_8_0_fe *r, int a) { r->n[0] *= a; r->n[1] *= a; r->n[2] *= a; @@ -409,13 +424,13 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_int(rustsecp256k1_v0_7_ #ifdef VERIFY r->magnitude *= a; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_add(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_add(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif r->n[0] += a->n[0]; r->n[1] += a->n[1]; @@ -425,41 +440,41 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_add(rustsecp256k1_v0_7_0_fe #ifdef VERIFY r->magnitude += a->magnitude; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_mul(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe * SECP256K1_RESTRICT b) { +static void rustsecp256k1_v0_8_0_fe_mul(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe * SECP256K1_RESTRICT b) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); VERIFY_CHECK(b->magnitude <= 8); - rustsecp256k1_v0_7_0_fe_verify(a); - rustsecp256k1_v0_7_0_fe_verify(b); + rustsecp256k1_v0_8_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(b); VERIFY_CHECK(r != b); VERIFY_CHECK(a != b); #endif - rustsecp256k1_v0_7_0_fe_mul_inner(r->n, a->n, b->n); + rustsecp256k1_v0_8_0_fe_mul_inner(r->n, a->n, b->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_sqr(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_sqr(rustsecp256k1_v0_8_0_fe *r, const 
rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->magnitude <= 8); - rustsecp256k1_v0_7_0_fe_verify(a); + rustsecp256k1_v0_8_0_fe_verify(a); #endif - rustsecp256k1_v0_7_0_fe_sqr_inner(r->n, a->n); + rustsecp256k1_v0_8_0_fe_sqr_inner(r->n, a->n); #ifdef VERIFY r->magnitude = 1; r->normalized = 0; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_cmov(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_fe_cmov(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a, int flag) { uint64_t mask0, mask1; VG_CHECK_VERIFY(r->n, sizeof(r->n)); mask0 = flag + ~((uint64_t)0); @@ -477,7 +492,72 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_cmov(rustsecp256k1_v0_7_0_f #endif } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_storage_cmov(rustsecp256k1_v0_7_0_fe_storage *r, const rustsecp256k1_v0_7_0_fe_storage *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_fe_half(rustsecp256k1_v0_8_0_fe *r) { + uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; + uint64_t one = (uint64_t)1; + uint64_t mask = -(t0 & one) >> 12; + +#ifdef VERIFY + rustsecp256k1_v0_8_0_fe_verify(r); + VERIFY_CHECK(r->magnitude < 32); +#endif + + /* Bounds analysis (over the rationals). + * + * Let m = r->magnitude + * C = 0xFFFFFFFFFFFFFULL * 2 + * D = 0x0FFFFFFFFFFFFULL * 2 + * + * Initial bounds: t0..t3 <= C * m + * t4 <= D * m + */ + + t0 += 0xFFFFEFFFFFC2FULL & mask; + t1 += mask; + t2 += mask; + t3 += mask; + t4 += mask >> 4; + + VERIFY_CHECK((t0 & one) == 0); + + /* t0..t3: added <= C/2 + * t4: added <= D/2 + * + * Current bounds: t0..t3 <= C * (m + 1/2) + * t4 <= D * (m + 1/2) + */ + + r->n[0] = (t0 >> 1) + ((t1 & one) << 51); + r->n[1] = (t1 >> 1) + ((t2 & one) << 51); + r->n[2] = (t2 >> 1) + ((t3 & one) << 51); + r->n[3] = (t3 >> 1) + ((t4 & one) << 51); + r->n[4] = (t4 >> 1); + + /* t0..t3: shifted right and added <= C/4 + 1/2 + * t4: shifted right + * + * Current bounds: t0..t3 <= C * (m/2 + 1/2) + * t4 <= D * (m/2 + 1/4) + */ + +#ifdef VERIFY + /* Therefore the output magnitude (M) has to be set such that: + * t0..t3: C * M >= C * (m/2 + 1/2) + * t4: D * M >= D * (m/2 + 1/4) + * + * It suffices for all limbs that, for any input magnitude m: + * M >= m/2 + 1/2 + * + * and since we want the smallest such integer value for M: + * M == floor(m/2) + 1 + */ + r->magnitude = (r->magnitude >> 1) + 1; + r->normalized = 0; + rustsecp256k1_v0_8_0_fe_verify(r); +#endif +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_fe_storage_cmov(rustsecp256k1_v0_8_0_fe_storage *r, const rustsecp256k1_v0_8_0_fe_storage *a, int flag) { uint64_t mask0, mask1; VG_CHECK_VERIFY(r->n, sizeof(r->n)); mask0 = flag + ~((uint64_t)0); @@ -488,7 +568,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_storage_cmov(rustsecp256k1_ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); } -static void rustsecp256k1_v0_7_0_fe_to_storage(rustsecp256k1_v0_7_0_fe_storage *r, const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_to_storage(rustsecp256k1_v0_8_0_fe_storage *r, const rustsecp256k1_v0_8_0_fe *a) { #ifdef VERIFY VERIFY_CHECK(a->normalized); #endif @@ -498,7 +578,7 @@ static void rustsecp256k1_v0_7_0_fe_to_storage(rustsecp256k1_v0_7_0_fe_storage * r->n[3] = a->n[3] >> 36 | a->n[4] << 16; } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_from_storage(rustsecp256k1_v0_7_0_fe *r, const 
rustsecp256k1_v0_7_0_fe_storage *a) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_fe_from_storage(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe_storage *a) { r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL; r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL); r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL); @@ -507,15 +587,15 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_fe_from_storage(rustsecp256k1_ #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_from_signed62(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_modinv64_signed62 *a) { +static void rustsecp256k1_v0_8_0_fe_from_signed62(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_modinv64_signed62 *a) { const uint64_t M52 = UINT64_MAX >> 12; const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4]; - /* The output from rustsecp256k1_v0_7_0_modinv64{_var} should be normalized to range [0,modulus), and + /* The output from rustsecp256k1_v0_8_0_modinv64{_var} should be normalized to range [0,modulus), and * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4). */ VERIFY_CHECK(a0 >> 62 == 0); @@ -533,11 +613,11 @@ static void rustsecp256k1_v0_7_0_fe_from_signed62(rustsecp256k1_v0_7_0_fe *r, co #ifdef VERIFY r->magnitude = 1; r->normalized = 1; - rustsecp256k1_v0_7_0_fe_verify(r); + rustsecp256k1_v0_8_0_fe_verify(r); #endif } -static void rustsecp256k1_v0_7_0_fe_to_signed62(rustsecp256k1_v0_7_0_modinv64_signed62 *r, const rustsecp256k1_v0_7_0_fe *a) { +static void rustsecp256k1_v0_8_0_fe_to_signed62(rustsecp256k1_v0_8_0_modinv64_signed62 *r, const rustsecp256k1_v0_8_0_fe *a) { const uint64_t M62 = UINT64_MAX >> 2; const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4]; @@ -552,38 +632,38 @@ static void rustsecp256k1_v0_7_0_fe_to_signed62(rustsecp256k1_v0_7_0_modinv64_si r->v[4] = a4 >> 40; } -static const rustsecp256k1_v0_7_0_modinv64_modinfo rustsecp256k1_v0_7_0_const_modinfo_fe = { +static const rustsecp256k1_v0_8_0_modinv64_modinfo rustsecp256k1_v0_8_0_const_modinfo_fe = { {{-0x1000003D1LL, 0, 0, 0, 256}}, 0x27C7F6E22DDACACFLL }; -static void rustsecp256k1_v0_7_0_fe_inv(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *x) { - rustsecp256k1_v0_7_0_fe tmp; - rustsecp256k1_v0_7_0_modinv64_signed62 s; +static void rustsecp256k1_v0_8_0_fe_inv(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *x) { + rustsecp256k1_v0_8_0_fe tmp; + rustsecp256k1_v0_8_0_modinv64_signed62 s; tmp = *x; - rustsecp256k1_v0_7_0_fe_normalize(&tmp); - rustsecp256k1_v0_7_0_fe_to_signed62(&s, &tmp); - rustsecp256k1_v0_7_0_modinv64(&s, &rustsecp256k1_v0_7_0_const_modinfo_fe); - rustsecp256k1_v0_7_0_fe_from_signed62(r, &s); + rustsecp256k1_v0_8_0_fe_normalize(&tmp); + rustsecp256k1_v0_8_0_fe_to_signed62(&s, &tmp); + rustsecp256k1_v0_8_0_modinv64(&s, &rustsecp256k1_v0_8_0_const_modinfo_fe); + rustsecp256k1_v0_8_0_fe_from_signed62(r, &s); #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_fe_normalizes_to_zero(r) == rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&tmp)); + VERIFY_CHECK(rustsecp256k1_v0_8_0_fe_normalizes_to_zero(r) == rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&tmp)); #endif } -static void rustsecp256k1_v0_7_0_fe_inv_var(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *x) { - rustsecp256k1_v0_7_0_fe tmp; - rustsecp256k1_v0_7_0_modinv64_signed62 s; +static void 
rustsecp256k1_v0_8_0_fe_inv_var(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *x) { + rustsecp256k1_v0_8_0_fe tmp; + rustsecp256k1_v0_8_0_modinv64_signed62 s; tmp = *x; - rustsecp256k1_v0_7_0_fe_normalize_var(&tmp); - rustsecp256k1_v0_7_0_fe_to_signed62(&s, &tmp); - rustsecp256k1_v0_7_0_modinv64_var(&s, &rustsecp256k1_v0_7_0_const_modinfo_fe); - rustsecp256k1_v0_7_0_fe_from_signed62(r, &s); + rustsecp256k1_v0_8_0_fe_normalize_var(&tmp); + rustsecp256k1_v0_8_0_fe_to_signed62(&s, &tmp); + rustsecp256k1_v0_8_0_modinv64_var(&s, &rustsecp256k1_v0_8_0_const_modinfo_fe); + rustsecp256k1_v0_8_0_fe_from_signed62(r, &s); #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_fe_normalizes_to_zero(r) == rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&tmp)); + VERIFY_CHECK(rustsecp256k1_v0_8_0_fe_normalizes_to_zero(r) == rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&tmp)); #endif } diff --git a/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h b/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h index 405dce946..2fa22ca37 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_5x52_int128_impl.h @@ -9,14 +9,18 @@ #include +#include "int128.h" + #ifdef VERIFY #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0) +#define VERIFY_BITS_128(x, n) VERIFY_CHECK(rustsecp256k1_v0_8_0_u128_check_bits((x), (n))) #else #define VERIFY_BITS(x, n) do { } while(0) +#define VERIFY_BITS_128(x, n) do { } while(0) #endif -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { - uint128_t c, d; +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { + rustsecp256k1_v0_8_0_uint128 c, d; uint64_t t3, t4, tx, u0; uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; @@ -40,121 +44,119 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_mul_inner(uint64_t *r, cons * Note that [x 0 0 0 0 0] = [x*R]. 
*/ - d = (uint128_t)a0 * b[3] - + (uint128_t)a1 * b[2] - + (uint128_t)a2 * b[1] - + (uint128_t)a3 * b[0]; - VERIFY_BITS(d, 114); + rustsecp256k1_v0_8_0_u128_mul(&d, a0, b[3]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a1, b[2]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a2, b[1]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a3, b[0]); + VERIFY_BITS_128(&d, 114); /* [d 0 0 0] = [p3 0 0 0] */ - c = (uint128_t)a4 * b[4]; - VERIFY_BITS(c, 112); + rustsecp256k1_v0_8_0_u128_mul(&c, a4, b[4]); + VERIFY_BITS_128(&c, 112); /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - d += (uint128_t)R * (uint64_t)c; c >>= 64; - VERIFY_BITS(d, 115); - VERIFY_BITS(c, 48); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, R, rustsecp256k1_v0_8_0_u128_to_u64(&c)); rustsecp256k1_v0_8_0_u128_rshift(&c, 64); + VERIFY_BITS_128(&d, 115); + VERIFY_BITS_128(&c, 48); /* [(c<<12) 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - t3 = d & M; d >>= 52; + t3 = rustsecp256k1_v0_8_0_u128_to_u64(&d) & M; rustsecp256k1_v0_8_0_u128_rshift(&d, 52); VERIFY_BITS(t3, 52); - VERIFY_BITS(d, 63); + VERIFY_BITS_128(&d, 63); /* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - d += (uint128_t)a0 * b[4] - + (uint128_t)a1 * b[3] - + (uint128_t)a2 * b[2] - + (uint128_t)a3 * b[1] - + (uint128_t)a4 * b[0]; - VERIFY_BITS(d, 115); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a0, b[4]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a1, b[3]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a2, b[2]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a3, b[1]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a4, b[0]); + VERIFY_BITS_128(&d, 115); /* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - d += (uint128_t)(R << 12) * (uint64_t)c; - VERIFY_BITS(d, 116); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_8_0_u128_to_u64(&c)); + VERIFY_BITS_128(&d, 116); /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - t4 = d & M; d >>= 52; + t4 = rustsecp256k1_v0_8_0_u128_to_u64(&d) & M; rustsecp256k1_v0_8_0_u128_rshift(&d, 52); VERIFY_BITS(t4, 52); - VERIFY_BITS(d, 64); + VERIFY_BITS_128(&d, 64); /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ tx = (t4 >> 48); t4 &= (M >> 4); VERIFY_BITS(tx, 4); VERIFY_BITS(t4, 48); /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - c = (uint128_t)a0 * b[0]; - VERIFY_BITS(c, 112); + rustsecp256k1_v0_8_0_u128_mul(&c, a0, b[0]); + VERIFY_BITS_128(&c, 112); /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ - d += (uint128_t)a1 * b[4] - + (uint128_t)a2 * b[3] - + (uint128_t)a3 * b[2] - + (uint128_t)a4 * b[1]; - VERIFY_BITS(d, 115); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a1, b[4]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a2, b[3]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a3, b[2]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a4, b[1]); + VERIFY_BITS_128(&d, 115); /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - u0 = d & M; d >>= 52; + u0 = rustsecp256k1_v0_8_0_u128_to_u64(&d) & M; rustsecp256k1_v0_8_0_u128_rshift(&d, 52); VERIFY_BITS(u0, 52); - VERIFY_BITS(d, 63); + VERIFY_BITS_128(&d, 63); /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ u0 = (u0 << 4) | tx; VERIFY_BITS(u0, 56); /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - c += (uint128_t)u0 * (R >> 4); - VERIFY_BITS(c, 115); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, u0, R >> 4); + VERIFY_BITS_128(&c, 115); /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - r[0] = c & M; c >>= 52; + r[0] = rustsecp256k1_v0_8_0_u128_to_u64(&c) & M; 
rustsecp256k1_v0_8_0_u128_rshift(&c, 52); VERIFY_BITS(r[0], 52); - VERIFY_BITS(c, 61); + VERIFY_BITS_128(&c, 61); /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ - c += (uint128_t)a0 * b[1] - + (uint128_t)a1 * b[0]; - VERIFY_BITS(c, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, a0, b[1]); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, a1, b[0]); + VERIFY_BITS_128(&c, 114); /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ - d += (uint128_t)a2 * b[4] - + (uint128_t)a3 * b[3] - + (uint128_t)a4 * b[2]; - VERIFY_BITS(d, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a2, b[4]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a3, b[3]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a4, b[2]); + VERIFY_BITS_128(&d, 114); /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - c += (d & M) * R; d >>= 52; - VERIFY_BITS(c, 115); - VERIFY_BITS(d, 62); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, rustsecp256k1_v0_8_0_u128_to_u64(&d) & M, R); rustsecp256k1_v0_8_0_u128_rshift(&d, 52); + VERIFY_BITS_128(&c, 115); + VERIFY_BITS_128(&d, 62); /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - r[1] = c & M; c >>= 52; + r[1] = rustsecp256k1_v0_8_0_u128_to_u64(&c) & M; rustsecp256k1_v0_8_0_u128_rshift(&c, 52); VERIFY_BITS(r[1], 52); - VERIFY_BITS(c, 63); + VERIFY_BITS_128(&c, 63); /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - c += (uint128_t)a0 * b[2] - + (uint128_t)a1 * b[1] - + (uint128_t)a2 * b[0]; - VERIFY_BITS(c, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, a0, b[2]); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, a1, b[1]); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, a2, b[0]); + VERIFY_BITS_128(&c, 114); /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint128_t)a3 * b[4] - + (uint128_t)a4 * b[3]; - VERIFY_BITS(d, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a3, b[4]); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a4, b[3]); + VERIFY_BITS_128(&d, 114); /* [d 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += (uint128_t)R * (uint64_t)d; d >>= 64; - VERIFY_BITS(c, 115); - VERIFY_BITS(d, 50); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, R, rustsecp256k1_v0_8_0_u128_to_u64(&d)); rustsecp256k1_v0_8_0_u128_rshift(&d, 64); + VERIFY_BITS_128(&c, 115); + VERIFY_BITS_128(&d, 50); /* [(d<<12) 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[2] = c & M; c >>= 52; + r[2] = rustsecp256k1_v0_8_0_u128_to_u64(&c) & M; rustsecp256k1_v0_8_0_u128_rshift(&c, 52); VERIFY_BITS(r[2], 52); - VERIFY_BITS(c, 63); + VERIFY_BITS_128(&c, 63); /* [(d<<12) 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += (uint128_t)(R << 12) * (uint64_t)d + t3; - VERIFY_BITS(c, 100); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_8_0_u128_to_u64(&d)); + rustsecp256k1_v0_8_0_u128_accum_u64(&c, t3); + VERIFY_BITS_128(&c, 100); /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[3] = c & M; c >>= 52; + r[3] = rustsecp256k1_v0_8_0_u128_to_u64(&c) & M; rustsecp256k1_v0_8_0_u128_rshift(&c, 52); VERIFY_BITS(r[3], 52); - VERIFY_BITS(c, 48); + VERIFY_BITS_128(&c, 48); /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += t4; - VERIFY_BITS(c, 49); - /* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[4] = c; + r[4] = rustsecp256k1_v0_8_0_u128_to_u64(&c) + t4; VERIFY_BITS(r[4], 49); /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { - uint128_t c, d; +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_fe_sqr_inner(uint64_t *r, const uint64_t *a) { + 
rustsecp256k1_v0_8_0_uint128 c, d; uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; int64_t t3, t4, tx, u0; const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; @@ -170,107 +172,105 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_fe_sqr_inner(uint64_t *r, cons * Note that [x 0 0 0 0 0] = [x*R]. */ - d = (uint128_t)(a0*2) * a3 - + (uint128_t)(a1*2) * a2; - VERIFY_BITS(d, 114); + rustsecp256k1_v0_8_0_u128_mul(&d, a0*2, a3); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a1*2, a2); + VERIFY_BITS_128(&d, 114); /* [d 0 0 0] = [p3 0 0 0] */ - c = (uint128_t)a4 * a4; - VERIFY_BITS(c, 112); + rustsecp256k1_v0_8_0_u128_mul(&c, a4, a4); + VERIFY_BITS_128(&c, 112); /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - d += (uint128_t)R * (uint64_t)c; c >>= 64; - VERIFY_BITS(d, 115); - VERIFY_BITS(c, 48); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, R, rustsecp256k1_v0_8_0_u128_to_u64(&c)); rustsecp256k1_v0_8_0_u128_rshift(&c, 64); + VERIFY_BITS_128(&d, 115); + VERIFY_BITS_128(&c, 48); /* [(c<<12) 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - t3 = d & M; d >>= 52; + t3 = rustsecp256k1_v0_8_0_u128_to_u64(&d) & M; rustsecp256k1_v0_8_0_u128_rshift(&d, 52); VERIFY_BITS(t3, 52); - VERIFY_BITS(d, 63); + VERIFY_BITS_128(&d, 63); /* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ a4 *= 2; - d += (uint128_t)a0 * a4 - + (uint128_t)(a1*2) * a3 - + (uint128_t)a2 * a2; - VERIFY_BITS(d, 115); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a0, a4); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a1*2, a3); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a2, a2); + VERIFY_BITS_128(&d, 115); /* [(c<<12) 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - d += (uint128_t)(R << 12) * (uint64_t)c; - VERIFY_BITS(d, 116); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, R << 12, rustsecp256k1_v0_8_0_u128_to_u64(&c)); + VERIFY_BITS_128(&d, 116); /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - t4 = d & M; d >>= 52; + t4 = rustsecp256k1_v0_8_0_u128_to_u64(&d) & M; rustsecp256k1_v0_8_0_u128_rshift(&d, 52); VERIFY_BITS(t4, 52); - VERIFY_BITS(d, 64); + VERIFY_BITS_128(&d, 64); /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ tx = (t4 >> 48); t4 &= (M >> 4); VERIFY_BITS(tx, 4); VERIFY_BITS(t4, 48); /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - c = (uint128_t)a0 * a0; - VERIFY_BITS(c, 112); + rustsecp256k1_v0_8_0_u128_mul(&c, a0, a0); + VERIFY_BITS_128(&c, 112); /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ - d += (uint128_t)a1 * a4 - + (uint128_t)(a2*2) * a3; - VERIFY_BITS(d, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a1, a4); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a2*2, a3); + VERIFY_BITS_128(&d, 114); /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - u0 = d & M; d >>= 52; + u0 = rustsecp256k1_v0_8_0_u128_to_u64(&d) & M; rustsecp256k1_v0_8_0_u128_rshift(&d, 52); VERIFY_BITS(u0, 52); - VERIFY_BITS(d, 62); + VERIFY_BITS_128(&d, 62); /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ u0 = (u0 << 4) | tx; VERIFY_BITS(u0, 56); /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - c += (uint128_t)u0 * (R >> 4); - VERIFY_BITS(c, 113); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, u0, R >> 4); + VERIFY_BITS_128(&c, 113); /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - r[0] = c & M; c >>= 52; + r[0] = rustsecp256k1_v0_8_0_u128_to_u64(&c) & M; rustsecp256k1_v0_8_0_u128_rshift(&c, 52); VERIFY_BITS(r[0], 52); - VERIFY_BITS(c, 61); + VERIFY_BITS_128(&c, 61); /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ a0 *= 2; 
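The mechanical change running through `fe_mul_inner` and `fe_sqr_inner` in this file replaces raw `uint128_t` expressions with the `rustsecp256k1_v0_8_0_u128_*` helpers from the newly included `int128.h`, so that builds without a native 128-bit type can substitute a portable two-limb implementation behind the same interface. As a mental model only, on compilers with `unsigned __int128` the helpers behave roughly as below (the names follow the patch; the bodies are an illustrative sketch, not the vendored implementation):

```
/* Illustrative native-int128 model of the int128.h interface used above. */
#include <stdint.h>

typedef unsigned __int128 rustsecp256k1_v0_8_0_uint128;

/* r = a * b (full 64x64->128 product) */
static void rustsecp256k1_v0_8_0_u128_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b) { *r = (rustsecp256k1_v0_8_0_uint128)a * b; }
/* r += a * b */
static void rustsecp256k1_v0_8_0_u128_accum_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b) { *r += (rustsecp256k1_v0_8_0_uint128)a * b; }
/* r += a */
static void rustsecp256k1_v0_8_0_u128_accum_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a) { *r += a; }
/* r >>= n, in place (assumes n < 128) */
static void rustsecp256k1_v0_8_0_u128_rshift(rustsecp256k1_v0_8_0_uint128 *r, unsigned int n) { *r >>= n; }
/* low 64 bits of a */
static uint64_t rustsecp256k1_v0_8_0_u128_to_u64(const rustsecp256k1_v0_8_0_uint128 *a) { return (uint64_t)*a; }
/* nonzero iff a fits in n bits (assumes n < 128) */
static int rustsecp256k1_v0_8_0_u128_check_bits(const rustsecp256k1_v0_8_0_uint128 *a, unsigned int n) { return (*a >> n) == 0; }
```

For instance, the converted pair `rustsecp256k1_v0_8_0_u128_accum_mul(&d, R, rustsecp256k1_v0_8_0_u128_to_u64(&c)); rustsecp256k1_v0_8_0_u128_rshift(&c, 64);` computes the same thing as the deleted `d += (uint128_t)R * (uint64_t)c; c >>= 64;`.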
- c += (uint128_t)a0 * a1; - VERIFY_BITS(c, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, a0, a1); + VERIFY_BITS_128(&c, 114); /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ - d += (uint128_t)a2 * a4 - + (uint128_t)a3 * a3; - VERIFY_BITS(d, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a2, a4); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a3, a3); + VERIFY_BITS_128(&d, 114); /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - c += (d & M) * R; d >>= 52; - VERIFY_BITS(c, 115); - VERIFY_BITS(d, 62); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, rustsecp256k1_v0_8_0_u128_to_u64(&d) & M, R); rustsecp256k1_v0_8_0_u128_rshift(&d, 52); + VERIFY_BITS_128(&c, 115); + VERIFY_BITS_128(&d, 62); /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - r[1] = c & M; c >>= 52; + r[1] = rustsecp256k1_v0_8_0_u128_to_u64(&c) & M; rustsecp256k1_v0_8_0_u128_rshift(&c, 52); VERIFY_BITS(r[1], 52); - VERIFY_BITS(c, 63); + VERIFY_BITS_128(&c, 63); /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - c += (uint128_t)a0 * a2 - + (uint128_t)a1 * a1; - VERIFY_BITS(c, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, a0, a2); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, a1, a1); + VERIFY_BITS_128(&c, 114); /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint128_t)a3 * a4; - VERIFY_BITS(d, 114); + rustsecp256k1_v0_8_0_u128_accum_mul(&d, a3, a4); + VERIFY_BITS_128(&d, 114); /* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += (uint128_t)R * (uint64_t)d; d >>= 64; - VERIFY_BITS(c, 115); - VERIFY_BITS(d, 50); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, R, rustsecp256k1_v0_8_0_u128_to_u64(&d)); rustsecp256k1_v0_8_0_u128_rshift(&d, 64); + VERIFY_BITS_128(&c, 115); + VERIFY_BITS_128(&d, 50); /* [(d<<12) 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[2] = c & M; c >>= 52; + r[2] = rustsecp256k1_v0_8_0_u128_to_u64(&c) & M; rustsecp256k1_v0_8_0_u128_rshift(&c, 52); VERIFY_BITS(r[2], 52); - VERIFY_BITS(c, 63); + VERIFY_BITS_128(&c, 63); /* [(d<<12) 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += (uint128_t)(R << 12) * (uint64_t)d + t3; - VERIFY_BITS(c, 100); + rustsecp256k1_v0_8_0_u128_accum_mul(&c, R << 12, rustsecp256k1_v0_8_0_u128_to_u64(&d)); + rustsecp256k1_v0_8_0_u128_accum_u64(&c, t3); + VERIFY_BITS_128(&c, 100); /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[3] = c & M; c >>= 52; + r[3] = rustsecp256k1_v0_8_0_u128_to_u64(&c) & M; rustsecp256k1_v0_8_0_u128_rshift(&c, 52); VERIFY_BITS(r[3], 52); - VERIFY_BITS(c, 48); + VERIFY_BITS_128(&c, 48); /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += t4; - VERIFY_BITS(c, 49); - /* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[4] = c; + r[4] = rustsecp256k1_v0_8_0_u128_to_u64(&c) + t4; VERIFY_BITS(r[4], 49); /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } diff --git a/secp256k1-sys/depend/secp256k1/src/field_impl.h b/secp256k1-sys/depend/secp256k1/src/field_impl.h index 1ed66ea72..ddc707486 100644 --- a/secp256k1-sys/depend/secp256k1/src/field_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/field_impl.h @@ -21,21 +21,21 @@ #error "Please select wide multiplication implementation" #endif -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_fe_equal(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b) { - rustsecp256k1_v0_7_0_fe na; - rustsecp256k1_v0_7_0_fe_negate(&na, a, 1); - rustsecp256k1_v0_7_0_fe_add(&na, b); - return rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&na); +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_fe_equal(const rustsecp256k1_v0_8_0_fe *a, 
const rustsecp256k1_v0_8_0_fe *b) { + rustsecp256k1_v0_8_0_fe na; + rustsecp256k1_v0_8_0_fe_negate(&na, a, 1); + rustsecp256k1_v0_8_0_fe_add(&na, b); + return rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&na); } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_fe_equal_var(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b) { - rustsecp256k1_v0_7_0_fe na; - rustsecp256k1_v0_7_0_fe_negate(&na, a, 1); - rustsecp256k1_v0_7_0_fe_add(&na, b); - return rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&na); +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_fe_equal_var(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *b) { + rustsecp256k1_v0_8_0_fe na; + rustsecp256k1_v0_8_0_fe_negate(&na, a, 1); + rustsecp256k1_v0_8_0_fe_add(&na, b); + return rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&na); } -static int rustsecp256k1_v0_7_0_fe_sqrt(rustsecp256k1_v0_7_0_fe *r, const rustsecp256k1_v0_7_0_fe *a) { +static int rustsecp256k1_v0_8_0_fe_sqrt(rustsecp256k1_v0_8_0_fe *r, const rustsecp256k1_v0_8_0_fe *a) { /** Given that p is congruent to 3 mod 4, we can compute the square root of * a mod p as the (p+1)/4'th power of a. * @@ -45,7 +45,7 @@ static int rustsecp256k1_v0_7_0_fe_sqrt(rustsecp256k1_v0_7_0_fe *r, const rustse * Also because (p+1)/4 is an even number, the computed square root is * itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)). */ - rustsecp256k1_v0_7_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; + rustsecp256k1_v0_8_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; int j; VERIFY_CHECK(r != a); @@ -55,86 +55,84 @@ static int rustsecp256k1_v0_7_0_fe_sqrt(rustsecp256k1_v0_7_0_fe *r, const rustse * 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] */ - rustsecp256k1_v0_7_0_fe_sqr(&x2, a); - rustsecp256k1_v0_7_0_fe_mul(&x2, &x2, a); + rustsecp256k1_v0_8_0_fe_sqr(&x2, a); + rustsecp256k1_v0_8_0_fe_mul(&x2, &x2, a); - rustsecp256k1_v0_7_0_fe_sqr(&x3, &x2); - rustsecp256k1_v0_7_0_fe_mul(&x3, &x3, a); + rustsecp256k1_v0_8_0_fe_sqr(&x3, &x2); + rustsecp256k1_v0_8_0_fe_mul(&x3, &x3, a); x6 = x3; for (j=0; j<3; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x6, &x6); + rustsecp256k1_v0_8_0_fe_sqr(&x6, &x6); } - rustsecp256k1_v0_7_0_fe_mul(&x6, &x6, &x3); + rustsecp256k1_v0_8_0_fe_mul(&x6, &x6, &x3); x9 = x6; for (j=0; j<3; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x9, &x9); + rustsecp256k1_v0_8_0_fe_sqr(&x9, &x9); } - rustsecp256k1_v0_7_0_fe_mul(&x9, &x9, &x3); + rustsecp256k1_v0_8_0_fe_mul(&x9, &x9, &x3); x11 = x9; for (j=0; j<2; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x11, &x11); + rustsecp256k1_v0_8_0_fe_sqr(&x11, &x11); } - rustsecp256k1_v0_7_0_fe_mul(&x11, &x11, &x2); + rustsecp256k1_v0_8_0_fe_mul(&x11, &x11, &x2); x22 = x11; for (j=0; j<11; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x22, &x22); + rustsecp256k1_v0_8_0_fe_sqr(&x22, &x22); } - rustsecp256k1_v0_7_0_fe_mul(&x22, &x22, &x11); + rustsecp256k1_v0_8_0_fe_mul(&x22, &x22, &x11); x44 = x22; for (j=0; j<22; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x44, &x44); + rustsecp256k1_v0_8_0_fe_sqr(&x44, &x44); } - rustsecp256k1_v0_7_0_fe_mul(&x44, &x44, &x22); + rustsecp256k1_v0_8_0_fe_mul(&x44, &x44, &x22); x88 = x44; for (j=0; j<44; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x88, &x88); + rustsecp256k1_v0_8_0_fe_sqr(&x88, &x88); } - rustsecp256k1_v0_7_0_fe_mul(&x88, &x88, &x44); + rustsecp256k1_v0_8_0_fe_mul(&x88, &x88, &x44); x176 = x88; for (j=0; j<88; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x176, &x176); + rustsecp256k1_v0_8_0_fe_sqr(&x176, &x176); } - rustsecp256k1_v0_7_0_fe_mul(&x176, 
&x176, &x88); + rustsecp256k1_v0_8_0_fe_mul(&x176, &x176, &x88); x220 = x176; for (j=0; j<44; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x220, &x220); + rustsecp256k1_v0_8_0_fe_sqr(&x220, &x220); } - rustsecp256k1_v0_7_0_fe_mul(&x220, &x220, &x44); + rustsecp256k1_v0_8_0_fe_mul(&x220, &x220, &x44); x223 = x220; for (j=0; j<3; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&x223, &x223); + rustsecp256k1_v0_8_0_fe_sqr(&x223, &x223); } - rustsecp256k1_v0_7_0_fe_mul(&x223, &x223, &x3); + rustsecp256k1_v0_8_0_fe_mul(&x223, &x223, &x3); /* The final result is then assembled using a sliding window over the blocks. */ t1 = x223; for (j=0; j<23; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&t1, &t1); + rustsecp256k1_v0_8_0_fe_sqr(&t1, &t1); } - rustsecp256k1_v0_7_0_fe_mul(&t1, &t1, &x22); + rustsecp256k1_v0_8_0_fe_mul(&t1, &t1, &x22); for (j=0; j<6; j++) { - rustsecp256k1_v0_7_0_fe_sqr(&t1, &t1); + rustsecp256k1_v0_8_0_fe_sqr(&t1, &t1); } - rustsecp256k1_v0_7_0_fe_mul(&t1, &t1, &x2); - rustsecp256k1_v0_7_0_fe_sqr(&t1, &t1); - rustsecp256k1_v0_7_0_fe_sqr(r, &t1); + rustsecp256k1_v0_8_0_fe_mul(&t1, &t1, &x2); + rustsecp256k1_v0_8_0_fe_sqr(&t1, &t1); + rustsecp256k1_v0_8_0_fe_sqr(r, &t1); /* Check that a square root was actually calculated */ - rustsecp256k1_v0_7_0_fe_sqr(&t1, r); - return rustsecp256k1_v0_7_0_fe_equal(&t1, a); + rustsecp256k1_v0_8_0_fe_sqr(&t1, r); + return rustsecp256k1_v0_8_0_fe_equal(&t1, a); } -static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - #endif /* SECP256K1_FIELD_IMPL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/group.h b/secp256k1-sys/depend/secp256k1/src/group.h index f5727cf3c..8e64ffd94 100644 --- a/secp256k1-sys/depend/secp256k1/src/group.h +++ b/secp256k1-sys/depend/secp256k1/src/group.h @@ -9,129 +9,149 @@ #include "field.h" -/** A group element of the secp256k1 curve, in affine coordinates. */ +/** A group element in affine coordinates on the secp256k1 curve, + * or occasionally on an isomorphic curve of the form y^2 = x^3 + 7*t^6. + * Note: For exhaustive test mode, secp256k1 is replaced by a small subgroup of a different curve. + */ typedef struct { - rustsecp256k1_v0_7_0_fe x; - rustsecp256k1_v0_7_0_fe y; + rustsecp256k1_v0_8_0_fe x; + rustsecp256k1_v0_8_0_fe y; int infinity; /* whether this represents the point at infinity */ -} rustsecp256k1_v0_7_0_ge; +} rustsecp256k1_v0_8_0_ge; #define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0} #define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} -/** A group element of the secp256k1 curve, in jacobian coordinates. */ +/** A group element of the secp256k1 curve, in jacobian coordinates. + * Note: For exhaustive test mode, secp256k1 is replaced by a small subgroup of a different curve.
+ */ typedef struct { - rustsecp256k1_v0_7_0_fe x; /* actual X: x/z^2 */ - rustsecp256k1_v0_7_0_fe y; /* actual Y: y/z^3 */ - rustsecp256k1_v0_7_0_fe z; + rustsecp256k1_v0_8_0_fe x; /* actual X: x/z^2 */ + rustsecp256k1_v0_8_0_fe y; /* actual Y: y/z^3 */ + rustsecp256k1_v0_8_0_fe z; int infinity; /* whether this represents the point at infinity */ -} rustsecp256k1_v0_7_0_gej; +} rustsecp256k1_v0_8_0_gej; #define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0} #define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} typedef struct { - rustsecp256k1_v0_7_0_fe_storage x; - rustsecp256k1_v0_7_0_fe_storage y; -} rustsecp256k1_v0_7_0_ge_storage; + rustsecp256k1_v0_8_0_fe_storage x; + rustsecp256k1_v0_8_0_fe_storage y; +} rustsecp256k1_v0_8_0_ge_storage; #define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))} #define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y) /** Set a group element equal to the point with given X and Y coordinates */ -static void rustsecp256k1_v0_7_0_ge_set_xy(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_fe *x, const rustsecp256k1_v0_7_0_fe *y); +static void rustsecp256k1_v0_8_0_ge_set_xy(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_fe *x, const rustsecp256k1_v0_8_0_fe *y); /** Set a group element (affine) equal to the point with the given X coordinate, and given oddness * for Y. Return value indicates whether the result is valid. */ -static int rustsecp256k1_v0_7_0_ge_set_xo_var(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_fe *x, int odd); +static int rustsecp256k1_v0_8_0_ge_set_xo_var(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_fe *x, int odd); /** Check whether a group element is the point at infinity. */ -static int rustsecp256k1_v0_7_0_ge_is_infinity(const rustsecp256k1_v0_7_0_ge *a); +static int rustsecp256k1_v0_8_0_ge_is_infinity(const rustsecp256k1_v0_8_0_ge *a); /** Check whether a group element is valid (i.e., on the curve). */ -static int rustsecp256k1_v0_7_0_ge_is_valid_var(const rustsecp256k1_v0_7_0_ge *a); +static int rustsecp256k1_v0_8_0_ge_is_valid_var(const rustsecp256k1_v0_8_0_ge *a); /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ -static void rustsecp256k1_v0_7_0_ge_neg(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_ge *a); +static void rustsecp256k1_v0_8_0_ge_neg(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge *a); /** Set a group element equal to another which is given in jacobian coordinates. Constant time. */ -static void rustsecp256k1_v0_7_0_ge_set_gej(rustsecp256k1_v0_7_0_ge *r, rustsecp256k1_v0_7_0_gej *a); +static void rustsecp256k1_v0_8_0_ge_set_gej(rustsecp256k1_v0_8_0_ge *r, rustsecp256k1_v0_8_0_gej *a); /** Set a group element equal to another which is given in jacobian coordinates. 
*/ -static void rustsecp256k1_v0_7_0_ge_set_gej_var(rustsecp256k1_v0_7_0_ge *r, rustsecp256k1_v0_7_0_gej *a); +static void rustsecp256k1_v0_8_0_ge_set_gej_var(rustsecp256k1_v0_8_0_ge *r, rustsecp256k1_v0_8_0_gej *a); /** Set a batch of group elements equal to the inputs given in jacobian coordinates */ -static void rustsecp256k1_v0_7_0_ge_set_all_gej_var(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_gej *a, size_t len); +static void rustsecp256k1_v0_8_0_ge_set_all_gej_var(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_gej *a, size_t len); -/** Bring a batch inputs given in jacobian coordinates (with known z-ratios) to - * the same global z "denominator". zr must contain the known z-ratios such - * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y - * coordinates of the result are stored in r, the common z coordinate is - * stored in globalz. */ -static void rustsecp256k1_v0_7_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_7_0_ge *r, rustsecp256k1_v0_7_0_fe *globalz, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_fe *zr); +/** Bring a batch of inputs to the same global z "denominator", based on ratios between + * (omitted) z coordinates of adjacent elements. + * + * Although the elements a[i] are _ge rather than _gej, they actually represent elements + * in Jacobian coordinates with their z coordinates omitted. + * + * Using the notation z(b) to represent the omitted z coordinate of b, the array zr of + * z coordinate ratios must satisfy zr[i] == z(a[i]) / z(a[i-1]) for 0 < 'i' < len. + * The zr[0] value is unused. + * + * This function adjusts the coordinates of 'a' in place so that for all 'i', z(a[i]) == z(a[len-1]). + * In other words, the initial value of z(a[len-1]) becomes the global z "denominator". Only the + * a[i].x and a[i].y coordinates are explicitly modified; the adjustment of the omitted z coordinate is + * implicit. + * + * The coordinates of the final element a[len-1] are not changed. + */ +static void rustsecp256k1_v0_8_0_ge_table_set_globalz(size_t len, rustsecp256k1_v0_8_0_ge *a, const rustsecp256k1_v0_8_0_fe *zr); /** Set a group element (affine) equal to the point at infinity. */ -static void rustsecp256k1_v0_7_0_ge_set_infinity(rustsecp256k1_v0_7_0_ge *r); +static void rustsecp256k1_v0_8_0_ge_set_infinity(rustsecp256k1_v0_8_0_ge *r); /** Set a group element (jacobian) equal to the point at infinity. */ -static void rustsecp256k1_v0_7_0_gej_set_infinity(rustsecp256k1_v0_7_0_gej *r); +static void rustsecp256k1_v0_8_0_gej_set_infinity(rustsecp256k1_v0_8_0_gej *r); /** Set a group element (jacobian) equal to another which is given in affine coordinates. */ -static void rustsecp256k1_v0_7_0_gej_set_ge(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_ge *a); +static void rustsecp256k1_v0_8_0_gej_set_ge(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_ge *a); + +/** Check two group elements (jacobian) for equality in variable time. */ +static int rustsecp256k1_v0_8_0_gej_eq_var(const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_gej *b); /** Compare the X coordinate of a group element (jacobian). 
*/ -static int rustsecp256k1_v0_7_0_gej_eq_x_var(const rustsecp256k1_v0_7_0_fe *x, const rustsecp256k1_v0_7_0_gej *a); +static int rustsecp256k1_v0_8_0_gej_eq_x_var(const rustsecp256k1_v0_8_0_fe *x, const rustsecp256k1_v0_8_0_gej *a); /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ -static void rustsecp256k1_v0_7_0_gej_neg(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a); +static void rustsecp256k1_v0_8_0_gej_neg(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a); /** Check whether a group element is the point at infinity. */ -static int rustsecp256k1_v0_7_0_gej_is_infinity(const rustsecp256k1_v0_7_0_gej *a); +static int rustsecp256k1_v0_8_0_gej_is_infinity(const rustsecp256k1_v0_8_0_gej *a); /** Set r equal to the double of a. Constant time. */ -static void rustsecp256k1_v0_7_0_gej_double(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a); +static void rustsecp256k1_v0_8_0_gej_double(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a); /** Set r equal to the double of a. If rzr is not-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0). */ -static void rustsecp256k1_v0_7_0_gej_double_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, rustsecp256k1_v0_7_0_fe *rzr); +static void rustsecp256k1_v0_8_0_gej_double_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, rustsecp256k1_v0_8_0_fe *rzr); /** Set r equal to the sum of a and b. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */ -static void rustsecp256k1_v0_7_0_gej_add_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_gej *b, rustsecp256k1_v0_7_0_fe *rzr); +static void rustsecp256k1_v0_8_0_gej_add_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_gej *b, rustsecp256k1_v0_8_0_fe *rzr); /** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */ -static void rustsecp256k1_v0_7_0_gej_add_ge(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_ge *b); +static void rustsecp256k1_v0_8_0_gej_add_ge(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_ge *b); /** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient - than rustsecp256k1_v0_7_0_gej_add_var. It is identical to rustsecp256k1_v0_7_0_gej_add_ge but without constant-time + than rustsecp256k1_v0_8_0_gej_add_var. It is identical to rustsecp256k1_v0_8_0_gej_add_ge but without constant-time guarantee, and b is allowed to be infinity. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */ -static void rustsecp256k1_v0_7_0_gej_add_ge_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_ge *b, rustsecp256k1_v0_7_0_fe *rzr); +static void rustsecp256k1_v0_8_0_gej_add_ge_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_ge *b, rustsecp256k1_v0_8_0_fe *rzr); /** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). 
*/ -static void rustsecp256k1_v0_7_0_gej_add_zinv_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_ge *b, const rustsecp256k1_v0_7_0_fe *bzinv); +static void rustsecp256k1_v0_8_0_gej_add_zinv_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_ge *b, const rustsecp256k1_v0_8_0_fe *bzinv); /** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */ -static void rustsecp256k1_v0_7_0_ge_mul_lambda(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_ge *a); +static void rustsecp256k1_v0_8_0_ge_mul_lambda(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge *a); -/** Clear a rustsecp256k1_v0_7_0_gej to prevent leaking sensitive information. */ -static void rustsecp256k1_v0_7_0_gej_clear(rustsecp256k1_v0_7_0_gej *r); +/** Clear a rustsecp256k1_v0_8_0_gej to prevent leaking sensitive information. */ +static void rustsecp256k1_v0_8_0_gej_clear(rustsecp256k1_v0_8_0_gej *r); -/** Clear a rustsecp256k1_v0_7_0_ge to prevent leaking sensitive information. */ -static void rustsecp256k1_v0_7_0_ge_clear(rustsecp256k1_v0_7_0_ge *r); +/** Clear a rustsecp256k1_v0_8_0_ge to prevent leaking sensitive information. */ +static void rustsecp256k1_v0_8_0_ge_clear(rustsecp256k1_v0_8_0_ge *r); /** Convert a group element to the storage type. */ -static void rustsecp256k1_v0_7_0_ge_to_storage(rustsecp256k1_v0_7_0_ge_storage *r, const rustsecp256k1_v0_7_0_ge *a); +static void rustsecp256k1_v0_8_0_ge_to_storage(rustsecp256k1_v0_8_0_ge_storage *r, const rustsecp256k1_v0_8_0_ge *a); /** Convert a group element back from the storage type. */ -static void rustsecp256k1_v0_7_0_ge_from_storage(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_ge_storage *a); +static void rustsecp256k1_v0_8_0_ge_from_storage(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge_storage *a); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_7_0_gej_cmov(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, int flag); +static void rustsecp256k1_v0_8_0_gej_cmov(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, int flag); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_7_0_ge_storage_cmov(rustsecp256k1_v0_7_0_ge_storage *r, const rustsecp256k1_v0_7_0_ge_storage *a, int flag); +static void rustsecp256k1_v0_8_0_ge_storage_cmov(rustsecp256k1_v0_8_0_ge_storage *r, const rustsecp256k1_v0_8_0_ge_storage *a, int flag); /** Rescale a jacobian point by b which must be non-zero. Constant-time. */ -static void rustsecp256k1_v0_7_0_gej_rescale(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_fe *b); +static void rustsecp256k1_v0_8_0_gej_rescale(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_fe *b); /** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve. * @@ -142,6 +162,6 @@ static void rustsecp256k1_v0_7_0_gej_rescale(rustsecp256k1_v0_7_0_gej *r, const * (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this * function checks whether a point that is on the curve is in fact also in that subgroup. 
*/ -static int rustsecp256k1_v0_7_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_7_0_ge* ge); +static int rustsecp256k1_v0_8_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_8_0_ge* ge); #endif /* SECP256K1_GROUP_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/group_impl.h b/secp256k1-sys/depend/secp256k1/src/group_impl.h index 20db8540b..d62d388d5 100644 --- a/secp256k1-sys/depend/secp256k1/src/group_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/group_impl.h @@ -42,16 +42,16 @@ */ #if defined(EXHAUSTIVE_TEST_ORDER) # if EXHAUSTIVE_TEST_ORDER == 13 -static const rustsecp256k1_v0_7_0_ge rustsecp256k1_v0_7_0_ge_const_g = SECP256K1_G_ORDER_13; +static const rustsecp256k1_v0_8_0_ge rustsecp256k1_v0_8_0_ge_const_g = SECP256K1_G_ORDER_13; -static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_fe_const_b = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_8_0_fe rustsecp256k1_v0_8_0_fe_const_b = SECP256K1_FE_CONST( 0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc, 0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417 ); # elif EXHAUSTIVE_TEST_ORDER == 199 -static const rustsecp256k1_v0_7_0_ge rustsecp256k1_v0_7_0_ge_const_g = SECP256K1_G_ORDER_199; +static const rustsecp256k1_v0_8_0_ge rustsecp256k1_v0_8_0_ge_const_g = SECP256K1_G_ORDER_199; -static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_fe_const_b = SECP256K1_FE_CONST( +static const rustsecp256k1_v0_8_0_fe rustsecp256k1_v0_8_0_fe_const_b = SECP256K1_FE_CONST( 0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1, 0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb ); @@ -59,80 +59,80 @@ static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_fe_const_b = SECP256K1 # error No known generator for the specified exhaustive test group order. # endif #else -static const rustsecp256k1_v0_7_0_ge rustsecp256k1_v0_7_0_ge_const_g = SECP256K1_G; +static const rustsecp256k1_v0_8_0_ge rustsecp256k1_v0_8_0_ge_const_g = SECP256K1_G; -static const rustsecp256k1_v0_7_0_fe rustsecp256k1_v0_7_0_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7); +static const rustsecp256k1_v0_8_0_fe rustsecp256k1_v0_8_0_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7); #endif -static void rustsecp256k1_v0_7_0_ge_set_gej_zinv(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_fe *zi) { - rustsecp256k1_v0_7_0_fe zi2; - rustsecp256k1_v0_7_0_fe zi3; +static void rustsecp256k1_v0_8_0_ge_set_gej_zinv(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_fe *zi) { + rustsecp256k1_v0_8_0_fe zi2; + rustsecp256k1_v0_8_0_fe zi3; VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_7_0_fe_sqr(&zi2, zi); - rustsecp256k1_v0_7_0_fe_mul(&zi3, &zi2, zi); - rustsecp256k1_v0_7_0_fe_mul(&r->x, &a->x, &zi2); - rustsecp256k1_v0_7_0_fe_mul(&r->y, &a->y, &zi3); + rustsecp256k1_v0_8_0_fe_sqr(&zi2, zi); + rustsecp256k1_v0_8_0_fe_mul(&zi3, &zi2, zi); + rustsecp256k1_v0_8_0_fe_mul(&r->x, &a->x, &zi2); + rustsecp256k1_v0_8_0_fe_mul(&r->y, &a->y, &zi3); r->infinity = a->infinity; } -static void rustsecp256k1_v0_7_0_ge_set_xy(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_fe *x, const rustsecp256k1_v0_7_0_fe *y) { +static void rustsecp256k1_v0_8_0_ge_set_xy(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_fe *x, const rustsecp256k1_v0_8_0_fe *y) { r->infinity = 0; r->x = *x; r->y = *y; } -static int rustsecp256k1_v0_7_0_ge_is_infinity(const rustsecp256k1_v0_7_0_ge *a) { +static int rustsecp256k1_v0_8_0_ge_is_infinity(const rustsecp256k1_v0_8_0_ge *a) { return a->infinity; } -static void 
rustsecp256k1_v0_7_0_ge_neg(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_ge *a) { +static void rustsecp256k1_v0_8_0_ge_neg(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge *a) { *r = *a; - rustsecp256k1_v0_7_0_fe_normalize_weak(&r->y); - rustsecp256k1_v0_7_0_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_8_0_fe_normalize_weak(&r->y); + rustsecp256k1_v0_8_0_fe_negate(&r->y, &r->y, 1); } -static void rustsecp256k1_v0_7_0_ge_set_gej(rustsecp256k1_v0_7_0_ge *r, rustsecp256k1_v0_7_0_gej *a) { - rustsecp256k1_v0_7_0_fe z2, z3; +static void rustsecp256k1_v0_8_0_ge_set_gej(rustsecp256k1_v0_8_0_ge *r, rustsecp256k1_v0_8_0_gej *a) { + rustsecp256k1_v0_8_0_fe z2, z3; r->infinity = a->infinity; - rustsecp256k1_v0_7_0_fe_inv(&a->z, &a->z); - rustsecp256k1_v0_7_0_fe_sqr(&z2, &a->z); - rustsecp256k1_v0_7_0_fe_mul(&z3, &a->z, &z2); - rustsecp256k1_v0_7_0_fe_mul(&a->x, &a->x, &z2); - rustsecp256k1_v0_7_0_fe_mul(&a->y, &a->y, &z3); - rustsecp256k1_v0_7_0_fe_set_int(&a->z, 1); + rustsecp256k1_v0_8_0_fe_inv(&a->z, &a->z); + rustsecp256k1_v0_8_0_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_8_0_fe_mul(&z3, &a->z, &z2); + rustsecp256k1_v0_8_0_fe_mul(&a->x, &a->x, &z2); + rustsecp256k1_v0_8_0_fe_mul(&a->y, &a->y, &z3); + rustsecp256k1_v0_8_0_fe_set_int(&a->z, 1); r->x = a->x; r->y = a->y; } -static void rustsecp256k1_v0_7_0_ge_set_gej_var(rustsecp256k1_v0_7_0_ge *r, rustsecp256k1_v0_7_0_gej *a) { - rustsecp256k1_v0_7_0_fe z2, z3; +static void rustsecp256k1_v0_8_0_ge_set_gej_var(rustsecp256k1_v0_8_0_ge *r, rustsecp256k1_v0_8_0_gej *a) { + rustsecp256k1_v0_8_0_fe z2, z3; if (a->infinity) { - rustsecp256k1_v0_7_0_ge_set_infinity(r); + rustsecp256k1_v0_8_0_ge_set_infinity(r); return; } - rustsecp256k1_v0_7_0_fe_inv_var(&a->z, &a->z); - rustsecp256k1_v0_7_0_fe_sqr(&z2, &a->z); - rustsecp256k1_v0_7_0_fe_mul(&z3, &a->z, &z2); - rustsecp256k1_v0_7_0_fe_mul(&a->x, &a->x, &z2); - rustsecp256k1_v0_7_0_fe_mul(&a->y, &a->y, &z3); - rustsecp256k1_v0_7_0_fe_set_int(&a->z, 1); - rustsecp256k1_v0_7_0_ge_set_xy(r, &a->x, &a->y); + rustsecp256k1_v0_8_0_fe_inv_var(&a->z, &a->z); + rustsecp256k1_v0_8_0_fe_sqr(&z2, &a->z); + rustsecp256k1_v0_8_0_fe_mul(&z3, &a->z, &z2); + rustsecp256k1_v0_8_0_fe_mul(&a->x, &a->x, &z2); + rustsecp256k1_v0_8_0_fe_mul(&a->y, &a->y, &z3); + rustsecp256k1_v0_8_0_fe_set_int(&a->z, 1); + rustsecp256k1_v0_8_0_ge_set_xy(r, &a->x, &a->y); } -static void rustsecp256k1_v0_7_0_ge_set_all_gej_var(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_gej *a, size_t len) { - rustsecp256k1_v0_7_0_fe u; +static void rustsecp256k1_v0_8_0_ge_set_all_gej_var(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_gej *a, size_t len) { + rustsecp256k1_v0_8_0_fe u; size_t i; size_t last_i = SIZE_MAX; for (i = 0; i < len; i++) { if (a[i].infinity) { - rustsecp256k1_v0_7_0_ge_set_infinity(&r[i]); + rustsecp256k1_v0_8_0_ge_set_infinity(&r[i]); } else { /* Use destination's x coordinates as scratch space */ if (last_i == SIZE_MAX) { r[i].x = a[i].z; } else { - rustsecp256k1_v0_7_0_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); + rustsecp256k1_v0_8_0_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); } last_i = i; } @@ -140,14 +140,14 @@ static void rustsecp256k1_v0_7_0_ge_set_all_gej_var(rustsecp256k1_v0_7_0_ge *r, if (last_i == SIZE_MAX) { return; } - rustsecp256k1_v0_7_0_fe_inv_var(&u, &r[last_i].x); + rustsecp256k1_v0_8_0_fe_inv_var(&u, &r[last_i].x); i = last_i; while (i > 0) { i--; if (!a[i].infinity) { - rustsecp256k1_v0_7_0_fe_mul(&r[last_i].x, &r[i].x, &u); - rustsecp256k1_v0_7_0_fe_mul(&u, &u, &a[last_i].z); + 
rustsecp256k1_v0_8_0_fe_mul(&r[last_i].x, &r[i].x, &u); + rustsecp256k1_v0_8_0_fe_mul(&u, &u, &a[last_i].z); last_i = i; } } @@ -156,156 +156,160 @@ static void rustsecp256k1_v0_7_0_ge_set_all_gej_var(rustsecp256k1_v0_7_0_ge *r, for (i = 0; i < len; i++) { if (!a[i].infinity) { - rustsecp256k1_v0_7_0_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); + rustsecp256k1_v0_8_0_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); } } } -static void rustsecp256k1_v0_7_0_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_7_0_ge *r, rustsecp256k1_v0_7_0_fe *globalz, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_fe *zr) { +static void rustsecp256k1_v0_8_0_ge_table_set_globalz(size_t len, rustsecp256k1_v0_8_0_ge *a, const rustsecp256k1_v0_8_0_fe *zr) { size_t i = len - 1; - rustsecp256k1_v0_7_0_fe zs; + rustsecp256k1_v0_8_0_fe zs; if (len > 0) { - /* The z of the final point gives us the "global Z" for the table. */ - r[i].x = a[i].x; - r[i].y = a[i].y; /* Ensure all y values are in weak normal form for fast negation of points */ - rustsecp256k1_v0_7_0_fe_normalize_weak(&r[i].y); - *globalz = a[i].z; - r[i].infinity = 0; + rustsecp256k1_v0_8_0_fe_normalize_weak(&a[i].y); zs = zr[i]; /* Work our way backwards, using the z-ratios to scale the x/y values. */ while (i > 0) { + rustsecp256k1_v0_8_0_gej tmpa; if (i != len - 1) { - rustsecp256k1_v0_7_0_fe_mul(&zs, &zs, &zr[i]); + rustsecp256k1_v0_8_0_fe_mul(&zs, &zs, &zr[i]); } i--; - rustsecp256k1_v0_7_0_ge_set_gej_zinv(&r[i], &a[i], &zs); + tmpa.x = a[i].x; + tmpa.y = a[i].y; + tmpa.infinity = 0; + rustsecp256k1_v0_8_0_ge_set_gej_zinv(&a[i], &tmpa, &zs); } } } -static void rustsecp256k1_v0_7_0_gej_set_infinity(rustsecp256k1_v0_7_0_gej *r) { +static void rustsecp256k1_v0_8_0_gej_set_infinity(rustsecp256k1_v0_8_0_gej *r) { r->infinity = 1; - rustsecp256k1_v0_7_0_fe_clear(&r->x); - rustsecp256k1_v0_7_0_fe_clear(&r->y); - rustsecp256k1_v0_7_0_fe_clear(&r->z); + rustsecp256k1_v0_8_0_fe_clear(&r->x); + rustsecp256k1_v0_8_0_fe_clear(&r->y); + rustsecp256k1_v0_8_0_fe_clear(&r->z); } -static void rustsecp256k1_v0_7_0_ge_set_infinity(rustsecp256k1_v0_7_0_ge *r) { +static void rustsecp256k1_v0_8_0_ge_set_infinity(rustsecp256k1_v0_8_0_ge *r) { r->infinity = 1; - rustsecp256k1_v0_7_0_fe_clear(&r->x); - rustsecp256k1_v0_7_0_fe_clear(&r->y); + rustsecp256k1_v0_8_0_fe_clear(&r->x); + rustsecp256k1_v0_8_0_fe_clear(&r->y); } -static void rustsecp256k1_v0_7_0_gej_clear(rustsecp256k1_v0_7_0_gej *r) { +static void rustsecp256k1_v0_8_0_gej_clear(rustsecp256k1_v0_8_0_gej *r) { r->infinity = 0; - rustsecp256k1_v0_7_0_fe_clear(&r->x); - rustsecp256k1_v0_7_0_fe_clear(&r->y); - rustsecp256k1_v0_7_0_fe_clear(&r->z); + rustsecp256k1_v0_8_0_fe_clear(&r->x); + rustsecp256k1_v0_8_0_fe_clear(&r->y); + rustsecp256k1_v0_8_0_fe_clear(&r->z); } -static void rustsecp256k1_v0_7_0_ge_clear(rustsecp256k1_v0_7_0_ge *r) { +static void rustsecp256k1_v0_8_0_ge_clear(rustsecp256k1_v0_8_0_ge *r) { r->infinity = 0; - rustsecp256k1_v0_7_0_fe_clear(&r->x); - rustsecp256k1_v0_7_0_fe_clear(&r->y); + rustsecp256k1_v0_8_0_fe_clear(&r->x); + rustsecp256k1_v0_8_0_fe_clear(&r->y); } -static int rustsecp256k1_v0_7_0_ge_set_xo_var(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_fe *x, int odd) { - rustsecp256k1_v0_7_0_fe x2, x3; +static int rustsecp256k1_v0_8_0_ge_set_xo_var(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_fe *x, int odd) { + rustsecp256k1_v0_8_0_fe x2, x3; r->x = *x; - rustsecp256k1_v0_7_0_fe_sqr(&x2, x); - rustsecp256k1_v0_7_0_fe_mul(&x3, x, &x2); + rustsecp256k1_v0_8_0_fe_sqr(&x2, x); 
+ rustsecp256k1_v0_8_0_fe_mul(&x3, x, &x2); r->infinity = 0; - rustsecp256k1_v0_7_0_fe_add(&x3, &rustsecp256k1_v0_7_0_fe_const_b); - if (!rustsecp256k1_v0_7_0_fe_sqrt(&r->y, &x3)) { + rustsecp256k1_v0_8_0_fe_add(&x3, &rustsecp256k1_v0_8_0_fe_const_b); + if (!rustsecp256k1_v0_8_0_fe_sqrt(&r->y, &x3)) { return 0; } - rustsecp256k1_v0_7_0_fe_normalize_var(&r->y); - if (rustsecp256k1_v0_7_0_fe_is_odd(&r->y) != odd) { - rustsecp256k1_v0_7_0_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_8_0_fe_normalize_var(&r->y); + if (rustsecp256k1_v0_8_0_fe_is_odd(&r->y) != odd) { + rustsecp256k1_v0_8_0_fe_negate(&r->y, &r->y, 1); } return 1; } -static void rustsecp256k1_v0_7_0_gej_set_ge(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_ge *a) { +static void rustsecp256k1_v0_8_0_gej_set_ge(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_ge *a) { r->infinity = a->infinity; r->x = a->x; r->y = a->y; - rustsecp256k1_v0_7_0_fe_set_int(&r->z, 1); + rustsecp256k1_v0_8_0_fe_set_int(&r->z, 1); } -static int rustsecp256k1_v0_7_0_gej_eq_x_var(const rustsecp256k1_v0_7_0_fe *x, const rustsecp256k1_v0_7_0_gej *a) { - rustsecp256k1_v0_7_0_fe r, r2; +static int rustsecp256k1_v0_8_0_gej_eq_var(const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_gej *b) { + rustsecp256k1_v0_8_0_gej tmp; + rustsecp256k1_v0_8_0_gej_neg(&tmp, a); + rustsecp256k1_v0_8_0_gej_add_var(&tmp, &tmp, b, NULL); + return rustsecp256k1_v0_8_0_gej_is_infinity(&tmp); +} + +static int rustsecp256k1_v0_8_0_gej_eq_x_var(const rustsecp256k1_v0_8_0_fe *x, const rustsecp256k1_v0_8_0_gej *a) { + rustsecp256k1_v0_8_0_fe r, r2; VERIFY_CHECK(!a->infinity); - rustsecp256k1_v0_7_0_fe_sqr(&r, &a->z); rustsecp256k1_v0_7_0_fe_mul(&r, &r, x); - r2 = a->x; rustsecp256k1_v0_7_0_fe_normalize_weak(&r2); - return rustsecp256k1_v0_7_0_fe_equal_var(&r, &r2); + rustsecp256k1_v0_8_0_fe_sqr(&r, &a->z); rustsecp256k1_v0_8_0_fe_mul(&r, &r, x); + r2 = a->x; rustsecp256k1_v0_8_0_fe_normalize_weak(&r2); + return rustsecp256k1_v0_8_0_fe_equal_var(&r, &r2); } -static void rustsecp256k1_v0_7_0_gej_neg(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a) { +static void rustsecp256k1_v0_8_0_gej_neg(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a) { r->infinity = a->infinity; r->x = a->x; r->y = a->y; r->z = a->z; - rustsecp256k1_v0_7_0_fe_normalize_weak(&r->y); - rustsecp256k1_v0_7_0_fe_negate(&r->y, &r->y, 1); + rustsecp256k1_v0_8_0_fe_normalize_weak(&r->y); + rustsecp256k1_v0_8_0_fe_negate(&r->y, &r->y, 1); } -static int rustsecp256k1_v0_7_0_gej_is_infinity(const rustsecp256k1_v0_7_0_gej *a) { +static int rustsecp256k1_v0_8_0_gej_is_infinity(const rustsecp256k1_v0_8_0_gej *a) { return a->infinity; } -static int rustsecp256k1_v0_7_0_ge_is_valid_var(const rustsecp256k1_v0_7_0_ge *a) { - rustsecp256k1_v0_7_0_fe y2, x3; +static int rustsecp256k1_v0_8_0_ge_is_valid_var(const rustsecp256k1_v0_8_0_ge *a) { + rustsecp256k1_v0_8_0_fe y2, x3; if (a->infinity) { return 0; } /* y^2 = x^3 + 7 */ - rustsecp256k1_v0_7_0_fe_sqr(&y2, &a->y); - rustsecp256k1_v0_7_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_7_0_fe_mul(&x3, &x3, &a->x); - rustsecp256k1_v0_7_0_fe_add(&x3, &rustsecp256k1_v0_7_0_fe_const_b); - rustsecp256k1_v0_7_0_fe_normalize_weak(&x3); - return rustsecp256k1_v0_7_0_fe_equal_var(&y2, &x3); + rustsecp256k1_v0_8_0_fe_sqr(&y2, &a->y); + rustsecp256k1_v0_8_0_fe_sqr(&x3, &a->x); rustsecp256k1_v0_8_0_fe_mul(&x3, &x3, &a->x); + rustsecp256k1_v0_8_0_fe_add(&x3, &rustsecp256k1_v0_8_0_fe_const_b); + rustsecp256k1_v0_8_0_fe_normalize_weak(&x3); + return 
rustsecp256k1_v0_8_0_fe_equal_var(&y2, &x3); } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_gej_double(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a) { - /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate. - * - * Note that there is an implementation described at - * https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l - * which trades a multiply for a square, but in practice this is actually slower, - * mainly because it requires more normalizations. - */ - rustsecp256k1_v0_7_0_fe t1,t2,t3,t4; +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_gej_double(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a) { + /* Operations: 3 mul, 4 sqr, 8 add/half/mul_int/negate */ + rustsecp256k1_v0_8_0_fe l, s, t; r->infinity = a->infinity; - rustsecp256k1_v0_7_0_fe_mul(&r->z, &a->z, &a->y); - rustsecp256k1_v0_7_0_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ - rustsecp256k1_v0_7_0_fe_sqr(&t1, &a->x); - rustsecp256k1_v0_7_0_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ - rustsecp256k1_v0_7_0_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ - rustsecp256k1_v0_7_0_fe_sqr(&t3, &a->y); - rustsecp256k1_v0_7_0_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ - rustsecp256k1_v0_7_0_fe_sqr(&t4, &t3); - rustsecp256k1_v0_7_0_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ - rustsecp256k1_v0_7_0_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ - r->x = t3; - rustsecp256k1_v0_7_0_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ - rustsecp256k1_v0_7_0_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ - rustsecp256k1_v0_7_0_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ - rustsecp256k1_v0_7_0_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ - rustsecp256k1_v0_7_0_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ - rustsecp256k1_v0_7_0_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ - rustsecp256k1_v0_7_0_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ - rustsecp256k1_v0_7_0_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ - rustsecp256k1_v0_7_0_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ -} - -static void rustsecp256k1_v0_7_0_gej_double_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, rustsecp256k1_v0_7_0_fe *rzr) { + /* Formula used: + * L = (3/2) * X1^2 + * S = Y1^2 + * T = -X1*S + * X3 = L^2 + 2*T + * Y3 = -(L*(X3 + T) + S^2) + * Z3 = Y1*Z1 + */ + + rustsecp256k1_v0_8_0_fe_mul(&r->z, &a->z, &a->y); /* Z3 = Y1*Z1 (1) */ + rustsecp256k1_v0_8_0_fe_sqr(&s, &a->y); /* S = Y1^2 (1) */ + rustsecp256k1_v0_8_0_fe_sqr(&l, &a->x); /* L = X1^2 (1) */ + rustsecp256k1_v0_8_0_fe_mul_int(&l, 3); /* L = 3*X1^2 (3) */ + rustsecp256k1_v0_8_0_fe_half(&l); /* L = 3/2*X1^2 (2) */ + rustsecp256k1_v0_8_0_fe_negate(&t, &s, 1); /* T = -S (2) */ + rustsecp256k1_v0_8_0_fe_mul(&t, &t, &a->x); /* T = -X1*S (1) */ + rustsecp256k1_v0_8_0_fe_sqr(&r->x, &l); /* X3 = L^2 (1) */ + rustsecp256k1_v0_8_0_fe_add(&r->x, &t); /* X3 = L^2 + T (2) */ + rustsecp256k1_v0_8_0_fe_add(&r->x, &t); /* X3 = L^2 + 2*T (3) */ + rustsecp256k1_v0_8_0_fe_sqr(&s, &s); /* S' = S^2 (1) */ + rustsecp256k1_v0_8_0_fe_add(&t, &r->x); /* T' = X3 + T (4) */ + rustsecp256k1_v0_8_0_fe_mul(&r->y, &t, &l); /* Y3 = L*(X3 + T) (1) */ + rustsecp256k1_v0_8_0_fe_add(&r->y, &s); /* Y3 = L*(X3 + T) + S^2 (2) */ + rustsecp256k1_v0_8_0_fe_negate(&r->y, &r->y, 2); /* Y3 = -(L*(X3 + T) + S^2) (3) */ +} + +static void rustsecp256k1_v0_8_0_gej_double_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, rustsecp256k1_v0_8_0_fe *rzr) { /** For secp256k1, 2Q is infinity if and only if Q is infinity. 
This is because if 2Q = infinity, * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. @@ -317,143 +321,156 @@ static void rustsecp256k1_v0_7_0_gej_double_var(rustsecp256k1_v0_7_0_gej *r, con * point will be gibberish (z = 0 but infinity = 0). */ if (a->infinity) { - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); if (rzr != NULL) { - rustsecp256k1_v0_7_0_fe_set_int(rzr, 1); + rustsecp256k1_v0_8_0_fe_set_int(rzr, 1); } return; } if (rzr != NULL) { *rzr = a->y; - rustsecp256k1_v0_7_0_fe_normalize_weak(rzr); - rustsecp256k1_v0_7_0_fe_mul_int(rzr, 2); + rustsecp256k1_v0_8_0_fe_normalize_weak(rzr); } - rustsecp256k1_v0_7_0_gej_double(r, a); + rustsecp256k1_v0_8_0_gej_double(r, a); } -static void rustsecp256k1_v0_7_0_gej_add_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_gej *b, rustsecp256k1_v0_7_0_fe *rzr) { - /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */ - rustsecp256k1_v0_7_0_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; +static void rustsecp256k1_v0_8_0_gej_add_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_gej *b, rustsecp256k1_v0_8_0_fe *rzr) { + /* 12 mul, 4 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */ + rustsecp256k1_v0_8_0_fe z22, z12, u1, u2, s1, s2, h, i, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); *r = *b; return; } - if (b->infinity) { if (rzr != NULL) { - rustsecp256k1_v0_7_0_fe_set_int(rzr, 1); + rustsecp256k1_v0_8_0_fe_set_int(rzr, 1); } *r = *a; return; } - r->infinity = 0; - rustsecp256k1_v0_7_0_fe_sqr(&z22, &b->z); - rustsecp256k1_v0_7_0_fe_sqr(&z12, &a->z); - rustsecp256k1_v0_7_0_fe_mul(&u1, &a->x, &z22); - rustsecp256k1_v0_7_0_fe_mul(&u2, &b->x, &z12); - rustsecp256k1_v0_7_0_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_7_0_fe_mul(&s1, &s1, &b->z); - rustsecp256k1_v0_7_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_7_0_fe_mul(&s2, &s2, &a->z); - rustsecp256k1_v0_7_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_7_0_fe_add(&h, &u2); - rustsecp256k1_v0_7_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_7_0_fe_add(&i, &s2); - if (rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_7_0_gej_double_var(r, a, rzr); + rustsecp256k1_v0_8_0_fe_sqr(&z22, &b->z); + rustsecp256k1_v0_8_0_fe_sqr(&z12, &a->z); + rustsecp256k1_v0_8_0_fe_mul(&u1, &a->x, &z22); + rustsecp256k1_v0_8_0_fe_mul(&u2, &b->x, &z12); + rustsecp256k1_v0_8_0_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_8_0_fe_mul(&s1, &s1, &b->z); + rustsecp256k1_v0_8_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_8_0_fe_mul(&s2, &s2, &a->z); + rustsecp256k1_v0_8_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_8_0_fe_add(&h, &u2); + rustsecp256k1_v0_8_0_fe_negate(&i, &s2, 1); rustsecp256k1_v0_8_0_fe_add(&i, &s1); + if (rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_8_0_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { - rustsecp256k1_v0_7_0_fe_set_int(rzr, 0); + rustsecp256k1_v0_8_0_fe_set_int(rzr, 0); } - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); } return; } - rustsecp256k1_v0_7_0_fe_sqr(&i2, &i); - rustsecp256k1_v0_7_0_fe_sqr(&h2, &h); - rustsecp256k1_v0_7_0_fe_mul(&h3, &h, &h2); - rustsecp256k1_v0_7_0_fe_mul(&h, &h, &b->z); + + r->infinity = 0; + 
rustsecp256k1_v0_8_0_fe_mul(&t, &h, &b->z); if (rzr != NULL) { - *rzr = h; + *rzr = t; } - rustsecp256k1_v0_7_0_fe_mul(&r->z, &a->z, &h); - rustsecp256k1_v0_7_0_fe_mul(&t, &u1, &h2); - r->x = t; rustsecp256k1_v0_7_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_7_0_fe_add(&r->x, &h3); rustsecp256k1_v0_7_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_7_0_fe_add(&r->x, &i2); - rustsecp256k1_v0_7_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_7_0_fe_add(&r->y, &t); rustsecp256k1_v0_7_0_fe_mul(&r->y, &r->y, &i); - rustsecp256k1_v0_7_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_7_0_fe_negate(&h3, &h3, 1); - rustsecp256k1_v0_7_0_fe_add(&r->y, &h3); + rustsecp256k1_v0_8_0_fe_mul(&r->z, &a->z, &t); + + rustsecp256k1_v0_8_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_8_0_fe_negate(&h2, &h2, 1); + rustsecp256k1_v0_8_0_fe_mul(&h3, &h2, &h); + rustsecp256k1_v0_8_0_fe_mul(&t, &u1, &h2); + + rustsecp256k1_v0_8_0_fe_sqr(&r->x, &i); + rustsecp256k1_v0_8_0_fe_add(&r->x, &h3); + rustsecp256k1_v0_8_0_fe_add(&r->x, &t); + rustsecp256k1_v0_8_0_fe_add(&r->x, &t); + + rustsecp256k1_v0_8_0_fe_add(&t, &r->x); + rustsecp256k1_v0_8_0_fe_mul(&r->y, &t, &i); + rustsecp256k1_v0_8_0_fe_mul(&h3, &h3, &s1); + rustsecp256k1_v0_8_0_fe_add(&r->y, &h3); } -static void rustsecp256k1_v0_7_0_gej_add_ge_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_ge *b, rustsecp256k1_v0_7_0_fe *rzr) { - /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - rustsecp256k1_v0_7_0_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; +static void rustsecp256k1_v0_8_0_gej_add_ge_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_ge *b, rustsecp256k1_v0_8_0_fe *rzr) { + /* 8 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */ + rustsecp256k1_v0_8_0_fe z12, u1, u2, s1, s2, h, i, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); - rustsecp256k1_v0_7_0_gej_set_ge(r, b); + rustsecp256k1_v0_8_0_gej_set_ge(r, b); return; } if (b->infinity) { if (rzr != NULL) { - rustsecp256k1_v0_7_0_fe_set_int(rzr, 1); + rustsecp256k1_v0_8_0_fe_set_int(rzr, 1); } *r = *a; return; } - r->infinity = 0; - rustsecp256k1_v0_7_0_fe_sqr(&z12, &a->z); - u1 = a->x; rustsecp256k1_v0_7_0_fe_normalize_weak(&u1); - rustsecp256k1_v0_7_0_fe_mul(&u2, &b->x, &z12); - s1 = a->y; rustsecp256k1_v0_7_0_fe_normalize_weak(&s1); - rustsecp256k1_v0_7_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_7_0_fe_mul(&s2, &s2, &a->z); - rustsecp256k1_v0_7_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_7_0_fe_add(&h, &u2); - rustsecp256k1_v0_7_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_7_0_fe_add(&i, &s2); - if (rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_7_0_gej_double_var(r, a, rzr); + rustsecp256k1_v0_8_0_fe_sqr(&z12, &a->z); + u1 = a->x; rustsecp256k1_v0_8_0_fe_normalize_weak(&u1); + rustsecp256k1_v0_8_0_fe_mul(&u2, &b->x, &z12); + s1 = a->y; rustsecp256k1_v0_8_0_fe_normalize_weak(&s1); + rustsecp256k1_v0_8_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_8_0_fe_mul(&s2, &s2, &a->z); + rustsecp256k1_v0_8_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_8_0_fe_add(&h, &u2); + rustsecp256k1_v0_8_0_fe_negate(&i, &s2, 1); rustsecp256k1_v0_8_0_fe_add(&i, &s1); + if (rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_8_0_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { - rustsecp256k1_v0_7_0_fe_set_int(rzr, 0); + 
rustsecp256k1_v0_8_0_fe_set_int(rzr, 0); } - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); } return; } - rustsecp256k1_v0_7_0_fe_sqr(&i2, &i); - rustsecp256k1_v0_7_0_fe_sqr(&h2, &h); - rustsecp256k1_v0_7_0_fe_mul(&h3, &h, &h2); + + r->infinity = 0; if (rzr != NULL) { *rzr = h; } - rustsecp256k1_v0_7_0_fe_mul(&r->z, &a->z, &h); - rustsecp256k1_v0_7_0_fe_mul(&t, &u1, &h2); - r->x = t; rustsecp256k1_v0_7_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_7_0_fe_add(&r->x, &h3); rustsecp256k1_v0_7_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_7_0_fe_add(&r->x, &i2); - rustsecp256k1_v0_7_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_7_0_fe_add(&r->y, &t); rustsecp256k1_v0_7_0_fe_mul(&r->y, &r->y, &i); - rustsecp256k1_v0_7_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_7_0_fe_negate(&h3, &h3, 1); - rustsecp256k1_v0_7_0_fe_add(&r->y, &h3); + rustsecp256k1_v0_8_0_fe_mul(&r->z, &a->z, &h); + + rustsecp256k1_v0_8_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_8_0_fe_negate(&h2, &h2, 1); + rustsecp256k1_v0_8_0_fe_mul(&h3, &h2, &h); + rustsecp256k1_v0_8_0_fe_mul(&t, &u1, &h2); + + rustsecp256k1_v0_8_0_fe_sqr(&r->x, &i); + rustsecp256k1_v0_8_0_fe_add(&r->x, &h3); + rustsecp256k1_v0_8_0_fe_add(&r->x, &t); + rustsecp256k1_v0_8_0_fe_add(&r->x, &t); + + rustsecp256k1_v0_8_0_fe_add(&t, &r->x); + rustsecp256k1_v0_8_0_fe_mul(&r->y, &t, &i); + rustsecp256k1_v0_8_0_fe_mul(&h3, &h3, &s1); + rustsecp256k1_v0_8_0_fe_add(&r->y, &h3); } -static void rustsecp256k1_v0_7_0_gej_add_zinv_var(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_ge *b, const rustsecp256k1_v0_7_0_fe *bzinv) { - /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - rustsecp256k1_v0_7_0_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; +static void rustsecp256k1_v0_8_0_gej_add_zinv_var(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_ge *b, const rustsecp256k1_v0_8_0_fe *bzinv) { + /* 9 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */ + rustsecp256k1_v0_8_0_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t; - if (b->infinity) { - *r = *a; - return; - } if (a->infinity) { - rustsecp256k1_v0_7_0_fe bzinv2, bzinv3; + rustsecp256k1_v0_8_0_fe bzinv2, bzinv3; r->infinity = b->infinity; - rustsecp256k1_v0_7_0_fe_sqr(&bzinv2, bzinv); - rustsecp256k1_v0_7_0_fe_mul(&bzinv3, &bzinv2, bzinv); - rustsecp256k1_v0_7_0_fe_mul(&r->x, &b->x, &bzinv2); - rustsecp256k1_v0_7_0_fe_mul(&r->y, &b->y, &bzinv3); - rustsecp256k1_v0_7_0_fe_set_int(&r->z, 1); + rustsecp256k1_v0_8_0_fe_sqr(&bzinv2, bzinv); + rustsecp256k1_v0_8_0_fe_mul(&bzinv3, &bzinv2, bzinv); + rustsecp256k1_v0_8_0_fe_mul(&r->x, &b->x, &bzinv2); + rustsecp256k1_v0_8_0_fe_mul(&r->y, &b->y, &bzinv3); + rustsecp256k1_v0_8_0_fe_set_int(&r->z, 1); + return; + } + if (b->infinity) { + *r = *a; return; } - r->infinity = 0; /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to * secp256k1's isomorphism we can multiply the Z coordinates on both sides @@ -463,40 +480,48 @@ static void rustsecp256k1_v0_7_0_gej_add_zinv_var(rustsecp256k1_v0_7_0_gej *r, c * The variable az below holds the modified Z coordinate for a, which is used * for the computation of rx and ry, but not for rz. 
*/ - rustsecp256k1_v0_7_0_fe_mul(&az, &a->z, bzinv); - - rustsecp256k1_v0_7_0_fe_sqr(&z12, &az); - u1 = a->x; rustsecp256k1_v0_7_0_fe_normalize_weak(&u1); - rustsecp256k1_v0_7_0_fe_mul(&u2, &b->x, &z12); - s1 = a->y; rustsecp256k1_v0_7_0_fe_normalize_weak(&s1); - rustsecp256k1_v0_7_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_7_0_fe_mul(&s2, &s2, &az); - rustsecp256k1_v0_7_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_7_0_fe_add(&h, &u2); - rustsecp256k1_v0_7_0_fe_negate(&i, &s1, 1); rustsecp256k1_v0_7_0_fe_add(&i, &s2); - if (rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&h)) { - if (rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&i)) { - rustsecp256k1_v0_7_0_gej_double_var(r, a, NULL); + rustsecp256k1_v0_8_0_fe_mul(&az, &a->z, bzinv); + + rustsecp256k1_v0_8_0_fe_sqr(&z12, &az); + u1 = a->x; rustsecp256k1_v0_8_0_fe_normalize_weak(&u1); + rustsecp256k1_v0_8_0_fe_mul(&u2, &b->x, &z12); + s1 = a->y; rustsecp256k1_v0_8_0_fe_normalize_weak(&s1); + rustsecp256k1_v0_8_0_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_8_0_fe_mul(&s2, &s2, &az); + rustsecp256k1_v0_8_0_fe_negate(&h, &u1, 1); rustsecp256k1_v0_8_0_fe_add(&h, &u2); + rustsecp256k1_v0_8_0_fe_negate(&i, &s2, 1); rustsecp256k1_v0_8_0_fe_add(&i, &s1); + if (rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&h)) { + if (rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&i)) { + rustsecp256k1_v0_8_0_gej_double_var(r, a, NULL); } else { - rustsecp256k1_v0_7_0_gej_set_infinity(r); + rustsecp256k1_v0_8_0_gej_set_infinity(r); } return; } - rustsecp256k1_v0_7_0_fe_sqr(&i2, &i); - rustsecp256k1_v0_7_0_fe_sqr(&h2, &h); - rustsecp256k1_v0_7_0_fe_mul(&h3, &h, &h2); - r->z = a->z; rustsecp256k1_v0_7_0_fe_mul(&r->z, &r->z, &h); - rustsecp256k1_v0_7_0_fe_mul(&t, &u1, &h2); - r->x = t; rustsecp256k1_v0_7_0_fe_mul_int(&r->x, 2); rustsecp256k1_v0_7_0_fe_add(&r->x, &h3); rustsecp256k1_v0_7_0_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_7_0_fe_add(&r->x, &i2); - rustsecp256k1_v0_7_0_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_7_0_fe_add(&r->y, &t); rustsecp256k1_v0_7_0_fe_mul(&r->y, &r->y, &i); - rustsecp256k1_v0_7_0_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_7_0_fe_negate(&h3, &h3, 1); - rustsecp256k1_v0_7_0_fe_add(&r->y, &h3); -} - - -static void rustsecp256k1_v0_7_0_gej_add_ge(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_ge *b) { - /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */ - static const rustsecp256k1_v0_7_0_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_7_0_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; - rustsecp256k1_v0_7_0_fe m_alt, rr_alt; + + r->infinity = 0; + rustsecp256k1_v0_8_0_fe_mul(&r->z, &a->z, &h); + + rustsecp256k1_v0_8_0_fe_sqr(&h2, &h); + rustsecp256k1_v0_8_0_fe_negate(&h2, &h2, 1); + rustsecp256k1_v0_8_0_fe_mul(&h3, &h2, &h); + rustsecp256k1_v0_8_0_fe_mul(&t, &u1, &h2); + + rustsecp256k1_v0_8_0_fe_sqr(&r->x, &i); + rustsecp256k1_v0_8_0_fe_add(&r->x, &h3); + rustsecp256k1_v0_8_0_fe_add(&r->x, &t); + rustsecp256k1_v0_8_0_fe_add(&r->x, &t); + + rustsecp256k1_v0_8_0_fe_add(&t, &r->x); + rustsecp256k1_v0_8_0_fe_mul(&r->y, &t, &i); + rustsecp256k1_v0_8_0_fe_mul(&h3, &h3, &s1); + rustsecp256k1_v0_8_0_fe_add(&r->y, &h3); +} + + +static void rustsecp256k1_v0_8_0_gej_add_ge(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_ge *b) { + /* Operations: 7 mul, 5 sqr, 24 add/cmov/half/mul_int/negate/normalize_weak/normalizes_to_zero */ + rustsecp256k1_v0_8_0_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; + rustsecp256k1_v0_8_0_fe 
m_alt, rr_alt; int infinity, degenerate; VERIFY_CHECK(!b->infinity); VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); @@ -515,11 +540,11 @@ static void rustsecp256k1_v0_7_0_gej_add_ge(rustsecp256k1_v0_7_0_gej *r, const r * Z = Z1*Z2 * T = U1+U2 * M = S1+S2 - * Q = T*M^2 + * Q = -T*M^2 * R = T^2-U1*U2 - * X3 = 4*(R^2-Q) - * Y3 = 4*(R*(3*Q-2*R^2)-M^4) - * Z3 = 2*M*Z + * X3 = R^2+Q + * Y3 = -(R*(2*X3+Q)+M^4)/2 + * Z3 = M*Z * (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.) * * This formula has the benefit of being the same for both addition @@ -551,133 +576,125 @@ static void rustsecp256k1_v0_7_0_gej_add_ge(rustsecp256k1_v0_7_0_gej *r, const r * so this covers everything. */ - rustsecp256k1_v0_7_0_fe_sqr(&zz, &a->z); /* z = Z1^2 */ - u1 = a->x; rustsecp256k1_v0_7_0_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ - rustsecp256k1_v0_7_0_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ - s1 = a->y; rustsecp256k1_v0_7_0_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ - rustsecp256k1_v0_7_0_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ - rustsecp256k1_v0_7_0_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ - t = u1; rustsecp256k1_v0_7_0_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ - m = s1; rustsecp256k1_v0_7_0_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ - rustsecp256k1_v0_7_0_fe_sqr(&rr, &t); /* rr = T^2 (1) */ - rustsecp256k1_v0_7_0_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ - rustsecp256k1_v0_7_0_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ - rustsecp256k1_v0_7_0_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ + rustsecp256k1_v0_8_0_fe_sqr(&zz, &a->z); /* z = Z1^2 */ + u1 = a->x; rustsecp256k1_v0_8_0_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ + rustsecp256k1_v0_8_0_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ + s1 = a->y; rustsecp256k1_v0_8_0_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ + rustsecp256k1_v0_8_0_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ + rustsecp256k1_v0_8_0_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ + t = u1; rustsecp256k1_v0_8_0_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ + m = s1; rustsecp256k1_v0_8_0_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ + rustsecp256k1_v0_8_0_fe_sqr(&rr, &t); /* rr = T^2 (1) */ + rustsecp256k1_v0_8_0_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ + rustsecp256k1_v0_8_0_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ + rustsecp256k1_v0_8_0_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ /** If lambda = R/M = 0/0 we have a problem (except in the "trivial" * case that Z = z1z2 = 0, and this is special-cased later on). */ - degenerate = rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&m) & - rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&rr); + degenerate = rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&m) & + rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&rr); /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2. * This means either x1 == beta*x2 or beta*x1 == x2, where beta is * a nontrivial cube root of one. In either case, an alternate * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2), * so we set R/M equal to this. 
*/ rr_alt = s1; - rustsecp256k1_v0_7_0_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ - rustsecp256k1_v0_7_0_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ + rustsecp256k1_v0_8_0_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ + rustsecp256k1_v0_8_0_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ - rustsecp256k1_v0_7_0_fe_cmov(&rr_alt, &rr, !degenerate); - rustsecp256k1_v0_7_0_fe_cmov(&m_alt, &m, !degenerate); + rustsecp256k1_v0_8_0_fe_cmov(&rr_alt, &rr, !degenerate); + rustsecp256k1_v0_8_0_fe_cmov(&m_alt, &m, !degenerate); /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0. * From here on out Ralt and Malt represent the numerator * and denominator of lambda; R and M represent the explicit * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */ - rustsecp256k1_v0_7_0_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ - rustsecp256k1_v0_7_0_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ + rustsecp256k1_v0_8_0_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ + rustsecp256k1_v0_8_0_fe_negate(&q, &t, 2); /* q = -T (3) */ + rustsecp256k1_v0_8_0_fe_mul(&q, &q, &n); /* q = Q = -T*Malt^2 (1) */ /* These two lines use the observation that either M == Malt or M == 0, * so M^3 * Malt is either Malt^4 (which is computed by squaring), or * zero (which is "computed" by cmov). So the cost is one squaring * versus two multiplications. */ - rustsecp256k1_v0_7_0_fe_sqr(&n, &n); - rustsecp256k1_v0_7_0_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ - rustsecp256k1_v0_7_0_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ - rustsecp256k1_v0_7_0_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ - infinity = rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&r->z) & ~a->infinity; - rustsecp256k1_v0_7_0_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ - rustsecp256k1_v0_7_0_fe_negate(&q, &q, 1); /* q = -Q (2) */ - rustsecp256k1_v0_7_0_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ - rustsecp256k1_v0_7_0_fe_normalize_weak(&t); - r->x = t; /* r->x = Ralt^2-Q (1) */ - rustsecp256k1_v0_7_0_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ - rustsecp256k1_v0_7_0_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ - rustsecp256k1_v0_7_0_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ - rustsecp256k1_v0_7_0_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ - rustsecp256k1_v0_7_0_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ - rustsecp256k1_v0_7_0_fe_normalize_weak(&r->y); - rustsecp256k1_v0_7_0_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ - rustsecp256k1_v0_7_0_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ + rustsecp256k1_v0_8_0_fe_sqr(&n, &n); + rustsecp256k1_v0_8_0_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ + rustsecp256k1_v0_8_0_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ + rustsecp256k1_v0_8_0_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Z3 = Malt*Z (1) */ + infinity = rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&r->z) & ~a->infinity; + rustsecp256k1_v0_8_0_fe_add(&t, &q); /* t = Ralt^2 + Q (2) */ + r->x = t; /* r->x = X3 = Ralt^2 + Q (2) */ + rustsecp256k1_v0_8_0_fe_mul_int(&t, 2); /* t = 2*X3 (4) */ + rustsecp256k1_v0_8_0_fe_add(&t, &q); /* t = 2*X3 + Q (5) */ + rustsecp256k1_v0_8_0_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*X3 + Q) (1) */ + rustsecp256k1_v0_8_0_fe_add(&t, &n); /* t = Ralt*(2*X3 + Q) + M^3*Malt (3) */ + rustsecp256k1_v0_8_0_fe_negate(&r->y, &t, 3); /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (4) */ + rustsecp256k1_v0_8_0_fe_half(&r->y); /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 (3) */ /** In case a->infinity == 1, replace r with (b->x, b->y, 1). 
*/ - rustsecp256k1_v0_7_0_fe_cmov(&r->x, &b->x, a->infinity); - rustsecp256k1_v0_7_0_fe_cmov(&r->y, &b->y, a->infinity); - rustsecp256k1_v0_7_0_fe_cmov(&r->z, &fe_1, a->infinity); + rustsecp256k1_v0_8_0_fe_cmov(&r->x, &b->x, a->infinity); + rustsecp256k1_v0_8_0_fe_cmov(&r->y, &b->y, a->infinity); + rustsecp256k1_v0_8_0_fe_cmov(&r->z, &rustsecp256k1_v0_8_0_fe_one, a->infinity); r->infinity = infinity; } -static void rustsecp256k1_v0_7_0_gej_rescale(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_fe *s) { +static void rustsecp256k1_v0_8_0_gej_rescale(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_fe *s) { /* Operations: 4 mul, 1 sqr */ - rustsecp256k1_v0_7_0_fe zz; - VERIFY_CHECK(!rustsecp256k1_v0_7_0_fe_is_zero(s)); - rustsecp256k1_v0_7_0_fe_sqr(&zz, s); - rustsecp256k1_v0_7_0_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ - rustsecp256k1_v0_7_0_fe_mul(&r->y, &r->y, &zz); - rustsecp256k1_v0_7_0_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ - rustsecp256k1_v0_7_0_fe_mul(&r->z, &r->z, s); /* r->z *= s */ + rustsecp256k1_v0_8_0_fe zz; + VERIFY_CHECK(!rustsecp256k1_v0_8_0_fe_is_zero(s)); + rustsecp256k1_v0_8_0_fe_sqr(&zz, s); + rustsecp256k1_v0_8_0_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ + rustsecp256k1_v0_8_0_fe_mul(&r->y, &r->y, &zz); + rustsecp256k1_v0_8_0_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ + rustsecp256k1_v0_8_0_fe_mul(&r->z, &r->z, s); /* r->z *= s */ } -static void rustsecp256k1_v0_7_0_ge_to_storage(rustsecp256k1_v0_7_0_ge_storage *r, const rustsecp256k1_v0_7_0_ge *a) { - rustsecp256k1_v0_7_0_fe x, y; +static void rustsecp256k1_v0_8_0_ge_to_storage(rustsecp256k1_v0_8_0_ge_storage *r, const rustsecp256k1_v0_8_0_ge *a) { + rustsecp256k1_v0_8_0_fe x, y; VERIFY_CHECK(!a->infinity); x = a->x; - rustsecp256k1_v0_7_0_fe_normalize(&x); + rustsecp256k1_v0_8_0_fe_normalize(&x); y = a->y; - rustsecp256k1_v0_7_0_fe_normalize(&y); - rustsecp256k1_v0_7_0_fe_to_storage(&r->x, &x); - rustsecp256k1_v0_7_0_fe_to_storage(&r->y, &y); + rustsecp256k1_v0_8_0_fe_normalize(&y); + rustsecp256k1_v0_8_0_fe_to_storage(&r->x, &x); + rustsecp256k1_v0_8_0_fe_to_storage(&r->y, &y); } -static void rustsecp256k1_v0_7_0_ge_from_storage(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_ge_storage *a) { - rustsecp256k1_v0_7_0_fe_from_storage(&r->x, &a->x); - rustsecp256k1_v0_7_0_fe_from_storage(&r->y, &a->y); +static void rustsecp256k1_v0_8_0_ge_from_storage(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge_storage *a) { + rustsecp256k1_v0_8_0_fe_from_storage(&r->x, &a->x); + rustsecp256k1_v0_8_0_fe_from_storage(&r->y, &a->y); r->infinity = 0; } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_gej_cmov(rustsecp256k1_v0_7_0_gej *r, const rustsecp256k1_v0_7_0_gej *a, int flag) { - rustsecp256k1_v0_7_0_fe_cmov(&r->x, &a->x, flag); - rustsecp256k1_v0_7_0_fe_cmov(&r->y, &a->y, flag); - rustsecp256k1_v0_7_0_fe_cmov(&r->z, &a->z, flag); +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_gej_cmov(rustsecp256k1_v0_8_0_gej *r, const rustsecp256k1_v0_8_0_gej *a, int flag) { + rustsecp256k1_v0_8_0_fe_cmov(&r->x, &a->x, flag); + rustsecp256k1_v0_8_0_fe_cmov(&r->y, &a->y, flag); + rustsecp256k1_v0_8_0_fe_cmov(&r->z, &a->z, flag); r->infinity ^= (r->infinity ^ a->infinity) & flag; } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_ge_storage_cmov(rustsecp256k1_v0_7_0_ge_storage *r, const rustsecp256k1_v0_7_0_ge_storage *a, int flag) { - rustsecp256k1_v0_7_0_fe_storage_cmov(&r->x, &a->x, flag); - rustsecp256k1_v0_7_0_fe_storage_cmov(&r->y, &a->y, flag); +static SECP256K1_INLINE void 
rustsecp256k1_v0_8_0_ge_storage_cmov(rustsecp256k1_v0_8_0_ge_storage *r, const rustsecp256k1_v0_8_0_ge_storage *a, int flag) { + rustsecp256k1_v0_8_0_fe_storage_cmov(&r->x, &a->x, flag); + rustsecp256k1_v0_8_0_fe_storage_cmov(&r->y, &a->y, flag); } -static void rustsecp256k1_v0_7_0_ge_mul_lambda(rustsecp256k1_v0_7_0_ge *r, const rustsecp256k1_v0_7_0_ge *a) { - static const rustsecp256k1_v0_7_0_fe beta = SECP256K1_FE_CONST( - 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, - 0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul - ); +static void rustsecp256k1_v0_8_0_ge_mul_lambda(rustsecp256k1_v0_8_0_ge *r, const rustsecp256k1_v0_8_0_ge *a) { *r = *a; - rustsecp256k1_v0_7_0_fe_mul(&r->x, &r->x, &beta); + rustsecp256k1_v0_8_0_fe_mul(&r->x, &r->x, &rustsecp256k1_v0_8_0_const_beta); } -static int rustsecp256k1_v0_7_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_7_0_ge* ge) { +static int rustsecp256k1_v0_8_0_ge_is_in_correct_subgroup(const rustsecp256k1_v0_8_0_ge* ge) { #ifdef EXHAUSTIVE_TEST_ORDER - rustsecp256k1_v0_7_0_gej out; + rustsecp256k1_v0_8_0_gej out; int i; /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */ - rustsecp256k1_v0_7_0_gej_set_infinity(&out); + rustsecp256k1_v0_8_0_gej_set_infinity(&out); for (i = 0; i < 32; ++i) { - rustsecp256k1_v0_7_0_gej_double_var(&out, &out, NULL); + rustsecp256k1_v0_8_0_gej_double_var(&out, &out, NULL); if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) { - rustsecp256k1_v0_7_0_gej_add_ge_var(&out, &out, ge, NULL); + rustsecp256k1_v0_8_0_gej_add_ge_var(&out, &out, ge, NULL); } } - return rustsecp256k1_v0_7_0_gej_is_infinity(&out); + return rustsecp256k1_v0_8_0_gej_is_infinity(&out); #else (void)ge; /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. 
*/ diff --git a/secp256k1-sys/depend/secp256k1/src/hash.h b/secp256k1-sys/depend/secp256k1/src/hash.h index 3514443e9..6f84b0538 100644 --- a/secp256k1-sys/depend/secp256k1/src/hash.h +++ b/secp256k1-sys/depend/secp256k1/src/hash.h @@ -12,30 +12,30 @@ typedef struct { uint32_t s[8]; - uint32_t buf[16]; /* In big endian */ - size_t bytes; -} rustsecp256k1_v0_7_0_sha256; + unsigned char buf[64]; + uint64_t bytes; +} rustsecp256k1_v0_8_0_sha256; -static void rustsecp256k1_v0_7_0_sha256_initialize(rustsecp256k1_v0_7_0_sha256 *hash); -static void rustsecp256k1_v0_7_0_sha256_write(rustsecp256k1_v0_7_0_sha256 *hash, const unsigned char *data, size_t size); -static void rustsecp256k1_v0_7_0_sha256_finalize(rustsecp256k1_v0_7_0_sha256 *hash, unsigned char *out32); +static void rustsecp256k1_v0_8_0_sha256_initialize(rustsecp256k1_v0_8_0_sha256 *hash); +static void rustsecp256k1_v0_8_0_sha256_write(rustsecp256k1_v0_8_0_sha256 *hash, const unsigned char *data, size_t size); +static void rustsecp256k1_v0_8_0_sha256_finalize(rustsecp256k1_v0_8_0_sha256 *hash, unsigned char *out32); typedef struct { - rustsecp256k1_v0_7_0_sha256 inner, outer; -} rustsecp256k1_v0_7_0_hmac_sha256; + rustsecp256k1_v0_8_0_sha256 inner, outer; +} rustsecp256k1_v0_8_0_hmac_sha256; -static void rustsecp256k1_v0_7_0_hmac_sha256_initialize(rustsecp256k1_v0_7_0_hmac_sha256 *hash, const unsigned char *key, size_t size); -static void rustsecp256k1_v0_7_0_hmac_sha256_write(rustsecp256k1_v0_7_0_hmac_sha256 *hash, const unsigned char *data, size_t size); -static void rustsecp256k1_v0_7_0_hmac_sha256_finalize(rustsecp256k1_v0_7_0_hmac_sha256 *hash, unsigned char *out32); +static void rustsecp256k1_v0_8_0_hmac_sha256_initialize(rustsecp256k1_v0_8_0_hmac_sha256 *hash, const unsigned char *key, size_t size); +static void rustsecp256k1_v0_8_0_hmac_sha256_write(rustsecp256k1_v0_8_0_hmac_sha256 *hash, const unsigned char *data, size_t size); +static void rustsecp256k1_v0_8_0_hmac_sha256_finalize(rustsecp256k1_v0_8_0_hmac_sha256 *hash, unsigned char *out32); typedef struct { unsigned char v[32]; unsigned char k[32]; int retry; -} rustsecp256k1_v0_7_0_rfc6979_hmac_sha256; +} rustsecp256k1_v0_8_0_rfc6979_hmac_sha256; -static void rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen); -static void rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen); -static void rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 *rng); +static void rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen); +static void rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen); +static void rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 *rng); #endif /* SECP256K1_HASH_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/hash_impl.h b/secp256k1-sys/depend/secp256k1/src/hash_impl.h index 5e3357a14..1008fd90a 100644 --- a/secp256k1-sys/depend/secp256k1/src/hash_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/hash_impl.h @@ -28,13 +28,7 @@ (h) = t1 + t2; \ } while(0) -#if defined(SECP256K1_BIG_ENDIAN) -#define BE32(x) (x) -#elif defined(SECP256K1_LITTLE_ENDIAN) -#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) 
>> 24)) -#endif - -static void rustsecp256k1_v0_7_0_sha256_initialize(rustsecp256k1_v0_7_0_sha256 *hash) { +static void rustsecp256k1_v0_8_0_sha256_initialize(rustsecp256k1_v0_8_0_sha256 *hash) { hash->s[0] = 0x6a09e667ul; hash->s[1] = 0xbb67ae85ul; hash->s[2] = 0x3c6ef372ul; @@ -47,26 +41,26 @@ static void rustsecp256k1_v0_7_0_sha256_initialize(rustsecp256k1_v0_7_0_sha256 * } /** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */ -static void rustsecp256k1_v0_7_0_sha256_transform(uint32_t* s, const uint32_t* chunk) { +static void rustsecp256k1_v0_8_0_sha256_transform(uint32_t* s, const unsigned char* buf) { uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7]; uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; - Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = BE32(chunk[0])); - Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = BE32(chunk[1])); - Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = BE32(chunk[2])); - Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = BE32(chunk[3])); - Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = BE32(chunk[4])); - Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = BE32(chunk[5])); - Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = BE32(chunk[6])); - Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = BE32(chunk[7])); - Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = BE32(chunk[8])); - Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = BE32(chunk[9])); - Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = BE32(chunk[10])); - Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = BE32(chunk[11])); - Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = BE32(chunk[12])); - Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = BE32(chunk[13])); - Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = BE32(chunk[14])); - Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = BE32(chunk[15])); + Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = rustsecp256k1_v0_8_0_read_be32(&buf[0])); + Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = rustsecp256k1_v0_8_0_read_be32(&buf[4])); + Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = rustsecp256k1_v0_8_0_read_be32(&buf[8])); + Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = rustsecp256k1_v0_8_0_read_be32(&buf[12])); + Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = rustsecp256k1_v0_8_0_read_be32(&buf[16])); + Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = rustsecp256k1_v0_8_0_read_be32(&buf[20])); + Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = rustsecp256k1_v0_8_0_read_be32(&buf[24])); + Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = rustsecp256k1_v0_8_0_read_be32(&buf[28])); + Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = rustsecp256k1_v0_8_0_read_be32(&buf[32])); + Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = rustsecp256k1_v0_8_0_read_be32(&buf[36])); + Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = rustsecp256k1_v0_8_0_read_be32(&buf[40])); + Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = rustsecp256k1_v0_8_0_read_be32(&buf[44])); + Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = rustsecp256k1_v0_8_0_read_be32(&buf[48])); + Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = rustsecp256k1_v0_8_0_read_be32(&buf[52])); + Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = rustsecp256k1_v0_8_0_read_be32(&buf[56])); + Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = rustsecp256k1_v0_8_0_read_be32(&buf[60])); Round(a, b, c, d, e, f, g, h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1)); Round(h, a, b, c, d, e, f, g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2)); @@ -129,17 +123,17 @@ static void 
rustsecp256k1_v0_7_0_sha256_transform(uint32_t* s, const uint32_t* c s[7] += h; } -static void rustsecp256k1_v0_7_0_sha256_write(rustsecp256k1_v0_7_0_sha256 *hash, const unsigned char *data, size_t len) { +static void rustsecp256k1_v0_8_0_sha256_write(rustsecp256k1_v0_8_0_sha256 *hash, const unsigned char *data, size_t len) { size_t bufsize = hash->bytes & 0x3F; hash->bytes += len; VERIFY_CHECK(hash->bytes >= len); while (len >= 64 - bufsize) { /* Fill the buffer, and process it. */ size_t chunk_len = 64 - bufsize; - memcpy(((unsigned char*)hash->buf) + bufsize, data, chunk_len); + memcpy(hash->buf + bufsize, data, chunk_len); data += chunk_len; len -= chunk_len; - rustsecp256k1_v0_7_0_sha256_transform(hash->s, hash->buf); + rustsecp256k1_v0_8_0_sha256_transform(hash->s, hash->buf); bufsize = 0; } if (len) { @@ -148,78 +142,78 @@ static void rustsecp256k1_v0_7_0_sha256_write(rustsecp256k1_v0_7_0_sha256 *hash, } } -static void rustsecp256k1_v0_7_0_sha256_finalize(rustsecp256k1_v0_7_0_sha256 *hash, unsigned char *out32) { - static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - uint32_t sizedesc[2]; - uint32_t out[8]; - int i = 0; - sizedesc[0] = BE32(hash->bytes >> 29); - sizedesc[1] = BE32(hash->bytes << 3); - rustsecp256k1_v0_7_0_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); - rustsecp256k1_v0_7_0_sha256_write(hash, (const unsigned char*)sizedesc, 8); +static void rustsecp256k1_v0_8_0_sha256_finalize(rustsecp256k1_v0_8_0_sha256 *hash, unsigned char *out32) { + static const unsigned char pad[64] = {0x80}; + unsigned char sizedesc[8]; + int i; + /* The maximum message size of SHA256 is 2^64-1 bits. */ + VERIFY_CHECK(hash->bytes < ((uint64_t)1 << 61)); + rustsecp256k1_v0_8_0_write_be32(&sizedesc[0], hash->bytes >> 29); + rustsecp256k1_v0_8_0_write_be32(&sizedesc[4], hash->bytes << 3); + rustsecp256k1_v0_8_0_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); + rustsecp256k1_v0_8_0_sha256_write(hash, sizedesc, 8); for (i = 0; i < 8; i++) { - out[i] = BE32(hash->s[i]); + rustsecp256k1_v0_8_0_write_be32(&out32[4*i], hash->s[i]); hash->s[i] = 0; } - memcpy(out32, (const unsigned char*)out, 32); } /* Initializes a sha256 struct and writes the 64 byte string * SHA256(tag)||SHA256(tag) into it. 
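The rewritten transform and finalize above read and write bytes through rustsecp256k1_v0_8_0_read_be32 and rustsecp256k1_v0_8_0_write_be32 instead of the word-level BE32 macro, so the compile-time endianness switch disappears. Those helpers live in util.h, outside this excerpt; a minimal sketch of the semantics they are assumed to have:

```
#include <stdint.h>

/* Hypothetical stand-ins for the read_be32/write_be32 helpers the new
 * SHA-256 code calls (the real ones carry the rustsecp256k1_v0_8_0_
 * prefix and live in util.h). Accessing bytes individually makes the
 * result independent of host endianness, which is what lets the BE32
 * macro be deleted. */
static uint32_t read_be32(const unsigned char *p) {
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void write_be32(unsigned char *p, uint32_t x) {
    p[0] = (unsigned char)(x >> 24);
    p[1] = (unsigned char)(x >> 16);
    p[2] = (unsigned char)(x >> 8);
    p[3] = (unsigned char)x;
}
```

Compilers typically recognize this byte-access pattern and emit a single load or store plus a byte swap, so dropping the macro should not cost performance on either endianness.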
*/ -static void rustsecp256k1_v0_7_0_sha256_initialize_tagged(rustsecp256k1_v0_7_0_sha256 *hash, const unsigned char *tag, size_t taglen) { +static void rustsecp256k1_v0_8_0_sha256_initialize_tagged(rustsecp256k1_v0_8_0_sha256 *hash, const unsigned char *tag, size_t taglen) { unsigned char buf[32]; - rustsecp256k1_v0_7_0_sha256_initialize(hash); - rustsecp256k1_v0_7_0_sha256_write(hash, tag, taglen); - rustsecp256k1_v0_7_0_sha256_finalize(hash, buf); + rustsecp256k1_v0_8_0_sha256_initialize(hash); + rustsecp256k1_v0_8_0_sha256_write(hash, tag, taglen); + rustsecp256k1_v0_8_0_sha256_finalize(hash, buf); - rustsecp256k1_v0_7_0_sha256_initialize(hash); - rustsecp256k1_v0_7_0_sha256_write(hash, buf, 32); - rustsecp256k1_v0_7_0_sha256_write(hash, buf, 32); + rustsecp256k1_v0_8_0_sha256_initialize(hash); + rustsecp256k1_v0_8_0_sha256_write(hash, buf, 32); + rustsecp256k1_v0_8_0_sha256_write(hash, buf, 32); } -static void rustsecp256k1_v0_7_0_hmac_sha256_initialize(rustsecp256k1_v0_7_0_hmac_sha256 *hash, const unsigned char *key, size_t keylen) { +static void rustsecp256k1_v0_8_0_hmac_sha256_initialize(rustsecp256k1_v0_8_0_hmac_sha256 *hash, const unsigned char *key, size_t keylen) { size_t n; unsigned char rkey[64]; if (keylen <= sizeof(rkey)) { memcpy(rkey, key, keylen); memset(rkey + keylen, 0, sizeof(rkey) - keylen); } else { - rustsecp256k1_v0_7_0_sha256 sha256; - rustsecp256k1_v0_7_0_sha256_initialize(&sha256); - rustsecp256k1_v0_7_0_sha256_write(&sha256, key, keylen); - rustsecp256k1_v0_7_0_sha256_finalize(&sha256, rkey); + rustsecp256k1_v0_8_0_sha256 sha256; + rustsecp256k1_v0_8_0_sha256_initialize(&sha256); + rustsecp256k1_v0_8_0_sha256_write(&sha256, key, keylen); + rustsecp256k1_v0_8_0_sha256_finalize(&sha256, rkey); memset(rkey + 32, 0, 32); } - rustsecp256k1_v0_7_0_sha256_initialize(&hash->outer); + rustsecp256k1_v0_8_0_sha256_initialize(&hash->outer); for (n = 0; n < sizeof(rkey); n++) { rkey[n] ^= 0x5c; } - rustsecp256k1_v0_7_0_sha256_write(&hash->outer, rkey, sizeof(rkey)); + rustsecp256k1_v0_8_0_sha256_write(&hash->outer, rkey, sizeof(rkey)); - rustsecp256k1_v0_7_0_sha256_initialize(&hash->inner); + rustsecp256k1_v0_8_0_sha256_initialize(&hash->inner); for (n = 0; n < sizeof(rkey); n++) { rkey[n] ^= 0x5c ^ 0x36; } - rustsecp256k1_v0_7_0_sha256_write(&hash->inner, rkey, sizeof(rkey)); + rustsecp256k1_v0_8_0_sha256_write(&hash->inner, rkey, sizeof(rkey)); memset(rkey, 0, sizeof(rkey)); } -static void rustsecp256k1_v0_7_0_hmac_sha256_write(rustsecp256k1_v0_7_0_hmac_sha256 *hash, const unsigned char *data, size_t size) { - rustsecp256k1_v0_7_0_sha256_write(&hash->inner, data, size); +static void rustsecp256k1_v0_8_0_hmac_sha256_write(rustsecp256k1_v0_8_0_hmac_sha256 *hash, const unsigned char *data, size_t size) { + rustsecp256k1_v0_8_0_sha256_write(&hash->inner, data, size); } -static void rustsecp256k1_v0_7_0_hmac_sha256_finalize(rustsecp256k1_v0_7_0_hmac_sha256 *hash, unsigned char *out32) { +static void rustsecp256k1_v0_8_0_hmac_sha256_finalize(rustsecp256k1_v0_8_0_hmac_sha256 *hash, unsigned char *out32) { unsigned char temp[32]; - rustsecp256k1_v0_7_0_sha256_finalize(&hash->inner, temp); - rustsecp256k1_v0_7_0_sha256_write(&hash->outer, temp, 32); + rustsecp256k1_v0_8_0_sha256_finalize(&hash->inner, temp); + rustsecp256k1_v0_8_0_sha256_write(&hash->outer, temp, 32); memset(temp, 0, 32); - rustsecp256k1_v0_7_0_sha256_finalize(&hash->outer, out32); + rustsecp256k1_v0_8_0_sha256_finalize(&hash->outer, out32); } -static void 
rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) { - rustsecp256k1_v0_7_0_hmac_sha256 hmac; +static void rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 *rng, const unsigned char *key, size_t keylen) { + rustsecp256k1_v0_8_0_hmac_sha256 hmac; static const unsigned char zero[1] = {0x00}; static const unsigned char one[1] = {0x01}; @@ -227,47 +221,47 @@ static void rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(rustsecp256k1_v0 memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */ /* RFC6979 3.2.d. */ - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, zero, 1); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, key, keylen); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, zero, 1); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, key, keylen); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hmac, rng->v); /* RFC6979 3.2.f. */ - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, one, 1); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, key, keylen); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, one, 1); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, key, keylen); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hmac, rng->v); rng->retry = 0; } -static void rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) { +static void rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 *rng, unsigned char *out, size_t outlen) { /* RFC6979 3.2.h. 
*/ static const unsigned char zero[1] = {0x00}; if (rng->retry) { - rustsecp256k1_v0_7_0_hmac_sha256 hmac; - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, zero, 1); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hmac, rng->k); - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_8_0_hmac_sha256 hmac; + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, zero, 1); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hmac, rng->k); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hmac, rng->v); } while (outlen > 0) { - rustsecp256k1_v0_7_0_hmac_sha256 hmac; + rustsecp256k1_v0_8_0_hmac_sha256 hmac; int now = outlen; - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hmac, rng->k, 32); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hmac, rng->v, 32); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hmac, rng->v); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hmac, rng->k, 32); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hmac, rng->v, 32); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hmac, rng->v); if (now > 32) { now = 32; } @@ -279,13 +273,12 @@ static void rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(rustsecp256k1_v0_7 rng->retry = 1; } -static void rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 *rng) { +static void rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 *rng) { memset(rng->k, 0, 32); memset(rng->v, 0, 32); rng->retry = 0; } -#undef BE32 #undef Round #undef sigma1 #undef sigma0 diff --git a/secp256k1-sys/depend/secp256k1/src/int128.h b/secp256k1-sys/depend/secp256k1/src/int128.h new file mode 100644 index 000000000..1aaf1acda --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/int128.h @@ -0,0 +1,85 @@ +#ifndef SECP256K1_INT128_H +#define SECP256K1_INT128_H + +#include "util.h" + +#if defined(SECP256K1_WIDEMUL_INT128) +# if defined(SECP256K1_INT128_NATIVE) +# include "int128_native.h" +# elif defined(SECP256K1_INT128_STRUCT) +# include "int128_struct.h" +# else +# error "Please select int128 implementation" +# endif + +/* Construct an unsigned 128-bit value from a high and a low 64-bit value. */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_load(rustsecp256k1_v0_8_0_uint128 *r, uint64_t hi, uint64_t lo); + +/* Multiply two unsigned 64-bit values a and b and write the result to r. */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b); + +/* Multiply two unsigned 64-bit values a and b and add the result to r. + * The final result is taken modulo 2^128. + */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_accum_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b); + +/* Add an unsigned 64-bit value a to r. + * The final result is taken modulo 2^128. + */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_accum_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a); + +/* Unsigned (logical) right shift. + * Non-constant time in n. 
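The rfc6979_hmac_sha256 trio above implements the HMAC-DRBG from RFC 6979 section 3.2 that drives deterministic nonce generation. A sketch of the calling sequence, illustrative only: the functions are static to hash_impl.h rather than a public API, and by convention the 64-byte key is the 32-byte secret key followed by the 32-byte message hash.

```
rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 rng;
unsigned char keydata[64]; /* seckey32 || msghash32, filled by the caller */
unsigned char nonce[32];

rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(&rng, keydata, 64);
rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, nonce, 32);
/* If the candidate nonce is rejected (out of range, or it yields an
 * invalid signature), call _generate again: the rng->retry flag makes
 * the next call re-key first, per RFC 6979 3.2.h. */
rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(&rng);
```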
+ */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_rshift(rustsecp256k1_v0_8_0_uint128 *r, unsigned int n); + +/* Return the low 64-bits of a 128-bit value as an unsigned 64-bit value. */ +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_u128_to_u64(const rustsecp256k1_v0_8_0_uint128 *a); + +/* Return the high 64-bits of a 128-bit value as an unsigned 64-bit value. */ +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_u128_hi_u64(const rustsecp256k1_v0_8_0_uint128 *a); + +/* Write an unsigned 64-bit value to r. */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_from_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a); + +/* Tests if r is strictly less than 2^n. + * n must be strictly less than 128. + */ +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_u128_check_bits(const rustsecp256k1_v0_8_0_uint128 *r, unsigned int n); + +/* Construct a signed 128-bit value from a high and a low 64-bit value. */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_load(rustsecp256k1_v0_8_0_int128 *r, int64_t hi, uint64_t lo); + +/* Multiply two signed 64-bit values a and b and write the result to r. */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b); + +/* Multiply two signed 64-bit values a and b and add the result to r. + * Overflow or underflow from the addition is undefined behaviour. + */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_accum_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b); + +/* Compute a*d - b*c from signed 64-bit values and write the result to r. */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_det(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d); + +/* Signed (arithmetic) right shift. + * Non-constant time in b. + */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_rshift(rustsecp256k1_v0_8_0_int128 *r, unsigned int b); + +/* Return the low 64-bits of a 128-bit value interpreted as a signed 64-bit value. */ +static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_0_i128_to_i64(const rustsecp256k1_v0_8_0_int128 *a); + +/* Write a signed 64-bit value to r. */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_from_i64(rustsecp256k1_v0_8_0_int128 *r, int64_t a); + +/* Compare two 128-bit values for equality. */ +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_i128_eq_var(const rustsecp256k1_v0_8_0_int128 *a, const rustsecp256k1_v0_8_0_int128 *b); + +/* Tests if r is equal to 2^n. + * n must be strictly less than 127.
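Taken together, these declarations replace every direct use of a 128-bit integer type with function calls, so the same call-site compiles against either backend. For instance, a 64x64->128 multiply-accumulate that then splits the result (a sketch with arbitrary inputs):

```
uint64_t a = 2, b = 3, c = 5; /* arbitrary inputs */
rustsecp256k1_v0_8_0_uint128 acc;
uint64_t lo, hi;

rustsecp256k1_v0_8_0_u128_mul(&acc, a, b);    /* acc = a*b */
rustsecp256k1_v0_8_0_u128_accum_u64(&acc, c); /* acc += c (mod 2^128) */
lo = rustsecp256k1_v0_8_0_u128_to_u64(&acc);  /* low 64 bits: 11 */
hi = rustsecp256k1_v0_8_0_u128_hi_u64(&acc);  /* high 64 bits: 0 */
```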
+ */ +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_i128_check_pow2(const rustsecp256k1_v0_8_0_int128 *r, unsigned int n); + +#endif + +#endif diff --git a/secp256k1-sys/depend/secp256k1/src/int128_impl.h b/secp256k1-sys/depend/secp256k1/src/int128_impl.h new file mode 100644 index 000000000..cfc573408 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/int128_impl.h @@ -0,0 +1,18 @@ +#ifndef SECP256K1_INT128_IMPL_H +#define SECP256K1_INT128_IMPL_H + +#include "util.h" + +#include "int128.h" + +#if defined(SECP256K1_WIDEMUL_INT128) +# if defined(SECP256K1_INT128_NATIVE) +# include "int128_native_impl.h" +# elif defined(SECP256K1_INT128_STRUCT) +# include "int128_struct_impl.h" +# else +# error "Please select int128 implementation" +# endif +#endif + +#endif diff --git a/secp256k1-sys/depend/secp256k1/src/int128_native.h b/secp256k1-sys/depend/secp256k1/src/int128_native.h new file mode 100644 index 000000000..735a0cbdf --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/int128_native.h @@ -0,0 +1,19 @@ +#ifndef SECP256K1_INT128_NATIVE_H +#define SECP256K1_INT128_NATIVE_H + +#include <stdint.h> +#include "util.h" + +#if !defined(UINT128_MAX) && defined(__SIZEOF_INT128__) +SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t; +SECP256K1_GNUC_EXT typedef __int128 int128_t; +# define UINT128_MAX ((uint128_t)(-1)) +# define INT128_MAX ((int128_t)(UINT128_MAX >> 1)) +# define INT128_MIN (-INT128_MAX - 1) +/* No (U)INT128_C macros because compilers providing __int128 do not support 128-bit literals. */ +#endif + +typedef uint128_t rustsecp256k1_v0_8_0_uint128; +typedef int128_t rustsecp256k1_v0_8_0_int128; + +#endif diff --git a/secp256k1-sys/depend/secp256k1/src/int128_native_impl.h b/secp256k1-sys/depend/secp256k1/src/int128_native_impl.h new file mode 100644 index 000000000..3119742c1 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/int128_native_impl.h @@ -0,0 +1,87 @@ +#ifndef SECP256K1_INT128_NATIVE_IMPL_H +#define SECP256K1_INT128_NATIVE_IMPL_H + +#include "int128.h" + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_load(rustsecp256k1_v0_8_0_uint128 *r, uint64_t hi, uint64_t lo) { + *r = (((uint128_t)hi) << 64) + lo; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b) { + *r = (uint128_t)a * b; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_accum_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b) { + *r += (uint128_t)a * b; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_accum_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a) { + *r += a; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_rshift(rustsecp256k1_v0_8_0_uint128 *r, unsigned int n) { + VERIFY_CHECK(n < 128); + *r >>= n; +} + +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_u128_to_u64(const rustsecp256k1_v0_8_0_uint128 *a) { + return (uint64_t)(*a); +} + +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_u128_hi_u64(const rustsecp256k1_v0_8_0_uint128 *a) { + return (uint64_t)(*a >> 64); +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_from_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a) { + *r = a; +} + +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_u128_check_bits(const rustsecp256k1_v0_8_0_uint128 *r, unsigned int n) { + VERIFY_CHECK(n < 128); + return (*r >> n == 0); +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_load(rustsecp256k1_v0_8_0_int128 *r, int64_t hi, uint64_t lo) { + *r = (((uint128_t)(uint64_t)hi) << 64) + lo; +} + +static SECP256K1_INLINE void
rustsecp256k1_v0_8_0_i128_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b) { + *r = (int128_t)a * b; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_accum_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b) { + int128_t ab = (int128_t)a * b; + VERIFY_CHECK(0 <= ab ? *r <= INT128_MAX - ab : INT128_MIN - ab <= *r); + *r += ab; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_det(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) { + int128_t ad = (int128_t)a * d; + int128_t bc = (int128_t)b * c; + VERIFY_CHECK(0 <= bc ? INT128_MIN + bc <= ad : ad <= INT128_MAX + bc); + *r = ad - bc; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_rshift(rustsecp256k1_v0_8_0_int128 *r, unsigned int n) { + VERIFY_CHECK(n < 128); + *r >>= n; +} + +static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_0_i128_to_i64(const rustsecp256k1_v0_8_0_int128 *a) { + return *a; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_from_i64(rustsecp256k1_v0_8_0_int128 *r, int64_t a) { + *r = a; +} + +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_i128_eq_var(const rustsecp256k1_v0_8_0_int128 *a, const rustsecp256k1_v0_8_0_int128 *b) { + return *a == *b; +} + +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_i128_check_pow2(const rustsecp256k1_v0_8_0_int128 *r, unsigned int n) { + VERIFY_CHECK(n < 127); + return (*r == (int128_t)1 << n); +} + +#endif diff --git a/secp256k1-sys/depend/secp256k1/src/int128_struct.h b/secp256k1-sys/depend/secp256k1/src/int128_struct.h new file mode 100644 index 000000000..5850b88f2 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/int128_struct.h @@ -0,0 +1,14 @@ +#ifndef SECP256K1_INT128_STRUCT_H +#define SECP256K1_INT128_STRUCT_H + +#include <stdint.h> +#include "util.h" + +typedef struct { + uint64_t lo; + uint64_t hi; +} rustsecp256k1_v0_8_0_uint128; + +typedef rustsecp256k1_v0_8_0_uint128 rustsecp256k1_v0_8_0_int128; + +#endif diff --git a/secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h b/secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h new file mode 100644 index 000000000..ebd876fac --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/int128_struct_impl.h @@ -0,0 +1,192 @@ +#ifndef SECP256K1_INT128_STRUCT_IMPL_H +#define SECP256K1_INT128_STRUCT_IMPL_H + +#include "int128.h" + +#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) /* MSVC */ +# include <intrin.h> +# if defined(_M_ARM64) || defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE) +/* On ARM64 MSVC, use __(u)mulh for the upper half of 64x64 multiplications. + (Define SECP256K1_MSVC_MULH_TEST_OVERRIDE to test this code path on X64, + which supports both __(u)mulh and _umul128.) */ +# if defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE) +# pragma message(__FILE__ ": SECP256K1_MSVC_MULH_TEST_OVERRIDE is defined, forcing use of __(u)mulh.") +# endif +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) { + *hi = __umulh(a, b); + return a * b; +} + +static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_0_mul128(int64_t a, int64_t b, int64_t* hi) { + *hi = __mulh(a, b); + return (uint64_t)a * (uint64_t)b; +} +# else +/* On x86_64 MSVC, use native _(u)mul128 for 64x64->128 multiplications. */ +# define rustsecp256k1_v0_8_0_umul128 _umul128 +# define rustsecp256k1_v0_8_0_mul128 _mul128 +# endif +#else +/* On other systems, emulate 64x64->128 multiplications using 32x32->64 multiplications.
*/ +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) { + uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b; + uint64_t lh = (uint32_t)a * (b >> 32); + uint64_t hl = (a >> 32) * (uint32_t)b; + uint64_t hh = (a >> 32) * (b >> 32); + uint64_t mid34 = (ll >> 32) + (uint32_t)lh + (uint32_t)hl; + *hi = hh + (lh >> 32) + (hl >> 32) + (mid34 >> 32); + return (mid34 << 32) + (uint32_t)ll; +} + +static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_0_mul128(int64_t a, int64_t b, int64_t* hi) { + uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b; + int64_t lh = (uint32_t)a * (b >> 32); + int64_t hl = (a >> 32) * (uint32_t)b; + int64_t hh = (a >> 32) * (b >> 32); + uint64_t mid34 = (ll >> 32) + (uint32_t)lh + (uint32_t)hl; + *hi = hh + (lh >> 32) + (hl >> 32) + (mid34 >> 32); + return (mid34 << 32) + (uint32_t)ll; +} +#endif + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_load(rustsecp256k1_v0_8_0_uint128 *r, uint64_t hi, uint64_t lo) { + r->hi = hi; + r->lo = lo; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b) { + r->lo = rustsecp256k1_v0_8_0_umul128(a, b, &r->hi); +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_accum_mul(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a, uint64_t b) { + uint64_t lo, hi; + lo = rustsecp256k1_v0_8_0_umul128(a, b, &hi); + r->lo += lo; + r->hi += hi + (r->lo < lo); +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_accum_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a) { + r->lo += a; + r->hi += r->lo < a; +} + +/* Unsigned (logical) right shift. + * Non-constant time in n. + */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_rshift(rustsecp256k1_v0_8_0_uint128 *r, unsigned int n) { + VERIFY_CHECK(n < 128); + if (n >= 64) { + r->lo = r->hi >> (n-64); + r->hi = 0; + } else if (n > 0) { + r->lo = ((1U * r->hi) << (64-n)) | r->lo >> n; + r->hi >>= n; + } +} + +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_u128_to_u64(const rustsecp256k1_v0_8_0_uint128 *a) { + return a->lo; +} + +static SECP256K1_INLINE uint64_t rustsecp256k1_v0_8_0_u128_hi_u64(const rustsecp256k1_v0_8_0_uint128 *a) { + return a->hi; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_u128_from_u64(rustsecp256k1_v0_8_0_uint128 *r, uint64_t a) { + r->hi = 0; + r->lo = a; +} + +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_u128_check_bits(const rustsecp256k1_v0_8_0_uint128 *r, unsigned int n) { + VERIFY_CHECK(n < 128); + return n >= 64 ? r->hi >> (n - 64) == 0 + : r->hi == 0 && r->lo >> n == 0; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_load(rustsecp256k1_v0_8_0_int128 *r, int64_t hi, uint64_t lo) { + r->hi = hi; + r->lo = lo; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b) { + int64_t hi; + r->lo = (uint64_t)rustsecp256k1_v0_8_0_mul128(a, b, &hi); + r->hi = (uint64_t)hi; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_accum_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b) { + int64_t hi; + uint64_t lo = (uint64_t)rustsecp256k1_v0_8_0_mul128(a, b, &hi); + r->lo += lo; + hi += r->lo < lo; + /* Verify no overflow. + * If r represents a positive value (the sign bit is not set) and the value we are adding is a positive value (the sign bit is not set), + * then we require that the resulting value also be positive (the sign bit is not set). + * Note that (X <= Y) means (X implies Y) when X and Y are boolean values (i.e. 0 or 1). 
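The portable rustsecp256k1_v0_8_0_umul128 above uses the schoolbook split a = a1*2^32 + a0, b = b1*2^32 + b0, so a*b = hh*2^64 + (lh + hl)*2^32 + ll; mid34 collects the middle partial products plus the carry out of ll, and it fits in 34 bits (at most 3*(2^32 - 1)), so that sum cannot overflow 64 bits. A self-contained sanity check of the decomposition against a native 128-bit type, where the compiler has one (the test values are arbitrary):

```
#include <assert.h>
#include <stdint.h>

/* Stand-alone copy of the portable 32x32->64 emulation above, compared
 * against the compiler's native unsigned __int128 where available. */
static uint64_t umul128_emu(uint64_t a, uint64_t b, uint64_t *hi) {
    uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b;
    uint64_t lh = (uint32_t)a * (b >> 32);
    uint64_t hl = (a >> 32) * (uint32_t)b;
    uint64_t hh = (a >> 32) * (b >> 32);
    uint64_t mid34 = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
    *hi = hh + (lh >> 32) + (hl >> 32) + (mid34 >> 32);
    return (mid34 << 32) + (uint32_t)ll;
}

int main(void) {
#if defined(__SIZEOF_INT128__)
    uint64_t a = 0xfedcba9876543210u, b = 0x0123456789abcdefu, hi;
    unsigned __int128 ref = (unsigned __int128)a * b;
    uint64_t lo = umul128_emu(a, b, &hi);
    assert(lo == (uint64_t)ref);
    assert(hi == (uint64_t)(ref >> 64));
#endif
    return 0;
}
```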
+ */ + VERIFY_CHECK((r->hi <= 0x7fffffffffffffffu && (uint64_t)hi <= 0x7fffffffffffffffu) <= (r->hi + (uint64_t)hi <= 0x7fffffffffffffffu)); + /* Verify no underflow. + * If r represents a negative value (the sign bit is set) and the value we are adding is a negative value (the sign bit is set), + * then we require that the resulting value also be negative (the sign bit is set). + */ + VERIFY_CHECK((r->hi > 0x7fffffffffffffffu && (uint64_t)hi > 0x7fffffffffffffffu) <= (r->hi + (uint64_t)hi > 0x7fffffffffffffffu)); + r->hi += hi; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_dissip_mul(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b) { + int64_t hi; + uint64_t lo = (uint64_t)rustsecp256k1_v0_8_0_mul128(a, b, &hi); + hi += r->lo < lo; + /* Verify no overflow. + * If r represents a positive value (the sign bit is not set) and the value we are subtracting is a negative value (the sign bit is set), + * then we require that the resulting value also be positive (the sign bit is not set). + */ + VERIFY_CHECK((r->hi <= 0x7fffffffffffffffu && (uint64_t)hi > 0x7fffffffffffffffu) <= (r->hi - (uint64_t)hi <= 0x7fffffffffffffffu)); + /* Verify no underflow. + * If r represents a negative value (the sign bit is set) and the value we are subtracting is a positive value (the sign bit is not set), + * then we require that the resulting value also be negative (the sign bit is set). + */ + VERIFY_CHECK((r->hi > 0x7fffffffffffffffu && (uint64_t)hi <= 0x7fffffffffffffffu) <= (r->hi - (uint64_t)hi > 0x7fffffffffffffffu)); + r->hi -= hi; + r->lo -= lo; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_det(rustsecp256k1_v0_8_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) { + rustsecp256k1_v0_8_0_i128_mul(r, a, d); + rustsecp256k1_v0_8_0_i128_dissip_mul(r, b, c); +} + +/* Signed (arithmetic) right shift. + * Non-constant time in n. + */ +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_rshift(rustsecp256k1_v0_8_0_int128 *r, unsigned int n) { + VERIFY_CHECK(n < 128); + if (n >= 64) { + r->lo = (uint64_t)((int64_t)(r->hi) >> (n-64)); + r->hi = (uint64_t)((int64_t)(r->hi) >> 63); + } else if (n > 0) { + r->lo = ((1U * r->hi) << (64-n)) | r->lo >> n; + r->hi = (uint64_t)((int64_t)(r->hi) >> n); + } +} + +static SECP256K1_INLINE int64_t rustsecp256k1_v0_8_0_i128_to_i64(const rustsecp256k1_v0_8_0_int128 *a) { + return (int64_t)a->lo; +} + +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_i128_from_i64(rustsecp256k1_v0_8_0_int128 *r, int64_t a) { + r->hi = (uint64_t)(a >> 63); + r->lo = (uint64_t)a; +} + +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_i128_eq_var(const rustsecp256k1_v0_8_0_int128 *a, const rustsecp256k1_v0_8_0_int128 *b) { + return a->hi == b->hi && a->lo == b->lo; +} + +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_i128_check_pow2(const rustsecp256k1_v0_8_0_int128 *r, unsigned int n) { + VERIFY_CHECK(n < 127); + return n >= 64 ? r->hi == (uint64_t)1 << (n - 64) && r->lo == 0 + : r->hi == 0 && r->lo == (uint64_t)1 << n; +} + +#endif diff --git a/secp256k1-sys/depend/secp256k1/src/modinv32.h b/secp256k1-sys/depend/secp256k1/src/modinv32.h index 3f70f8607..205e26f6e 100644 --- a/secp256k1-sys/depend/secp256k1/src/modinv32.h +++ b/secp256k1-sys/depend/secp256k1/src/modinv32.h @@ -18,15 +18,15 @@ * Its value is sum(v[i] * 2^(30*i), i=0..8). */ typedef struct { int32_t v[9]; -} rustsecp256k1_v0_7_0_modinv32_signed30; +} rustsecp256k1_v0_8_0_modinv32_signed30; typedef struct { /* The modulus in signed30 notation, must be odd and in [3, 2^256].
*/ - rustsecp256k1_v0_7_0_modinv32_signed30 modulus; + rustsecp256k1_v0_8_0_modinv32_signed30 modulus; /* modulus^{-1} mod 2^30 */ uint32_t modulus_inv30; -} rustsecp256k1_v0_7_0_modinv32_modinfo; +} rustsecp256k1_v0_8_0_modinv32_modinfo; /* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus). * If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of @@ -34,9 +34,9 @@ typedef struct { * * On output, all of x's limbs will be in [0, 2^30). */ -static void rustsecp256k1_v0_7_0_modinv32_var(rustsecp256k1_v0_7_0_modinv32_signed30 *x, const rustsecp256k1_v0_7_0_modinv32_modinfo *modinfo); +static void rustsecp256k1_v0_8_0_modinv32_var(rustsecp256k1_v0_8_0_modinv32_signed30 *x, const rustsecp256k1_v0_8_0_modinv32_modinfo *modinfo); -/* Same as rustsecp256k1_v0_7_0_modinv32_var, but constant time in x (not in the modulus). */ -static void rustsecp256k1_v0_7_0_modinv32(rustsecp256k1_v0_7_0_modinv32_signed30 *x, const rustsecp256k1_v0_7_0_modinv32_modinfo *modinfo); +/* Same as rustsecp256k1_v0_8_0_modinv32_var, but constant time in x (not in the modulus). */ +static void rustsecp256k1_v0_8_0_modinv32(rustsecp256k1_v0_8_0_modinv32_signed30 *x, const rustsecp256k1_v0_8_0_modinv32_modinfo *modinfo); #endif /* SECP256K1_MODINV32_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h b/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h index 6029288ce..fe51f74cb 100644 --- a/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modinv32_impl.h @@ -21,10 +21,10 @@ */ #ifdef VERIFY -static const rustsecp256k1_v0_7_0_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}}; +static const rustsecp256k1_v0_8_0_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}}; /* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^30). */ -static void rustsecp256k1_v0_7_0_modinv32_mul_30(rustsecp256k1_v0_7_0_modinv32_signed30 *r, const rustsecp256k1_v0_7_0_modinv32_signed30 *a, int alen, int32_t factor) { +static void rustsecp256k1_v0_8_0_modinv32_mul_30(rustsecp256k1_v0_8_0_modinv32_signed30 *r, const rustsecp256k1_v0_8_0_modinv32_signed30 *a, int alen, int32_t factor) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); int64_t c = 0; int i; @@ -38,11 +38,11 @@ static void rustsecp256k1_v0_7_0_modinv32_mul_30(rustsecp256k1_v0_7_0_modinv32_s } /* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A consists of alen limbs; b has 9. */ -static int rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(const rustsecp256k1_v0_7_0_modinv32_signed30 *a, int alen, const rustsecp256k1_v0_7_0_modinv32_signed30 *b, int32_t factor) { +static int rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(const rustsecp256k1_v0_8_0_modinv32_signed30 *a, int alen, const rustsecp256k1_v0_8_0_modinv32_signed30 *b, int32_t factor) { int i; - rustsecp256k1_v0_7_0_modinv32_signed30 am, bm; - rustsecp256k1_v0_7_0_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */ - rustsecp256k1_v0_7_0_modinv32_mul_30(&bm, b, 9, factor); + rustsecp256k1_v0_8_0_modinv32_signed30 am, bm; + rustsecp256k1_v0_8_0_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + rustsecp256k1_v0_8_0_modinv32_mul_30(&bm, b, 9, factor); for (i = 0; i < 8; ++i) { /* Verify that all but the top limb of a and b are normalized. */ VERIFY_CHECK(am.v[i] >> 30 == 0); @@ -60,7 +60,7 @@ static int rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(const rustsecp256k1_v0_7_0_m * to it to bring it to range [0,modulus).
If sign < 0, the input will also be negated in the * process. The input must have limbs in range (-2^30,2^30). The output will have limbs in range * [0,2^30). */ -static void rustsecp256k1_v0_7_0_modinv32_normalize_30(rustsecp256k1_v0_7_0_modinv32_signed30 *r, int32_t sign, const rustsecp256k1_v0_7_0_modinv32_modinfo *modinfo) { +static void rustsecp256k1_v0_8_0_modinv32_normalize_30(rustsecp256k1_v0_8_0_modinv32_signed30 *r, int32_t sign, const rustsecp256k1_v0_8_0_modinv32_modinfo *modinfo) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); int32_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4], r5 = r->v[5], r6 = r->v[6], r7 = r->v[7], r8 = r->v[8]; @@ -73,8 +73,8 @@ static void rustsecp256k1_v0_7_0_modinv32_normalize_30(rustsecp256k1_v0_7_0_modi VERIFY_CHECK(r->v[i] >= -M30); VERIFY_CHECK(r->v[i] <= M30); } - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif /* In a first step, add the modulus if the input is negative, and then negate if requested. @@ -154,8 +154,8 @@ static void rustsecp256k1_v0_7_0_modinv32_normalize_30(rustsecp256k1_v0_7_0_modi VERIFY_CHECK(r6 >> 30 == 0); VERIFY_CHECK(r7 >> 30 == 0); VERIFY_CHECK(r8 >> 30 == 0); - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif } @@ -166,7 +166,7 @@ static void rustsecp256k1_v0_7_0_modinv32_normalize_30(rustsecp256k1_v0_7_0_modi */ typedef struct { int32_t u, v, q, r; -} rustsecp256k1_v0_7_0_modinv32_trans2x2; +} rustsecp256k1_v0_8_0_modinv32_trans2x2; /* Compute the transition matrix and zeta for 30 divsteps. * @@ -178,7 +178,7 @@ typedef struct { * * Implements the divsteps_n_matrix function from the explanation. */ -static int32_t rustsecp256k1_v0_7_0_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_7_0_modinv32_trans2x2 *t) { +static int32_t rustsecp256k1_v0_8_0_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_8_0_modinv32_trans2x2 *t) { /* u,v,q,r are the elements of the transformation matrix being built up, * starting with the identity matrix. Semantically they are signed integers * in range [-2^30,2^30], but here represented as unsigned mod 2^32. This @@ -242,7 +242,7 @@ static int32_t rustsecp256k1_v0_7_0_modinv32_divsteps_30(int32_t zeta, uint32_t * * Implements the divsteps_n_matrix_var function from the explanation. 
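For reference, the primitive these 30-step batches iterate is the Bernstein-Yang divstep (the "explanation" cited in these comments is doc/safegcd_implementation.md upstream); with f kept odd throughout:

```
divstep(delta, f, g) = (1 - delta, g, (g - f)/2)            if delta > 0 and g is odd
                       (1 + delta, f, (g + (g mod 2)*f)/2)   otherwise
```

The constant-time divsteps_30 above tracks zeta = -(delta+1/2) and always performs exactly 30 steps; the variable-time divsteps_30_var that follows tracks eta (equal to -delta in the same write-up) and uses rustsecp256k1_v0_8_0_ctz32_var to apply whole runs of even-g halving steps at once.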
*/ -static int32_t rustsecp256k1_v0_7_0_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_7_0_modinv32_trans2x2 *t) { +static int32_t rustsecp256k1_v0_8_0_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1_v0_8_0_modinv32_trans2x2 *t) { /* inv256[i] = -(2*i+1)^-1 (mod 256) */ static const uint8_t inv256[128] = { 0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59, @@ -258,7 +258,7 @@ static int32_t rustsecp256k1_v0_7_0_modinv32_divsteps_30_var(int32_t eta, uint32 0xEF, 0xC5, 0xA3, 0x39, 0xB7, 0xCD, 0xAB, 0x01 }; - /* Transformation matrix; see comments in rustsecp256k1_v0_7_0_modinv32_divsteps_30. */ + /* Transformation matrix; see comments in rustsecp256k1_v0_8_0_modinv32_divsteps_30. */ uint32_t u = 1, v = 0, q = 0, r = 1; uint32_t f = f0, g = g0, m; uint16_t w; @@ -266,7 +266,7 @@ static int32_t rustsecp256k1_v0_7_0_modinv32_divsteps_30_var(int32_t eta, uint32 for (;;) { /* Use a sentinel bit to count zeros only up to i. */ - zeros = rustsecp256k1_v0_7_0_ctz32_var(g | (UINT32_MAX << i)); + zeros = rustsecp256k1_v0_8_0_ctz32_var(g | (UINT32_MAX << i)); /* Perform zeros divsteps at once; they all just divide g by two. */ g >>= zeros; u <<= zeros; @@ -324,17 +324,17 @@ static int32_t rustsecp256k1_v0_7_0_modinv32_divsteps_30_var(int32_t eta, uint32 * * This implements the update_de function from the explanation. */ -static void rustsecp256k1_v0_7_0_modinv32_update_de_30(rustsecp256k1_v0_7_0_modinv32_signed30 *d, rustsecp256k1_v0_7_0_modinv32_signed30 *e, const rustsecp256k1_v0_7_0_modinv32_trans2x2 *t, const rustsecp256k1_v0_7_0_modinv32_modinfo* modinfo) { +static void rustsecp256k1_v0_8_0_modinv32_update_de_30(rustsecp256k1_v0_8_0_modinv32_signed30 *d, rustsecp256k1_v0_8_0_modinv32_signed30 *e, const rustsecp256k1_v0_8_0_modinv32_trans2x2 *t, const rustsecp256k1_v0_8_0_modinv32_modinfo* modinfo) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); const int32_t u = t->u, v = t->v, q = t->q, r = t->r; int32_t di, ei, md, me, sd, se; int64_t cd, ce; int i; #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ VERIFY_CHECK((labs(u) + labs(v)) >= 0); /* |u|+|v| doesn't overflow */ VERIFY_CHECK((labs(q) + labs(r)) >= 0); /* |q|+|r| doesn't overflow */ VERIFY_CHECK((labs(u) + labs(v)) <= M30 + 1); /* |u|+|v| <= 2^30 */ @@ -375,10 +375,10 @@ static void rustsecp256k1_v0_7_0_modinv32_update_de_30(rustsecp256k1_v0_7_0_modi d->v[8] = (int32_t)cd; e->v[8] = (int32_t)ce; #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ - 
VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ #endif } @@ -386,7 +386,7 @@ static void rustsecp256k1_v0_7_0_modinv32_update_de_30(rustsecp256k1_v0_7_0_modi * * This implements the update_fg function from the explanation. */ -static void rustsecp256k1_v0_7_0_modinv32_update_fg_30(rustsecp256k1_v0_7_0_modinv32_signed30 *f, rustsecp256k1_v0_7_0_modinv32_signed30 *g, const rustsecp256k1_v0_7_0_modinv32_trans2x2 *t) { +static void rustsecp256k1_v0_8_0_modinv32_update_fg_30(rustsecp256k1_v0_8_0_modinv32_signed30 *f, rustsecp256k1_v0_8_0_modinv32_signed30 *g, const rustsecp256k1_v0_8_0_modinv32_trans2x2 *t) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); const int32_t u = t->u, v = t->v, q = t->q, r = t->r; int32_t fi, gi; @@ -421,7 +421,7 @@ static void rustsecp256k1_v0_7_0_modinv32_update_fg_30(rustsecp256k1_v0_7_0_modi * * This implements the update_fg function from the explanation in modinv64_impl.h. */ -static void rustsecp256k1_v0_7_0_modinv32_update_fg_30_var(int len, rustsecp256k1_v0_7_0_modinv32_signed30 *f, rustsecp256k1_v0_7_0_modinv32_signed30 *g, const rustsecp256k1_v0_7_0_modinv32_trans2x2 *t) { +static void rustsecp256k1_v0_8_0_modinv32_update_fg_30_var(int len, rustsecp256k1_v0_8_0_modinv32_signed30 *f, rustsecp256k1_v0_8_0_modinv32_signed30 *g, const rustsecp256k1_v0_8_0_modinv32_trans2x2 *t) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); const int32_t u = t->u, v = t->v, q = t->q, r = t->r; int32_t fi, gi; @@ -452,35 +452,35 @@ static void rustsecp256k1_v0_7_0_modinv32_update_fg_30_var(int len, rustsecp256k } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */ -static void rustsecp256k1_v0_7_0_modinv32(rustsecp256k1_v0_7_0_modinv32_signed30 *x, const rustsecp256k1_v0_7_0_modinv32_modinfo *modinfo) { +static void rustsecp256k1_v0_8_0_modinv32(rustsecp256k1_v0_8_0_modinv32_signed30 *x, const rustsecp256k1_v0_8_0_modinv32_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */ - rustsecp256k1_v0_7_0_modinv32_signed30 d = {{0}}; - rustsecp256k1_v0_7_0_modinv32_signed30 e = {{1}}; - rustsecp256k1_v0_7_0_modinv32_signed30 f = modinfo->modulus; - rustsecp256k1_v0_7_0_modinv32_signed30 g = *x; + rustsecp256k1_v0_8_0_modinv32_signed30 d = {{0}}; + rustsecp256k1_v0_8_0_modinv32_signed30 e = {{1}}; + rustsecp256k1_v0_8_0_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1_v0_8_0_modinv32_signed30 g = *x; int i; int32_t zeta = -1; /* zeta = -(delta+1/2); delta is initially 1/2. */ /* Do 20 iterations of 30 divsteps each = 600 divsteps. 590 suffices for 256-bit inputs. */ for (i = 0; i < 20; ++i) { /* Compute transition matrix and new zeta after 30 divsteps. 
*/ - rustsecp256k1_v0_7_0_modinv32_trans2x2 t; - zeta = rustsecp256k1_v0_7_0_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t); + rustsecp256k1_v0_8_0_modinv32_trans2x2 t; + zeta = rustsecp256k1_v0_8_0_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ - rustsecp256k1_v0_7_0_modinv32_update_de_30(&d, &e, &t, modinfo); + rustsecp256k1_v0_8_0_modinv32_update_de_30(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. */ #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif - rustsecp256k1_v0_7_0_modinv32_update_fg_30(&f, &g, &t); + rustsecp256k1_v0_8_0_modinv32_update_fg_30(&f, &g, &t); #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif } @@ -489,28 +489,28 @@ static void rustsecp256k1_v0_7_0_modinv32(rustsecp256k1_v0_7_0_modinv32_signed30 * values i.e. +/- 1, and d now contains +/- the modular inverse. 
*/ #ifdef VERIFY /* g == 0 */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 || - rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 || - (rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && - rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && - (rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 || - rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0))); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 || + rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0))); #endif /* Optionally negate d, normalize to [0,modulus), and return it. */ - rustsecp256k1_v0_7_0_modinv32_normalize_30(&d, f.v[8], modinfo); + rustsecp256k1_v0_8_0_modinv32_normalize_30(&d, f.v[8], modinfo); *x = d; } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ -static void rustsecp256k1_v0_7_0_modinv32_var(rustsecp256k1_v0_7_0_modinv32_signed30 *x, const rustsecp256k1_v0_7_0_modinv32_modinfo *modinfo) { +static void rustsecp256k1_v0_8_0_modinv32_var(rustsecp256k1_v0_8_0_modinv32_signed30 *x, const rustsecp256k1_v0_8_0_modinv32_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ - rustsecp256k1_v0_7_0_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}}; - rustsecp256k1_v0_7_0_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}}; - rustsecp256k1_v0_7_0_modinv32_signed30 f = modinfo->modulus; - rustsecp256k1_v0_7_0_modinv32_signed30 g = *x; + rustsecp256k1_v0_8_0_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}}; + rustsecp256k1_v0_8_0_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}}; + rustsecp256k1_v0_8_0_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1_v0_8_0_modinv32_signed30 g = *x; #ifdef VERIFY int i = 0; #endif @@ -521,18 +521,18 @@ static void rustsecp256k1_v0_7_0_modinv32_var(rustsecp256k1_v0_7_0_modinv32_sign /* Do iterations of 30 divsteps each until g=0. */ while (1) { /* Compute transition matrix and new eta after 30 divsteps. */ - rustsecp256k1_v0_7_0_modinv32_trans2x2 t; - eta = rustsecp256k1_v0_7_0_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t); + rustsecp256k1_v0_8_0_modinv32_trans2x2 t; + eta = rustsecp256k1_v0_8_0_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ - rustsecp256k1_v0_7_0_modinv32_update_de_30(&d, &e, &t, modinfo); + rustsecp256k1_v0_8_0_modinv32_update_de_30(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. 
*/ #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif - rustsecp256k1_v0_7_0_modinv32_update_fg_30_var(len, &f, &g, &t); + rustsecp256k1_v0_8_0_modinv32_update_fg_30_var(len, &f, &g, &t); /* If the bottom limb of g is 0, there is a chance g=0. */ if (g.v[0] == 0) { cond = 0; @@ -558,10 +558,10 @@ static void rustsecp256k1_v0_7_0_modinv32_var(rustsecp256k1_v0_7_0_modinv32_sign } #ifdef VERIFY VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif } @@ -569,18 +569,18 @@ static void rustsecp256k1_v0_7_0_modinv32_var(rustsecp256k1_v0_7_0_modinv32_sign * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
*/ #ifdef VERIFY /* g == 0 */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 || - rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 || - (rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && - rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && - (rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 || - rustsecp256k1_v0_7_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0))); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 || + rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_8_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0))); #endif /* Optionally negate d, normalize to [0,modulus), and return it. */ - rustsecp256k1_v0_7_0_modinv32_normalize_30(&d, f.v[len - 1], modinfo); + rustsecp256k1_v0_8_0_modinv32_normalize_30(&d, f.v[len - 1], modinfo); *x = d; } diff --git a/secp256k1-sys/depend/secp256k1/src/modinv64.h b/secp256k1-sys/depend/secp256k1/src/modinv64.h index 46cdb013f..12bbf77b2 100644 --- a/secp256k1-sys/depend/secp256k1/src/modinv64.h +++ b/secp256k1-sys/depend/secp256k1/src/modinv64.h @@ -22,15 +22,15 @@ * Its value is sum(v[i] * 2^(62*i), i=0..4). */ typedef struct { int64_t v[5]; -} rustsecp256k1_v0_7_0_modinv64_signed62; +} rustsecp256k1_v0_8_0_modinv64_signed62; typedef struct { /* The modulus in signed62 notation, must be odd and in [3, 2^256]. */ - rustsecp256k1_v0_7_0_modinv64_signed62 modulus; + rustsecp256k1_v0_8_0_modinv64_signed62 modulus; /* modulus^{-1} mod 2^62 */ uint64_t modulus_inv62; -} rustsecp256k1_v0_7_0_modinv64_modinfo; +} rustsecp256k1_v0_8_0_modinv64_modinfo; /* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus). * If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of @@ -38,9 +38,9 @@ typedef struct { * * On output, all of x's limbs will be in [0, 2^62). */ -static void rustsecp256k1_v0_7_0_modinv64_var(rustsecp256k1_v0_7_0_modinv64_signed62 *x, const rustsecp256k1_v0_7_0_modinv64_modinfo *modinfo); +static void rustsecp256k1_v0_8_0_modinv64_var(rustsecp256k1_v0_8_0_modinv64_signed62 *x, const rustsecp256k1_v0_8_0_modinv64_modinfo *modinfo); -/* Same as rustsecp256k1_v0_7_0_modinv64_var, but constant time in x (not in the modulus). */ -static void rustsecp256k1_v0_7_0_modinv64(rustsecp256k1_v0_7_0_modinv64_signed62 *x, const rustsecp256k1_v0_7_0_modinv64_modinfo *modinfo); +/* Same as rustsecp256k1_v0_8_0_modinv64_var, but constant time in x (not in the modulus). 
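The signed62 layout is the 64-bit counterpart of the signed30 form above: a value x is carried in five int64_t limbs as

```
x = v[0] + v[1]*2^62 + v[2]*2^124 + v[3]*2^186 + v[4]*2^248
```

so after normalization the four low limbs hold 62 bits each and the top limb holds the remaining high bits (8 bits for a fully reduced 256-bit value).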
*/ +static void rustsecp256k1_v0_8_0_modinv64(rustsecp256k1_v0_8_0_modinv64_signed62 *x, const rustsecp256k1_v0_8_0_modinv64_modinfo *modinfo); #endif /* SECP256K1_MODINV64_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h b/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h index 01f005814..56904b1c5 100644 --- a/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modinv64_impl.h @@ -7,10 +7,9 @@ #ifndef SECP256K1_MODINV64_IMPL_H #define SECP256K1_MODINV64_IMPL_H +#include "int128.h" #include "modinv64.h" -#include "util.h" - /* This file implements modular inversion based on the paper "Fast constant-time gcd computation and * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. * @@ -18,37 +17,48 @@ * implementation for N=62, using 62-bit signed limbs represented as int64_t. */ +/* Data type for transition matrices (see section 3 of explanation). + * + * t = [ u v ] + * [ q r ] + */ +typedef struct { + int64_t u, v, q, r; +} rustsecp256k1_v0_8_0_modinv64_trans2x2; + #ifdef VERIFY /* Helper function to compute the absolute value of an int64_t. * (we don't use abs/labs/llabs as it depends on the int sizes). */ -static int64_t rustsecp256k1_v0_7_0_modinv64_abs(int64_t v) { +static int64_t rustsecp256k1_v0_8_0_modinv64_abs(int64_t v) { VERIFY_CHECK(v > INT64_MIN); if (v < 0) return -v; return v; } -static const rustsecp256k1_v0_7_0_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}}; +static const rustsecp256k1_v0_8_0_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}}; /* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */ -static void rustsecp256k1_v0_7_0_modinv64_mul_62(rustsecp256k1_v0_7_0_modinv64_signed62 *r, const rustsecp256k1_v0_7_0_modinv64_signed62 *a, int alen, int64_t factor) { +static void rustsecp256k1_v0_8_0_modinv64_mul_62(rustsecp256k1_v0_8_0_modinv64_signed62 *r, const rustsecp256k1_v0_8_0_modinv64_signed62 *a, int alen, int64_t factor) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); - int128_t c = 0; + rustsecp256k1_v0_8_0_int128 c, d; int i; + rustsecp256k1_v0_8_0_i128_from_i64(&c, 0); for (i = 0; i < 4; ++i) { - if (i < alen) c += (int128_t)a->v[i] * factor; - r->v[i] = (int64_t)c & M62; c >>= 62; + if (i < alen) rustsecp256k1_v0_8_0_i128_accum_mul(&c, a->v[i], factor); + r->v[i] = rustsecp256k1_v0_8_0_i128_to_i64(&c) & M62; rustsecp256k1_v0_8_0_i128_rshift(&c, 62); } - if (4 < alen) c += (int128_t)a->v[4] * factor; - VERIFY_CHECK(c == (int64_t)c); - r->v[4] = (int64_t)c; + if (4 < alen) rustsecp256k1_v0_8_0_i128_accum_mul(&c, a->v[4], factor); + rustsecp256k1_v0_8_0_i128_from_i64(&d, rustsecp256k1_v0_8_0_i128_to_i64(&c)); + VERIFY_CHECK(rustsecp256k1_v0_8_0_i128_eq_var(&c, &d)); + r->v[4] = rustsecp256k1_v0_8_0_i128_to_i64(&c); } /* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A has alen limbs; b has 5. */ -static int rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(const rustsecp256k1_v0_7_0_modinv64_signed62 *a, int alen, const rustsecp256k1_v0_7_0_modinv64_signed62 *b, int64_t factor) { +static int rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(const rustsecp256k1_v0_8_0_modinv64_signed62 *a, int alen, const rustsecp256k1_v0_8_0_modinv64_signed62 *b, int64_t factor) { int i; - rustsecp256k1_v0_7_0_modinv64_signed62 am, bm; - rustsecp256k1_v0_7_0_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a.
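The mechanical change running through this file starts in `mul_62` above: bare `int128_t` expressions become calls into the new `rustsecp256k1_v0_8_0_int128` interface from int128.h, so that targets without a native 128-bit type can supply an equivalent implementation behind the same entry points. A minimal sketch of how the accessors map onto the old operators, assuming a compiler with `__int128` (the `demo_` wrapper is illustrative, not the vendored int128.h):

```
#include <assert.h>
#include <stdint.h>

/* Illustrative stand-in for rustsecp256k1_v0_8_0_int128 on targets that do
 * have a native 128-bit type; the library hides the representation behind
 * the same function-call interface. */
typedef struct { __int128 v; } demo_int128;

static void demo_i128_from_i64(demo_int128 *r, int64_t a)             { r->v = a; }
static void demo_i128_accum_mul(demo_int128 *r, int64_t a, int64_t b) { r->v += (__int128)a * b; }
static int64_t demo_i128_to_i64(const demo_int128 *a)                 { return (int64_t)a->v; }
static void demo_i128_rshift(demo_int128 *r, unsigned int n)          { r->v >>= n; }

int main(void) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    demo_int128 c;
    /* Old style: c = 0; c += (int128_t)a * b; limb = (int64_t)c & M62; c >>= 62; */
    demo_i128_from_i64(&c, 0);
    demo_i128_accum_mul(&c, 3, 5);
    assert((demo_i128_to_i64(&c) & M62) == 15);
    demo_i128_rshift(&c, 62);
    assert(demo_i128_to_i64(&c) == 0);
    return 0;
}
```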
*/ - rustsecp256k1_v0_7_0_modinv64_mul_62(&bm, b, 5, factor); + rustsecp256k1_v0_8_0_modinv64_signed62 am, bm; + rustsecp256k1_v0_8_0_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + rustsecp256k1_v0_8_0_modinv64_mul_62(&bm, b, 5, factor); for (i = 0; i < 4; ++i) { /* Verify that all but the top limb of a and b are normalized. */ VERIFY_CHECK(am.v[i] >> 62 == 0); @@ -60,13 +70,20 @@ static int rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(const rustsecp256k1_v0_7_0_m } return 0; } + +/* Check if the determinant of t is equal to 1 << n. */ +static int rustsecp256k1_v0_8_0_modinv64_det_check_pow2(const rustsecp256k1_v0_8_0_modinv64_trans2x2 *t, unsigned int n) { + rustsecp256k1_v0_8_0_int128 a; + rustsecp256k1_v0_8_0_i128_det(&a, t->u, t->v, t->q, t->r); + return rustsecp256k1_v0_8_0_i128_check_pow2(&a, n); +} #endif /* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range * [0,2^62). */ -static void rustsecp256k1_v0_7_0_modinv64_normalize_62(rustsecp256k1_v0_7_0_modinv64_signed62 *r, int64_t sign, const rustsecp256k1_v0_7_0_modinv64_modinfo *modinfo) { +static void rustsecp256k1_v0_8_0_modinv64_normalize_62(rustsecp256k1_v0_8_0_modinv64_signed62 *r, int64_t sign, const rustsecp256k1_v0_8_0_modinv64_modinfo *modinfo) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4]; int64_t cond_add, cond_negate; @@ -78,8 +95,8 @@ static void rustsecp256k1_v0_7_0_modinv64_normalize_62(rustsecp256k1_v0_7_0_modi VERIFY_CHECK(r->v[i] >= -M62); VERIFY_CHECK(r->v[i] <= M62); } - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif /* In a first step, add the modulus if the input is negative, and then negate if requested. @@ -131,20 +148,11 @@ static void rustsecp256k1_v0_7_0_modinv64_normalize_62(rustsecp256k1_v0_7_0_modi VERIFY_CHECK(r2 >> 62 == 0); VERIFY_CHECK(r3 >> 62 == 0); VERIFY_CHECK(r4 >> 62 == 0); - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif } -/* Data type for transition matrices (see section 3 of explanation). - * - * t = [ u v ] - * [ q r ] - */ -typedef struct { - int64_t u, v, q, r; -} rustsecp256k1_v0_7_0_modinv64_trans2x2; - /* Compute the transition matrix and eta for 59 divsteps (where zeta=-(delta+1/2)). * Note that the transformation matrix is scaled by 2^62 and not 2^59. * @@ -156,7 +164,7 @@ typedef struct { * * Implements the divsteps_n_matrix function from the explanation. 
*/ -static int64_t rustsecp256k1_v0_7_0_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_7_0_modinv64_trans2x2 *t) { +static int64_t rustsecp256k1_v0_8_0_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_8_0_modinv64_trans2x2 *t) { /* u,v,q,r are the elements of the transformation matrix being built up, * starting with the identity matrix times 8 (because the caller expects * a result scaled by 2^62). Semantically they are signed integers @@ -203,13 +211,15 @@ static int64_t rustsecp256k1_v0_7_0_modinv64_divsteps_59(int64_t zeta, uint64_t t->v = (int64_t)v; t->q = (int64_t)q; t->r = (int64_t)r; +#ifdef VERIFY /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). As each divstep's individual matrix has determinant 2, the * aggregate of 59 of them will have determinant 2^59. Multiplying with the initial * 8*identity (which has determinant 2^6) means the overall outputs has determinant * 2^65. */ - VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 65); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_det_check_pow2(t, 65)); +#endif return zeta; } @@ -223,8 +233,8 @@ static int64_t rustsecp256k1_v0_7_0_modinv64_divsteps_59(int64_t zeta, uint64_t * * Implements the divsteps_n_matrix_var function from the explanation. */ -static int64_t rustsecp256k1_v0_7_0_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_7_0_modinv64_trans2x2 *t) { - /* Transformation matrix; see comments in rustsecp256k1_v0_7_0_modinv64_divsteps_62. */ +static int64_t rustsecp256k1_v0_8_0_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_8_0_modinv64_trans2x2 *t) { + /* Transformation matrix; see comments in rustsecp256k1_v0_8_0_modinv64_divsteps_62. */ uint64_t u = 1, v = 0, q = 0, r = 1; uint64_t f = f0, g = g0, m; uint32_t w; @@ -232,7 +242,7 @@ static int64_t rustsecp256k1_v0_7_0_modinv64_divsteps_62_var(int64_t eta, uint64 for (;;) { /* Use a sentinel bit to count zeros only up to i. */ - zeros = rustsecp256k1_v0_7_0_ctz64_var(g | (UINT64_MAX << i)); + zeros = rustsecp256k1_v0_8_0_ctz64_var(g | (UINT64_MAX << i)); /* Perform zeros divsteps at once; they all just divide g by two. */ g >>= zeros; u <<= zeros; @@ -286,11 +296,13 @@ static int64_t rustsecp256k1_v0_7_0_modinv64_divsteps_62_var(int64_t eta, uint64 t->v = (int64_t)v; t->q = (int64_t)q; t->r = (int64_t)r; +#ifdef VERIFY /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). As each divstep's individual matrix has determinant 2, the * aggregate of 62 of them will have determinant 2^62. */ - VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_det_check_pow2(t, 62)); +#endif return eta; } @@ -301,22 +313,22 @@ static int64_t rustsecp256k1_v0_7_0_modinv64_divsteps_62_var(int64_t eta, uint64 * * This implements the update_de function from the explanation. 
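The other new VERIFY helper, `rustsecp256k1_v0_8_0_modinv64_det_check_pow2`, folds the old inline determinant test into the int128 interface. What it checks is unchanged: each divstep contributes a determinant factor of 2, so the 59-step batch scaled by the initial 8*identity must satisfy det(t) = u*r - v*q = 2^65, and the 62-step variable-time batch 2^62. Standalone, with the native `__int128` the old code used:

```
#include <assert.h>
#include <stdint.h>

typedef struct { int64_t u, v, q, r; } trans2x2;

/* det(t) == 2^n, exactly what the old inline expression
 * (int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << n  checked. */
static int det_is_pow2(const trans2x2 *t, unsigned int n) {
    return (__int128)t->u * t->r - (__int128)t->v * t->q == ((__int128)1) << n;
}

int main(void) {
    /* The identity matrix scaled by 8, the divsteps_59 starting point:
     * det(8*I) = 64 = 2^6, the extra factor that turns 2^59 into 2^65. */
    trans2x2 t = { 8, 0, 0, 8 };
    assert(det_is_pow2(&t, 6));
    return 0;
}
```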
*/ -static void rustsecp256k1_v0_7_0_modinv64_update_de_62(rustsecp256k1_v0_7_0_modinv64_signed62 *d, rustsecp256k1_v0_7_0_modinv64_signed62 *e, const rustsecp256k1_v0_7_0_modinv64_trans2x2 *t, const rustsecp256k1_v0_7_0_modinv64_modinfo* modinfo) { +static void rustsecp256k1_v0_8_0_modinv64_update_de_62(rustsecp256k1_v0_8_0_modinv64_signed62 *d, rustsecp256k1_v0_8_0_modinv64_signed62 *e, const rustsecp256k1_v0_8_0_modinv64_trans2x2 *t, const rustsecp256k1_v0_8_0_modinv64_modinfo* modinfo) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4]; const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4]; const int64_t u = t->u, v = t->v, q = t->q, r = t->r; int64_t md, me, sd, se; - int128_t cd, ce; + rustsecp256k1_v0_8_0_int128 cd, ce; #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ - VERIFY_CHECK((rustsecp256k1_v0_7_0_modinv64_abs(u) + rustsecp256k1_v0_7_0_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */ - VERIFY_CHECK((rustsecp256k1_v0_7_0_modinv64_abs(q) + rustsecp256k1_v0_7_0_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */ - VERIFY_CHECK((rustsecp256k1_v0_7_0_modinv64_abs(u) + rustsecp256k1_v0_7_0_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */ - VERIFY_CHECK((rustsecp256k1_v0_7_0_modinv64_abs(q) + rustsecp256k1_v0_7_0_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK((rustsecp256k1_v0_8_0_modinv64_abs(u) + rustsecp256k1_v0_8_0_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */ + VERIFY_CHECK((rustsecp256k1_v0_8_0_modinv64_abs(q) + rustsecp256k1_v0_8_0_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */ + VERIFY_CHECK((rustsecp256k1_v0_8_0_modinv64_abs(u) + rustsecp256k1_v0_8_0_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */ + VERIFY_CHECK((rustsecp256k1_v0_8_0_modinv64_abs(q) + rustsecp256k1_v0_8_0_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */ #endif /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ sd = d4 >> 63; @@ -324,59 +336,69 @@ static void rustsecp256k1_v0_7_0_modinv64_update_de_62(rustsecp256k1_v0_7_0_modi md = (u & sd) + (v & se); me = (q & sd) + (r & se); /* Begin computing t*[d,e]. */ - cd = (int128_t)u * d0 + (int128_t)v * e0; - ce = (int128_t)q * d0 + (int128_t)r * e0; + rustsecp256k1_v0_8_0_i128_mul(&cd, u, d0); + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, v, e0); + rustsecp256k1_v0_8_0_i128_mul(&ce, q, d0); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, r, e0); /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. 
*/ - md -= (modinfo->modulus_inv62 * (uint64_t)cd + md) & M62; - me -= (modinfo->modulus_inv62 * (uint64_t)ce + me) & M62; + md -= (modinfo->modulus_inv62 * (uint64_t)rustsecp256k1_v0_8_0_i128_to_i64(&cd) + md) & M62; + me -= (modinfo->modulus_inv62 * (uint64_t)rustsecp256k1_v0_8_0_i128_to_i64(&ce) + me) & M62; /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */ - cd += (int128_t)modinfo->modulus.v[0] * md; - ce += (int128_t)modinfo->modulus.v[0] * me; + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, modinfo->modulus.v[0], md); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, modinfo->modulus.v[0], me); /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */ - VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62; - VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62; + VERIFY_CHECK((rustsecp256k1_v0_8_0_i128_to_i64(&cd) & M62) == 0); rustsecp256k1_v0_8_0_i128_rshift(&cd, 62); + VERIFY_CHECK((rustsecp256k1_v0_8_0_i128_to_i64(&ce) & M62) == 0); rustsecp256k1_v0_8_0_i128_rshift(&ce, 62); /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */ - cd += (int128_t)u * d1 + (int128_t)v * e1; - ce += (int128_t)q * d1 + (int128_t)r * e1; + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, u, d1); + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, v, e1); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, q, d1); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, r, e1); if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */ - cd += (int128_t)modinfo->modulus.v[1] * md; - ce += (int128_t)modinfo->modulus.v[1] * me; + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, modinfo->modulus.v[1], md); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, modinfo->modulus.v[1], me); } - d->v[0] = (int64_t)cd & M62; cd >>= 62; - e->v[0] = (int64_t)ce & M62; ce >>= 62; + d->v[0] = rustsecp256k1_v0_8_0_i128_to_i64(&cd) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cd, 62); + e->v[0] = rustsecp256k1_v0_8_0_i128_to_i64(&ce) & M62; rustsecp256k1_v0_8_0_i128_rshift(&ce, 62); /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */ - cd += (int128_t)u * d2 + (int128_t)v * e2; - ce += (int128_t)q * d2 + (int128_t)r * e2; + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, u, d2); + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, v, e2); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, q, d2); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, r, e2); if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */ - cd += (int128_t)modinfo->modulus.v[2] * md; - ce += (int128_t)modinfo->modulus.v[2] * me; + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, modinfo->modulus.v[2], md); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, modinfo->modulus.v[2], me); } - d->v[1] = (int64_t)cd & M62; cd >>= 62; - e->v[1] = (int64_t)ce & M62; ce >>= 62; + d->v[1] = rustsecp256k1_v0_8_0_i128_to_i64(&cd) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cd, 62); + e->v[1] = rustsecp256k1_v0_8_0_i128_to_i64(&ce) & M62; rustsecp256k1_v0_8_0_i128_rshift(&ce, 62); /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */ - cd += (int128_t)u * d3 + (int128_t)v * e3; - ce += (int128_t)q * d3 + (int128_t)r * e3; + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, u, d3); + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, v, e3); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, q, d3); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, r, e3); if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. 
*/ - cd += (int128_t)modinfo->modulus.v[3] * md; - ce += (int128_t)modinfo->modulus.v[3] * me; + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, modinfo->modulus.v[3], md); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, modinfo->modulus.v[3], me); } - d->v[2] = (int64_t)cd & M62; cd >>= 62; - e->v[2] = (int64_t)ce & M62; ce >>= 62; + d->v[2] = rustsecp256k1_v0_8_0_i128_to_i64(&cd) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cd, 62); + e->v[2] = rustsecp256k1_v0_8_0_i128_to_i64(&ce) & M62; rustsecp256k1_v0_8_0_i128_rshift(&ce, 62); /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */ - cd += (int128_t)u * d4 + (int128_t)v * e4; - ce += (int128_t)q * d4 + (int128_t)r * e4; - cd += (int128_t)modinfo->modulus.v[4] * md; - ce += (int128_t)modinfo->modulus.v[4] * me; - d->v[3] = (int64_t)cd & M62; cd >>= 62; - e->v[3] = (int64_t)ce & M62; ce >>= 62; + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, u, d4); + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, v, e4); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, q, d4); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, r, e4); + rustsecp256k1_v0_8_0_i128_accum_mul(&cd, modinfo->modulus.v[4], md); + rustsecp256k1_v0_8_0_i128_accum_mul(&ce, modinfo->modulus.v[4], me); + d->v[3] = rustsecp256k1_v0_8_0_i128_to_i64(&cd) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cd, 62); + e->v[3] = rustsecp256k1_v0_8_0_i128_to_i64(&ce) & M62; rustsecp256k1_v0_8_0_i128_rshift(&ce, 62); /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */ - d->v[4] = (int64_t)cd; - e->v[4] = (int64_t)ce; + d->v[4] = rustsecp256k1_v0_8_0_i128_to_i64(&cd); + e->v[4] = rustsecp256k1_v0_8_0_i128_to_i64(&ce); #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ #endif } @@ -384,41 +406,51 @@ static void rustsecp256k1_v0_7_0_modinv64_update_de_62(rustsecp256k1_v0_7_0_modi * * This implements the update_fg function from the explanation. */ -static void rustsecp256k1_v0_7_0_modinv64_update_fg_62(rustsecp256k1_v0_7_0_modinv64_signed62 *f, rustsecp256k1_v0_7_0_modinv64_signed62 *g, const rustsecp256k1_v0_7_0_modinv64_trans2x2 *t) { +static void rustsecp256k1_v0_8_0_modinv64_update_fg_62(rustsecp256k1_v0_8_0_modinv64_signed62 *f, rustsecp256k1_v0_8_0_modinv64_signed62 *g, const rustsecp256k1_v0_8_0_modinv64_trans2x2 *t) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4]; const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4]; const int64_t u = t->u, v = t->v, q = t->q, r = t->r; - int128_t cf, cg; + rustsecp256k1_v0_8_0_int128 cf, cg; /* Start computing t*[f,g]. 
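The subtlest step in `update_de_62` is the md/me correction above: since `modulus_inv62` is the inverse of the modulus mod 2^62, subtracting `((modulus_inv62 * cd + md) & M62)` from `md` guarantees that `cd + modulus*md` ends in 62 zero bits, so the subsequent right shift loses nothing. A toy demonstration of the cancellation, assuming a compiler with `__int128` (the concrete values are arbitrary, names illustrative):

```
#include <assert.h>
#include <stdint.h>

int main(void) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    /* A toy odd "modulus" below 2^62 and its inverse mod 2^64 (and hence mod
     * 2^62), obtained by Hensel lifting: each step doubles the correct bits. */
    uint64_t modulus = 0x2b830a29a1f39649ULL & (uint64_t)M62;
    uint64_t inv = modulus; /* m*m == 1 (mod 8) for odd m: 3 correct bits */
    int j;
    for (j = 0; j < 5; ++j) inv *= 2 - modulus * inv;
    assert(((modulus * inv) & (uint64_t)M62) == 1);

    /* An arbitrary accumulator value cd and correction md, as in update_de_62. */
    __int128 cd = (__int128)0x123456789abcdefLL * -0x6543210fLL;
    int64_t md = 0;
    md -= (int64_t)((inv * (uint64_t)cd + (uint64_t)md) & (uint64_t)M62);
    cd += (__int128)modulus * md;
    assert(((int64_t)cd & M62) == 0); /* bottom 62 bits cancelled exactly */
    return 0;
}
```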
*/ - cf = (int128_t)u * f0 + (int128_t)v * g0; - cg = (int128_t)q * f0 + (int128_t)r * g0; + rustsecp256k1_v0_8_0_i128_mul(&cf, u, f0); + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, v, g0); + rustsecp256k1_v0_8_0_i128_mul(&cg, q, f0); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, r, g0); /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ - VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; - VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + VERIFY_CHECK((rustsecp256k1_v0_8_0_i128_to_i64(&cf) & M62) == 0); rustsecp256k1_v0_8_0_i128_rshift(&cf, 62); + VERIFY_CHECK((rustsecp256k1_v0_8_0_i128_to_i64(&cg) & M62) == 0); rustsecp256k1_v0_8_0_i128_rshift(&cg, 62); /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */ - cf += (int128_t)u * f1 + (int128_t)v * g1; - cg += (int128_t)q * f1 + (int128_t)r * g1; - f->v[0] = (int64_t)cf & M62; cf >>= 62; - g->v[0] = (int64_t)cg & M62; cg >>= 62; + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, u, f1); + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, v, g1); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, q, f1); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, r, g1); + f->v[0] = rustsecp256k1_v0_8_0_i128_to_i64(&cf) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cf, 62); + g->v[0] = rustsecp256k1_v0_8_0_i128_to_i64(&cg) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cg, 62); /* Compute limb 2 of t*[f,g], and store it as output limb 1. */ - cf += (int128_t)u * f2 + (int128_t)v * g2; - cg += (int128_t)q * f2 + (int128_t)r * g2; - f->v[1] = (int64_t)cf & M62; cf >>= 62; - g->v[1] = (int64_t)cg & M62; cg >>= 62; + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, u, f2); + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, v, g2); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, q, f2); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, r, g2); + f->v[1] = rustsecp256k1_v0_8_0_i128_to_i64(&cf) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cf, 62); + g->v[1] = rustsecp256k1_v0_8_0_i128_to_i64(&cg) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cg, 62); /* Compute limb 3 of t*[f,g], and store it as output limb 2. */ - cf += (int128_t)u * f3 + (int128_t)v * g3; - cg += (int128_t)q * f3 + (int128_t)r * g3; - f->v[2] = (int64_t)cf & M62; cf >>= 62; - g->v[2] = (int64_t)cg & M62; cg >>= 62; + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, u, f3); + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, v, g3); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, q, f3); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, r, g3); + f->v[2] = rustsecp256k1_v0_8_0_i128_to_i64(&cf) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cf, 62); + g->v[2] = rustsecp256k1_v0_8_0_i128_to_i64(&cg) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cg, 62); /* Compute limb 4 of t*[f,g], and store it as output limb 3. */ - cf += (int128_t)u * f4 + (int128_t)v * g4; - cg += (int128_t)q * f4 + (int128_t)r * g4; - f->v[3] = (int64_t)cf & M62; cf >>= 62; - g->v[3] = (int64_t)cg & M62; cg >>= 62; + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, u, f4); + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, v, g4); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, q, f4); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, r, g4); + f->v[3] = rustsecp256k1_v0_8_0_i128_to_i64(&cf) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cf, 62); + g->v[3] = rustsecp256k1_v0_8_0_i128_to_i64(&cg) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cg, 62); /* What remains is limb 5 of t*[f,g]; store it as output limb 4. 
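Both `update_fg` variants implement the same product, one with a fixed limb count and one variable; over whole values each batch computes

```
\begin{pmatrix} f' \\ g' \end{pmatrix}
  = 2^{-62}\, t \begin{pmatrix} f \\ g \end{pmatrix}
  = 2^{-62} \begin{pmatrix} u f + v g \\ q f + r g \end{pmatrix}
```

The 2^-62 is exact: the matrices come out of the divstep batches pre-scaled to 2^62 (divsteps_59 starts from 8 times the identity to make up the difference), and the VERIFY_CHECKs confirm the bottom 62 bits vanish before they are shifted out.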
*/ - f->v[4] = (int64_t)cf; - g->v[4] = (int64_t)cg; + f->v[4] = rustsecp256k1_v0_8_0_i128_to_i64(&cf); + g->v[4] = rustsecp256k1_v0_8_0_i128_to_i64(&cg); } /* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps. @@ -427,66 +459,70 @@ static void rustsecp256k1_v0_7_0_modinv64_update_fg_62(rustsecp256k1_v0_7_0_modi * * This implements the update_fg function from the explanation. */ -static void rustsecp256k1_v0_7_0_modinv64_update_fg_62_var(int len, rustsecp256k1_v0_7_0_modinv64_signed62 *f, rustsecp256k1_v0_7_0_modinv64_signed62 *g, const rustsecp256k1_v0_7_0_modinv64_trans2x2 *t) { +static void rustsecp256k1_v0_8_0_modinv64_update_fg_62_var(int len, rustsecp256k1_v0_8_0_modinv64_signed62 *f, rustsecp256k1_v0_8_0_modinv64_signed62 *g, const rustsecp256k1_v0_8_0_modinv64_trans2x2 *t) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); const int64_t u = t->u, v = t->v, q = t->q, r = t->r; int64_t fi, gi; - int128_t cf, cg; + rustsecp256k1_v0_8_0_int128 cf, cg; int i; VERIFY_CHECK(len > 0); /* Start computing t*[f,g]. */ fi = f->v[0]; gi = g->v[0]; - cf = (int128_t)u * fi + (int128_t)v * gi; - cg = (int128_t)q * fi + (int128_t)r * gi; + rustsecp256k1_v0_8_0_i128_mul(&cf, u, fi); + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, v, gi); + rustsecp256k1_v0_8_0_i128_mul(&cg, q, fi); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, r, gi); /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ - VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; - VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + VERIFY_CHECK((rustsecp256k1_v0_8_0_i128_to_i64(&cf) & M62) == 0); rustsecp256k1_v0_8_0_i128_rshift(&cf, 62); + VERIFY_CHECK((rustsecp256k1_v0_8_0_i128_to_i64(&cg) & M62) == 0); rustsecp256k1_v0_8_0_i128_rshift(&cg, 62); /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting * down by 62 bits). */ for (i = 1; i < len; ++i) { fi = f->v[i]; gi = g->v[i]; - cf += (int128_t)u * fi + (int128_t)v * gi; - cg += (int128_t)q * fi + (int128_t)r * gi; - f->v[i - 1] = (int64_t)cf & M62; cf >>= 62; - g->v[i - 1] = (int64_t)cg & M62; cg >>= 62; + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, u, fi); + rustsecp256k1_v0_8_0_i128_accum_mul(&cf, v, gi); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, q, fi); + rustsecp256k1_v0_8_0_i128_accum_mul(&cg, r, gi); + f->v[i - 1] = rustsecp256k1_v0_8_0_i128_to_i64(&cf) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cf, 62); + g->v[i - 1] = rustsecp256k1_v0_8_0_i128_to_i64(&cg) & M62; rustsecp256k1_v0_8_0_i128_rshift(&cg, 62); } /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */ - f->v[len - 1] = (int64_t)cf; - g->v[len - 1] = (int64_t)cg; + f->v[len - 1] = rustsecp256k1_v0_8_0_i128_to_i64(&cf); + g->v[len - 1] = rustsecp256k1_v0_8_0_i128_to_i64(&cg); } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */ -static void rustsecp256k1_v0_7_0_modinv64(rustsecp256k1_v0_7_0_modinv64_signed62 *x, const rustsecp256k1_v0_7_0_modinv64_modinfo *modinfo) { +static void rustsecp256k1_v0_8_0_modinv64(rustsecp256k1_v0_8_0_modinv64_signed62 *x, const rustsecp256k1_v0_8_0_modinv64_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. 
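All of these helpers operate on the signed62 form declared in modinv64.h: value = sum(v[i] * 2^(62*i), i = 0..4), with limbs held in int64_t so intermediate results may go negative. A minimal encode/decode sketch for non-negative values, assuming `__int128` (illustrative, not library code):

```
#include <assert.h>
#include <stdint.h>

/* Same shape as rustsecp256k1_v0_8_0_modinv64_signed62. */
typedef struct { int64_t v[5]; } signed62;

static void to_signed62(signed62 *r, unsigned __int128 x) {
    const uint64_t M62 = UINT64_MAX >> 2;
    int i;
    for (i = 0; i < 5; ++i) { r->v[i] = (int64_t)((uint64_t)x & M62); x >>= 62; }
}

static unsigned __int128 from_signed62(const signed62 *a) {
    unsigned __int128 x = 0;
    int i;
    for (i = 4; i >= 0; --i) x = (x << 62) | (unsigned __int128)(uint64_t)a->v[i];
    return x;
}

int main(void) {
    unsigned __int128 x = ((unsigned __int128)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;
    signed62 s;
    to_signed62(&s, x);
    assert(from_signed62(&s) == x); /* value = sum(v[i] * 2^(62*i)) round-trips */
    return 0;
}
```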
*/ - rustsecp256k1_v0_7_0_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; - rustsecp256k1_v0_7_0_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; - rustsecp256k1_v0_7_0_modinv64_signed62 f = modinfo->modulus; - rustsecp256k1_v0_7_0_modinv64_signed62 g = *x; + rustsecp256k1_v0_8_0_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; + rustsecp256k1_v0_8_0_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; + rustsecp256k1_v0_8_0_modinv64_signed62 f = modinfo->modulus; + rustsecp256k1_v0_8_0_modinv64_signed62 g = *x; int i; int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */ /* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */ for (i = 0; i < 10; ++i) { /* Compute transition matrix and new zeta after 59 divsteps. */ - rustsecp256k1_v0_7_0_modinv64_trans2x2 t; - zeta = rustsecp256k1_v0_7_0_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t); + rustsecp256k1_v0_8_0_modinv64_trans2x2 t; + zeta = rustsecp256k1_v0_8_0_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ - rustsecp256k1_v0_7_0_modinv64_update_de_62(&d, &e, &t, modinfo); + rustsecp256k1_v0_8_0_modinv64_update_de_62(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. */ #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif - rustsecp256k1_v0_7_0_modinv64_update_fg_62(&f, &g, &t); + rustsecp256k1_v0_8_0_modinv64_update_fg_62(&f, &g, &t); #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif } @@ -495,28 +531,28 @@ static void rustsecp256k1_v0_7_0_modinv64(rustsecp256k1_v0_7_0_modinv64_signed62 * values i.e. +/- 1, and d now contains +/- the modular inverse. 
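The fixed 10 x 59 = 590 divstep count above is what makes this entry point constant time (the variable-time variant below instead loops until g = 0, with a VERIFY-only bound of 744 divsteps). Each batch is 59 applications of the scalar divstep recurrence from the cited paper folded into one matrix; the code tracks zeta = -(delta+1/2) so the branch condition becomes a sign test, but the plain form is short enough to show (illustrative reference code, not the batched implementation):

```
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The divstep recurrence from the Bernstein-Yang paper, in its plain delta
 * form (delta starting at 1). It preserves gcd(f, g) up to sign and drives
 * g to 0. */
static void divstep(int64_t *delta, int64_t *f, int64_t *g) {
    if (*delta > 0 && (*g & 1)) {
        int64_t oldf = *f;
        *delta = 1 - *delta;
        *f = *g;
        *g = (*g - oldf) / 2;  /* f and g both odd here, so g - f is even */
    } else {
        *delta = 1 + *delta;
        *g = (*g + ((*g & 1) ? *f : 0)) / 2;
    }
}

int main(void) {
    int64_t delta = 1, f = 21, g = 12;  /* f must be odd */
    int i = 0;
    while (g != 0 && i < 1000) { divstep(&delta, &f, &g); ++i; }
    printf("g = 0 after %d divsteps, |f| = %lld = gcd(21, 12)\n",
           i, (long long)(f < 0 ? -f : f));
    assert(f == 3 || f == -3);
    return 0;
}
```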
*/ #ifdef VERIFY /* g == 0 */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 || - rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 || - (rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && - rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && - (rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 || - rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0))); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 || + rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 || + (rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + (rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0))); #endif /* Optionally negate d, normalize to [0,modulus), and return it. */ - rustsecp256k1_v0_7_0_modinv64_normalize_62(&d, f.v[4], modinfo); + rustsecp256k1_v0_8_0_modinv64_normalize_62(&d, f.v[4], modinfo); *x = d; } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ -static void rustsecp256k1_v0_7_0_modinv64_var(rustsecp256k1_v0_7_0_modinv64_signed62 *x, const rustsecp256k1_v0_7_0_modinv64_modinfo *modinfo) { +static void rustsecp256k1_v0_8_0_modinv64_var(rustsecp256k1_v0_8_0_modinv64_signed62 *x, const rustsecp256k1_v0_8_0_modinv64_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ - rustsecp256k1_v0_7_0_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; - rustsecp256k1_v0_7_0_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; - rustsecp256k1_v0_7_0_modinv64_signed62 f = modinfo->modulus; - rustsecp256k1_v0_7_0_modinv64_signed62 g = *x; + rustsecp256k1_v0_8_0_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; + rustsecp256k1_v0_8_0_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; + rustsecp256k1_v0_8_0_modinv64_signed62 f = modinfo->modulus; + rustsecp256k1_v0_8_0_modinv64_signed62 g = *x; #ifdef VERIFY int i = 0; #endif @@ -527,18 +563,18 @@ static void rustsecp256k1_v0_7_0_modinv64_var(rustsecp256k1_v0_7_0_modinv64_sign /* Do iterations of 62 divsteps each until g=0. */ while (1) { /* Compute transition matrix and new eta after 62 divsteps. */ - rustsecp256k1_v0_7_0_modinv64_trans2x2 t; - eta = rustsecp256k1_v0_7_0_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t); + rustsecp256k1_v0_8_0_modinv64_trans2x2 t; + eta = rustsecp256k1_v0_8_0_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ - rustsecp256k1_v0_7_0_modinv64_update_de_62(&d, &e, &t, modinfo); + rustsecp256k1_v0_8_0_modinv64_update_de_62(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. 
*/ #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif - rustsecp256k1_v0_7_0_modinv64_update_fg_62_var(len, &f, &g, &t); + rustsecp256k1_v0_8_0_modinv64_update_fg_62_var(len, &f, &g, &t); /* If the bottom limb of g is zero, there is a chance that g=0. */ if (g.v[0] == 0) { cond = 0; @@ -564,10 +600,10 @@ static void rustsecp256k1_v0_7_0_modinv64_var(rustsecp256k1_v0_7_0_modinv64_sign } #ifdef VERIFY VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif } @@ -575,18 +611,18 @@ static void rustsecp256k1_v0_7_0_modinv64_var(rustsecp256k1_v0_7_0_modinv64_sign * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
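For the final VERIFY block below, it helps to recall the invariant both modinv64 variants maintain (doc/safegcd_implementation.md in the vendored tree walks through it):

```
d \cdot x \equiv f \pmod{M}, \qquad e \cdot x \equiv g \pmod{M}
```

Every iteration multiplies [d, e] and [f, g] by the same matrix t, with the division by 2^62 performed mod M for d and e and exactly for f and g, so the congruences survive each step. When the loop exits with g = 0 and f = +/-1 (which holds whenever gcd(x, M) = 1), they collapse to d*x == +/-1 (mod M); the normalize call then uses the sign of f's top limb to pick +d or -d and reduce into [0, M).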
*/ #ifdef VERIFY /* g == 0 */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 || - rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 || - (rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && - rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && - (rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 || - rustsecp256k1_v0_7_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0))); + VERIFY_CHECK(rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 || + rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 || + (rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + (rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 || + rustsecp256k1_v0_8_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0))); #endif /* Optionally negate d, normalize to [0,modulus), and return it. */ - rustsecp256k1_v0_7_0_modinv64_normalize_62(&d, f.v[len - 1], modinfo); + rustsecp256k1_v0_8_0_modinv64_normalize_62(&d, f.v[len - 1], modinfo); *x = d; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include index 09277537e..9d2aeb334 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_7_0_ecdh.h +include_HEADERS += include/rustsecp256k1_v0_8_0_ecdh.h noinst_HEADERS += src/modules/ecdh/main_impl.h noinst_HEADERS += src/modules/ecdh/tests_impl.h noinst_HEADERS += src/modules/ecdh/bench_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/bench_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/bench_impl.h index 1360b57bc..1f25c4235 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/bench_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/bench_impl.h @@ -7,11 +7,11 @@ #ifndef SECP256K1_MODULE_ECDH_BENCH_H #define SECP256K1_MODULE_ECDH_BENCH_H -#include "../include/secp256k1_ecdh.h" +#include "../../../include/secp256k1_ecdh.h" typedef struct { - rustsecp256k1_v0_7_0_context *ctx; - rustsecp256k1_v0_7_0_pubkey point; + rustsecp256k1_v0_8_0_context *ctx; + rustsecp256k1_v0_8_0_pubkey point; unsigned char scalar[32]; } bench_ecdh_data; @@ -29,7 +29,7 @@ static void bench_ecdh_setup(void* arg) { for (i = 0; i < 32; i++) { data->scalar[i] = i + 1; } - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1); } static void bench_ecdh(void* arg, int iters) { @@ -38,7 +38,7 @@ static void bench_ecdh(void* arg, int iters) { bench_ecdh_data *data = (bench_ecdh_data*)arg; for (i = 0; i < iters; i++) { - CHECK(rustsecp256k1_v0_7_0_ecdh(data->ctx, res, &data->point, data->scalar, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdh(data->ctx, res, &data->point, data->scalar, 
NULL, NULL) == 1); } } @@ -47,11 +47,11 @@ void run_ecdh_bench(int iters, int argc, char** argv) { int d = argc == 1; /* create a context with no capabilities */ - data.ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); + data.ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); if (d || have_flag(argc, argv, "ecdh")) run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, iters); - rustsecp256k1_v0_7_0_context_destroy(data.ctx); + rustsecp256k1_v0_8_0_context_destroy(data.ctx); } #endif /* SECP256K1_MODULE_ECDH_BENCH_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h index e6c8b4843..6ff593f4f 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/main_impl.h @@ -12,26 +12,26 @@ static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x32, const unsigned char *y32, void *data) { unsigned char version = (y32[31] & 0x01) | 0x02; - rustsecp256k1_v0_7_0_sha256 sha; + rustsecp256k1_v0_8_0_sha256 sha; (void)data; - rustsecp256k1_v0_7_0_sha256_initialize(&sha); - rustsecp256k1_v0_7_0_sha256_write(&sha, &version, 1); - rustsecp256k1_v0_7_0_sha256_write(&sha, x32, 32); - rustsecp256k1_v0_7_0_sha256_finalize(&sha, output); + rustsecp256k1_v0_8_0_sha256_initialize(&sha); + rustsecp256k1_v0_8_0_sha256_write(&sha, &version, 1); + rustsecp256k1_v0_8_0_sha256_write(&sha, x32, 32); + rustsecp256k1_v0_8_0_sha256_finalize(&sha, output); return 1; } -const rustsecp256k1_v0_7_0_ecdh_hash_function rustsecp256k1_v0_7_0_ecdh_hash_function_sha256 = ecdh_hash_function_sha256; -const rustsecp256k1_v0_7_0_ecdh_hash_function rustsecp256k1_v0_7_0_ecdh_hash_function_default = ecdh_hash_function_sha256; +const rustsecp256k1_v0_8_0_ecdh_hash_function rustsecp256k1_v0_8_0_ecdh_hash_function_sha256 = ecdh_hash_function_sha256; +const rustsecp256k1_v0_8_0_ecdh_hash_function rustsecp256k1_v0_8_0_ecdh_hash_function_default = ecdh_hash_function_sha256; -int rustsecp256k1_v0_7_0_ecdh(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output, const rustsecp256k1_v0_7_0_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_7_0_ecdh_hash_function hashfp, void *data) { +int rustsecp256k1_v0_8_0_ecdh(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, const rustsecp256k1_v0_8_0_pubkey *point, const unsigned char *scalar, rustsecp256k1_v0_8_0_ecdh_hash_function hashfp, void *data) { int ret = 0; int overflow = 0; - rustsecp256k1_v0_7_0_gej res; - rustsecp256k1_v0_7_0_ge pt; - rustsecp256k1_v0_7_0_scalar s; + rustsecp256k1_v0_8_0_gej res; + rustsecp256k1_v0_8_0_ge pt; + rustsecp256k1_v0_8_0_scalar s; unsigned char x[32]; unsigned char y[32]; @@ -41,29 +41,29 @@ int rustsecp256k1_v0_7_0_ecdh(const rustsecp256k1_v0_7_0_context* ctx, unsigned ARG_CHECK(scalar != NULL); if (hashfp == NULL) { - hashfp = rustsecp256k1_v0_7_0_ecdh_hash_function_default; + hashfp = rustsecp256k1_v0_8_0_ecdh_hash_function_default; } - rustsecp256k1_v0_7_0_pubkey_load(ctx, &pt, point); - rustsecp256k1_v0_7_0_scalar_set_b32(&s, scalar, &overflow); + rustsecp256k1_v0_8_0_pubkey_load(ctx, &pt, point); + rustsecp256k1_v0_8_0_scalar_set_b32(&s, scalar, &overflow); - overflow |= rustsecp256k1_v0_7_0_scalar_is_zero(&s); - rustsecp256k1_v0_7_0_scalar_cmov(&s, &rustsecp256k1_v0_7_0_scalar_one, overflow); + overflow |= rustsecp256k1_v0_8_0_scalar_is_zero(&s); + rustsecp256k1_v0_8_0_scalar_cmov(&s, 
&rustsecp256k1_v0_8_0_scalar_one, overflow); - rustsecp256k1_v0_7_0_ecmult_const(&res, &pt, &s, 256); - rustsecp256k1_v0_7_0_ge_set_gej(&pt, &res); + rustsecp256k1_v0_8_0_ecmult_const(&res, &pt, &s, 256); + rustsecp256k1_v0_8_0_ge_set_gej(&pt, &res); /* Compute a hash of the point */ - rustsecp256k1_v0_7_0_fe_normalize(&pt.x); - rustsecp256k1_v0_7_0_fe_normalize(&pt.y); - rustsecp256k1_v0_7_0_fe_get_b32(x, &pt.x); - rustsecp256k1_v0_7_0_fe_get_b32(y, &pt.y); + rustsecp256k1_v0_8_0_fe_normalize(&pt.x); + rustsecp256k1_v0_8_0_fe_normalize(&pt.y); + rustsecp256k1_v0_8_0_fe_get_b32(x, &pt.x); + rustsecp256k1_v0_8_0_fe_get_b32(y, &pt.y); ret = hashfp(output, x, y, data); memset(x, 0, 32); memset(y, 0, 32); - rustsecp256k1_v0_7_0_scalar_clear(&s); + rustsecp256k1_v0_8_0_scalar_clear(&s); return !!ret & !overflow; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h index 2fabd8e63..2f6703e92 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/ecdh/tests_impl.h @@ -26,71 +26,71 @@ int ecdh_hash_function_custom(unsigned char *output, const unsigned char *x, con void test_ecdh_api(void) { /* Setup context that just counts errors */ - rustsecp256k1_v0_7_0_context *tctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN); - rustsecp256k1_v0_7_0_pubkey point; + rustsecp256k1_v0_8_0_context *tctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_8_0_pubkey point; unsigned char res[32]; unsigned char s_one[32] = { 0 }; int32_t ecount = 0; s_one[31] = 1; - rustsecp256k1_v0_7_0_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(tctx, &point, s_one) == 1); + rustsecp256k1_v0_8_0_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(tctx, &point, s_one) == 1); /* Check all NULLs are detected */ - CHECK(rustsecp256k1_v0_7_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1); CHECK(ecount == 3); /* Cleanup */ - rustsecp256k1_v0_7_0_context_destroy(tctx); + rustsecp256k1_v0_8_0_context_destroy(tctx); } void test_ecdh_generator_basepoint(void) { unsigned char s_one[32] = { 0 }; - rustsecp256k1_v0_7_0_pubkey point[2]; + rustsecp256k1_v0_8_0_pubkey point[2]; int i; s_one[31] = 1; /* Check against pubkey creation when the basepoint is the generator */ - for (i = 0; i < 100; ++i) { - rustsecp256k1_v0_7_0_sha256 sha; + for (i = 
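The default hash above commits to the compressed shared point: a version byte 0x02 or 0x03 taken from y's parity, then x, through SHA-256 (test_ecdh_generator_basepoint below recomputes exactly that). Callers that want the raw point can pass their own callback, as the tests do with ecdh_hash_function_custom; a usage sketch against the API in this diff (the `demo_` wrapper and include paths are illustrative):

```
#include <string.h>
#include "secp256k1.h"
#include "secp256k1_ecdh.h"

/* Custom hash callback: return the uncompressed shared point instead of a
 * SHA-256 digest (mirrors ecdh_hash_function_custom in tests_impl.h).
 * output must hold 65 bytes here; returning 1 signals success. */
static int ecdh_raw_point(unsigned char *output, const unsigned char *x32,
                          const unsigned char *y32, void *data) {
    (void)data;
    output[0] = 0x04;
    memcpy(output + 1, x32, 32);
    memcpy(output + 33, y32, 32);
    return 1;
}

/* Sketch: shared = ECDH(peer_pubkey, our_seckey) with the raw-point callback.
 * Returns 1 on success, 0 on bad inputs (NULL args, zero or overflowing scalar). */
int demo_ecdh(const rustsecp256k1_v0_8_0_context *ctx,
              unsigned char shared65[65],
              const rustsecp256k1_v0_8_0_pubkey *peer,
              const unsigned char seckey32[32]) {
    return rustsecp256k1_v0_8_0_ecdh(ctx, shared65, peer, seckey32, ecdh_raw_point, NULL);
}
```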
0; i < 2 * count; ++i) { + rustsecp256k1_v0_8_0_sha256 sha; unsigned char s_b32[32]; unsigned char output_ecdh[65]; unsigned char output_ser[32]; unsigned char point_ser[65]; size_t point_ser_len = sizeof(point_ser); - rustsecp256k1_v0_7_0_scalar s; + rustsecp256k1_v0_8_0_scalar s; random_scalar_order(&s); - rustsecp256k1_v0_7_0_scalar_get_b32(s_b32, &s); + rustsecp256k1_v0_8_0_scalar_get_b32(s_b32, &s); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &point[0], s_one) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &point[1], s_b32) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &point[0], s_one) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &point[1], s_b32) == 1); /* compute using ECDH function with custom hash function */ - CHECK(rustsecp256k1_v0_7_0_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, output_ecdh, &point[0], s_b32, ecdh_hash_function_custom, NULL) == 1); /* compute "explicitly" */ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1); /* compare */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(output_ecdh, point_ser, 65) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(output_ecdh, point_ser, 65) == 0); /* compute using ECDH function with default hash function */ - CHECK(rustsecp256k1_v0_7_0_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); /* compute "explicitly" */ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); - rustsecp256k1_v0_7_0_sha256_initialize(&sha); - rustsecp256k1_v0_7_0_sha256_write(&sha, point_ser, point_ser_len); - rustsecp256k1_v0_7_0_sha256_finalize(&sha, output_ser); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); + rustsecp256k1_v0_8_0_sha256_initialize(&sha); + rustsecp256k1_v0_8_0_sha256_write(&sha, point_ser, point_ser_len); + rustsecp256k1_v0_8_0_sha256_finalize(&sha, output_ser); /* compare */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(output_ecdh, output_ser, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(output_ecdh, output_ser, 32) == 0); } } @@ -104,29 +104,62 @@ void test_bad_scalar(void) { }; unsigned char s_rand[32] = { 0 }; unsigned char output[32]; - rustsecp256k1_v0_7_0_scalar rand; - rustsecp256k1_v0_7_0_pubkey point; + rustsecp256k1_v0_8_0_scalar rand; + rustsecp256k1_v0_8_0_pubkey point; /* Create random point */ random_scalar_order(&rand); - rustsecp256k1_v0_7_0_scalar_get_b32(s_rand, &rand); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &point, s_rand) == 1); + rustsecp256k1_v0_8_0_scalar_get_b32(s_rand, &rand); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &point, s_rand) == 1); /* Try to multiply it by bad values */ - CHECK(rustsecp256k1_v0_7_0_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0); - CHECK(rustsecp256k1_v0_7_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, output, &point, s_zero, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 0); /* ...and a good one */ s_overflow[31] -= 1; - CHECK(rustsecp256k1_v0_7_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1); + 
CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, output, &point, s_overflow, NULL, NULL) == 1); /* Hash function failure results in ecdh failure */ - CHECK(rustsecp256k1_v0_7_0_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, output, &point, s_overflow, ecdh_hash_function_test_fail, NULL) == 0); +} + +/** Test that ECDH(sG, 1/s) == ECDH((1/s)G, s) == ECDH(G, 1) for a few random s. */ +void test_result_basepoint(void) { + rustsecp256k1_v0_8_0_pubkey point; + rustsecp256k1_v0_8_0_scalar rand; + unsigned char s[32]; + unsigned char s_inv[32]; + unsigned char out[32]; + unsigned char out_inv[32]; + unsigned char out_base[32]; + int i; + + unsigned char s_one[32] = { 0 }; + s_one[31] = 1; + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &point, s_one) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, out_base, &point, s_one, NULL, NULL) == 1); + + for (i = 0; i < 2 * count; i++) { + random_scalar_order(&rand); + rustsecp256k1_v0_8_0_scalar_get_b32(s, &rand); + rustsecp256k1_v0_8_0_scalar_inverse(&rand, &rand); + rustsecp256k1_v0_8_0_scalar_get_b32(s_inv, &rand); + + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &point, s) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, out, &point, s_inv, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, out_base, 32) == 0); + + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &point, s_inv) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdh(ctx, out_inv, &point, s, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out_inv, out_base, 32) == 0); + } } void run_ecdh_tests(void) { test_ecdh_api(); test_ecdh_generator_basepoint(); test_bad_scalar(); + test_result_basepoint(); } #endif /* SECP256K1_MODULE_ECDH_TESTS_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include index 85cbb025f..8a1e10170 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_7_0_extrakeys.h +include_HEADERS += include/rustsecp256k1_v0_8_0_extrakeys.h noinst_HEADERS += src/modules/extrakeys/tests_impl.h noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h noinst_HEADERS += src/modules/extrakeys/main_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h index d817dcf47..e48909678 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h @@ -10,54 +10,54 @@ #include "../../../include/secp256k1.h" #include "../../../include/secp256k1_extrakeys.h" -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_xonly_pubkey_load(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ge *ge, const rustsecp256k1_v0_7_0_xonly_pubkey *pubkey) { - return rustsecp256k1_v0_7_0_pubkey_load(ctx, ge, (const rustsecp256k1_v0_7_0_pubkey *) pubkey); +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_xonly_pubkey_load(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ge *ge, const rustsecp256k1_v0_8_0_xonly_pubkey *pubkey) { + return rustsecp256k1_v0_8_0_pubkey_load(ctx, ge, (const rustsecp256k1_v0_8_0_pubkey *) pubkey); } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_xonly_pubkey_save(rustsecp256k1_v0_7_0_xonly_pubkey *pubkey, rustsecp256k1_v0_7_0_ge *ge) { 
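The new test_result_basepoint added above checks an algebraic identity rather than fixed vectors: for any nonzero scalar s,

```
s^{-1} \cdot (s G) = (s^{-1} s)\, G = G = s \cdot (s^{-1} G)
```

so ECDH(sG, s^{-1}), ECDH(s^{-1}G, s), and ECDH(G, 1) all hash the same point and must agree byte for byte, which is exactly what the memcmp checks against out_base assert.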
- rustsecp256k1_v0_7_0_pubkey_save((rustsecp256k1_v0_7_0_pubkey *) pubkey, ge); +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_xonly_pubkey_save(rustsecp256k1_v0_8_0_xonly_pubkey *pubkey, rustsecp256k1_v0_8_0_ge *ge) { + rustsecp256k1_v0_8_0_pubkey_save((rustsecp256k1_v0_8_0_pubkey *) pubkey, ge); } -int rustsecp256k1_v0_7_0_xonly_pubkey_parse(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_xonly_pubkey *pubkey, const unsigned char *input32) { - rustsecp256k1_v0_7_0_ge pk; - rustsecp256k1_v0_7_0_fe x; +int rustsecp256k1_v0_8_0_xonly_pubkey_parse(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_xonly_pubkey *pubkey, const unsigned char *input32) { + rustsecp256k1_v0_8_0_ge pk; + rustsecp256k1_v0_8_0_fe x; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input32 != NULL); - if (!rustsecp256k1_v0_7_0_fe_set_b32(&x, input32)) { + if (!rustsecp256k1_v0_8_0_fe_set_b32(&x, input32)) { return 0; } - if (!rustsecp256k1_v0_7_0_ge_set_xo_var(&pk, &x, 0)) { + if (!rustsecp256k1_v0_8_0_ge_set_xo_var(&pk, &x, 0)) { return 0; } - if (!rustsecp256k1_v0_7_0_ge_is_in_correct_subgroup(&pk)) { + if (!rustsecp256k1_v0_8_0_ge_is_in_correct_subgroup(&pk)) { return 0; } - rustsecp256k1_v0_7_0_xonly_pubkey_save(pubkey, &pk); + rustsecp256k1_v0_8_0_xonly_pubkey_save(pubkey, &pk); return 1; } -int rustsecp256k1_v0_7_0_xonly_pubkey_serialize(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output32, const rustsecp256k1_v0_7_0_xonly_pubkey *pubkey) { - rustsecp256k1_v0_7_0_ge pk; +int rustsecp256k1_v0_8_0_xonly_pubkey_serialize(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output32, const rustsecp256k1_v0_8_0_xonly_pubkey *pubkey) { + rustsecp256k1_v0_8_0_ge pk; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output32 != NULL); memset(output32, 0, 32); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_7_0_xonly_pubkey_load(ctx, &pk, pubkey)) { + if (!rustsecp256k1_v0_8_0_xonly_pubkey_load(ctx, &pk, pubkey)) { return 0; } - rustsecp256k1_v0_7_0_fe_get_b32(output32, &pk.x); + rustsecp256k1_v0_8_0_fe_get_b32(output32, &pk.x); return 1; } -int rustsecp256k1_v0_7_0_xonly_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ctx, const rustsecp256k1_v0_7_0_xonly_pubkey* pk0, const rustsecp256k1_v0_7_0_xonly_pubkey* pk1) { +int rustsecp256k1_v0_8_0_xonly_pubkey_cmp(const rustsecp256k1_v0_8_0_context* ctx, const rustsecp256k1_v0_8_0_xonly_pubkey* pk0, const rustsecp256k1_v0_8_0_xonly_pubkey* pk1) { unsigned char out[2][32]; - const rustsecp256k1_v0_7_0_xonly_pubkey* pk[2]; + const rustsecp256k1_v0_8_0_xonly_pubkey* pk[2]; int i; VERIFY_CHECK(ctx != NULL); @@ -70,7 +70,7 @@ int rustsecp256k1_v0_7_0_xonly_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ct * pubkeys are involved and prevents edge cases such as sorting * algorithms that use this function and do not terminate as a * result. 
*/ - if (!rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, out[i], pk[i])) { + if (!rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, out[i], pk[i])) { /* Note that xonly_pubkey_serialize should already set the output to * zero in that case, but it's not guaranteed by the API, we can't * test it and writing a VERIFY_CHECK is more complex than @@ -78,44 +78,44 @@ int rustsecp256k1_v0_7_0_xonly_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ct memset(out[i], 0, sizeof(out[i])); } } - return rustsecp256k1_v0_7_0_memcmp_var(out[0], out[1], sizeof(out[1])); + return rustsecp256k1_v0_8_0_memcmp_var(out[0], out[1], sizeof(out[1])); } /** Keeps a group element as is if it has an even Y and otherwise negates it. * y_parity is set to 0 in the former case and to 1 in the latter case. * Requires that the coordinates of r are normalized. */ -static int rustsecp256k1_v0_7_0_extrakeys_ge_even_y(rustsecp256k1_v0_7_0_ge *r) { +static int rustsecp256k1_v0_8_0_extrakeys_ge_even_y(rustsecp256k1_v0_8_0_ge *r) { int y_parity = 0; - VERIFY_CHECK(!rustsecp256k1_v0_7_0_ge_is_infinity(r)); + VERIFY_CHECK(!rustsecp256k1_v0_8_0_ge_is_infinity(r)); - if (rustsecp256k1_v0_7_0_fe_is_odd(&r->y)) { - rustsecp256k1_v0_7_0_fe_negate(&r->y, &r->y, 1); + if (rustsecp256k1_v0_8_0_fe_is_odd(&r->y)) { + rustsecp256k1_v0_8_0_fe_negate(&r->y, &r->y, 1); y_parity = 1; } return y_parity; } -int rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_xonly_pubkey *xonly_pubkey, int *pk_parity, const rustsecp256k1_v0_7_0_pubkey *pubkey) { - rustsecp256k1_v0_7_0_ge pk; +int rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_xonly_pubkey *xonly_pubkey, int *pk_parity, const rustsecp256k1_v0_8_0_pubkey *pubkey) { + rustsecp256k1_v0_8_0_ge pk; int tmp; VERIFY_CHECK(ctx != NULL); ARG_CHECK(xonly_pubkey != NULL); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_7_0_pubkey_load(ctx, &pk, pubkey)) { + if (!rustsecp256k1_v0_8_0_pubkey_load(ctx, &pk, pubkey)) { return 0; } - tmp = rustsecp256k1_v0_7_0_extrakeys_ge_even_y(&pk); + tmp = rustsecp256k1_v0_8_0_extrakeys_ge_even_y(&pk); if (pk_parity != NULL) { *pk_parity = tmp; } - rustsecp256k1_v0_7_0_xonly_pubkey_save(xonly_pubkey, &pk); + rustsecp256k1_v0_8_0_xonly_pubkey_save(xonly_pubkey, &pk); return 1; } -int rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *output_pubkey, const rustsecp256k1_v0_7_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_ge pk; +int rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *output_pubkey, const rustsecp256k1_v0_8_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_ge pk; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output_pubkey != NULL); @@ -123,16 +123,16 @@ int rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(const rustsecp256k1_v0_7_0_conte ARG_CHECK(internal_pubkey != NULL); ARG_CHECK(tweak32 != NULL); - if (!rustsecp256k1_v0_7_0_xonly_pubkey_load(ctx, &pk, internal_pubkey) - || !rustsecp256k1_v0_7_0_ec_pubkey_tweak_add_helper(&pk, tweak32)) { + if (!rustsecp256k1_v0_8_0_xonly_pubkey_load(ctx, &pk, internal_pubkey) + || !rustsecp256k1_v0_8_0_ec_pubkey_tweak_add_helper(&pk, tweak32)) { return 0; } - rustsecp256k1_v0_7_0_pubkey_save(output_pubkey, &pk); + rustsecp256k1_v0_8_0_pubkey_save(output_pubkey, &pk); return 1; } -int 
rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_7_0_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const rustsecp256k1_v0_7_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_ge pk; +int rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_8_0_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const rustsecp256k1_v0_8_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_ge pk; unsigned char pk_expected32[32]; VERIFY_CHECK(ctx != NULL); @@ -140,31 +140,31 @@ int rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(const rustsecp256k1_v0_7_0 ARG_CHECK(tweaked_pubkey32 != NULL); ARG_CHECK(tweak32 != NULL); - if (!rustsecp256k1_v0_7_0_xonly_pubkey_load(ctx, &pk, internal_pubkey) - || !rustsecp256k1_v0_7_0_ec_pubkey_tweak_add_helper(&pk, tweak32)) { + if (!rustsecp256k1_v0_8_0_xonly_pubkey_load(ctx, &pk, internal_pubkey) + || !rustsecp256k1_v0_8_0_ec_pubkey_tweak_add_helper(&pk, tweak32)) { return 0; } - rustsecp256k1_v0_7_0_fe_normalize_var(&pk.x); - rustsecp256k1_v0_7_0_fe_normalize_var(&pk.y); - rustsecp256k1_v0_7_0_fe_get_b32(pk_expected32, &pk.x); + rustsecp256k1_v0_8_0_fe_normalize_var(&pk.x); + rustsecp256k1_v0_8_0_fe_normalize_var(&pk.y); + rustsecp256k1_v0_8_0_fe_get_b32(pk_expected32, &pk.x); - return rustsecp256k1_v0_7_0_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0 - && rustsecp256k1_v0_7_0_fe_is_odd(&pk.y) == tweaked_pk_parity; + return rustsecp256k1_v0_8_0_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0 + && rustsecp256k1_v0_8_0_fe_is_odd(&pk.y) == tweaked_pk_parity; } -static void rustsecp256k1_v0_7_0_keypair_save(rustsecp256k1_v0_7_0_keypair *keypair, const rustsecp256k1_v0_7_0_scalar *sk, rustsecp256k1_v0_7_0_ge *pk) { - rustsecp256k1_v0_7_0_scalar_get_b32(&keypair->data[0], sk); - rustsecp256k1_v0_7_0_pubkey_save((rustsecp256k1_v0_7_0_pubkey *)&keypair->data[32], pk); +static void rustsecp256k1_v0_8_0_keypair_save(rustsecp256k1_v0_8_0_keypair *keypair, const rustsecp256k1_v0_8_0_scalar *sk, rustsecp256k1_v0_8_0_ge *pk) { + rustsecp256k1_v0_8_0_scalar_get_b32(&keypair->data[0], sk); + rustsecp256k1_v0_8_0_pubkey_save((rustsecp256k1_v0_8_0_pubkey *)&keypair->data[32], pk); } -static int rustsecp256k1_v0_7_0_keypair_seckey_load(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_scalar *sk, const rustsecp256k1_v0_7_0_keypair *keypair) { +static int rustsecp256k1_v0_8_0_keypair_seckey_load(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_scalar *sk, const rustsecp256k1_v0_8_0_keypair *keypair) { int ret; - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(sk, &keypair->data[0]); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(sk, &keypair->data[0]); /* We can declassify ret here because sk is only zero if a keypair function * failed (which zeroes the keypair) and its return value is ignored. */ - rustsecp256k1_v0_7_0_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_8_0_declassify(ctx, &ret, sizeof(ret)); ARG_CHECK(ret); return ret; } @@ -172,45 +172,45 @@ static int rustsecp256k1_v0_7_0_keypair_seckey_load(const rustsecp256k1_v0_7_0_c /* Load a keypair into pk and sk (if non-NULL). This function declassifies pk * and ARG_CHECKs that the keypair is not invalid. It always initializes sk and * pk with dummy values. 
*/ -static int rustsecp256k1_v0_7_0_keypair_load(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_scalar *sk, rustsecp256k1_v0_7_0_ge *pk, const rustsecp256k1_v0_7_0_keypair *keypair) { +static int rustsecp256k1_v0_8_0_keypair_load(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_scalar *sk, rustsecp256k1_v0_8_0_ge *pk, const rustsecp256k1_v0_8_0_keypair *keypair) { int ret; - const rustsecp256k1_v0_7_0_pubkey *pubkey = (const rustsecp256k1_v0_7_0_pubkey *)&keypair->data[32]; + const rustsecp256k1_v0_8_0_pubkey *pubkey = (const rustsecp256k1_v0_8_0_pubkey *)&keypair->data[32]; /* Need to declassify the pubkey because pubkey_load ARG_CHECKs if it's * invalid. */ - rustsecp256k1_v0_7_0_declassify(ctx, pubkey, sizeof(*pubkey)); - ret = rustsecp256k1_v0_7_0_pubkey_load(ctx, pk, pubkey); + rustsecp256k1_v0_8_0_declassify(ctx, pubkey, sizeof(*pubkey)); + ret = rustsecp256k1_v0_8_0_pubkey_load(ctx, pk, pubkey); if (sk != NULL) { - ret = ret && rustsecp256k1_v0_7_0_keypair_seckey_load(ctx, sk, keypair); + ret = ret && rustsecp256k1_v0_8_0_keypair_seckey_load(ctx, sk, keypair); } if (!ret) { - *pk = rustsecp256k1_v0_7_0_ge_const_g; + *pk = rustsecp256k1_v0_8_0_ge_const_g; if (sk != NULL) { - *sk = rustsecp256k1_v0_7_0_scalar_one; + *sk = rustsecp256k1_v0_8_0_scalar_one; } } return ret; } -int rustsecp256k1_v0_7_0_keypair_create(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_keypair *keypair, const unsigned char *seckey32) { - rustsecp256k1_v0_7_0_scalar sk; - rustsecp256k1_v0_7_0_ge pk; +int rustsecp256k1_v0_8_0_keypair_create(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_keypair *keypair, const unsigned char *seckey32) { + rustsecp256k1_v0_8_0_scalar sk; + rustsecp256k1_v0_8_0_ge pk; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(keypair != NULL); memset(keypair, 0, sizeof(*keypair)); - ARG_CHECK(rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey32 != NULL); - ret = rustsecp256k1_v0_7_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32); - rustsecp256k1_v0_7_0_keypair_save(keypair, &sk, &pk); - rustsecp256k1_v0_7_0_memczero(keypair, sizeof(*keypair), !ret); + ret = rustsecp256k1_v0_8_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32); + rustsecp256k1_v0_8_0_keypair_save(keypair, &sk, &pk); + rustsecp256k1_v0_8_0_memczero(keypair, sizeof(*keypair), !ret); - rustsecp256k1_v0_7_0_scalar_clear(&sk); + rustsecp256k1_v0_8_0_scalar_clear(&sk); return ret; } -int rustsecp256k1_v0_7_0_keypair_sec(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const rustsecp256k1_v0_7_0_keypair *keypair) { +int rustsecp256k1_v0_8_0_keypair_sec(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const rustsecp256k1_v0_8_0_keypair *keypair) { VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); memset(seckey, 0, 32); @@ -220,7 +220,7 @@ int rustsecp256k1_v0_7_0_keypair_sec(const rustsecp256k1_v0_7_0_context* ctx, un return 1; } -int rustsecp256k1_v0_7_0_keypair_pub(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey, const rustsecp256k1_v0_7_0_keypair *keypair) { +int rustsecp256k1_v0_8_0_keypair_pub(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey, const rustsecp256k1_v0_8_0_keypair *keypair) { VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); @@ -230,8 +230,8 @@ int 
rustsecp256k1_v0_7_0_keypair_pub(const rustsecp256k1_v0_7_0_context* ctx, ru return 1; } -int rustsecp256k1_v0_7_0_keypair_xonly_pub(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_xonly_pubkey *pubkey, int *pk_parity, const rustsecp256k1_v0_7_0_keypair *keypair) { - rustsecp256k1_v0_7_0_ge pk; +int rustsecp256k1_v0_8_0_keypair_xonly_pub(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_xonly_pubkey *pubkey, int *pk_parity, const rustsecp256k1_v0_8_0_keypair *keypair) { + rustsecp256k1_v0_8_0_ge pk; int tmp; VERIFY_CHECK(ctx != NULL); @@ -239,21 +239,21 @@ int rustsecp256k1_v0_7_0_keypair_xonly_pub(const rustsecp256k1_v0_7_0_context* c memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(keypair != NULL); - if (!rustsecp256k1_v0_7_0_keypair_load(ctx, NULL, &pk, keypair)) { + if (!rustsecp256k1_v0_8_0_keypair_load(ctx, NULL, &pk, keypair)) { return 0; } - tmp = rustsecp256k1_v0_7_0_extrakeys_ge_even_y(&pk); + tmp = rustsecp256k1_v0_8_0_extrakeys_ge_even_y(&pk); if (pk_parity != NULL) { *pk_parity = tmp; } - rustsecp256k1_v0_7_0_xonly_pubkey_save(pubkey, &pk); + rustsecp256k1_v0_8_0_xonly_pubkey_save(pubkey, &pk); return 1; } -int rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_keypair *keypair, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_ge pk; - rustsecp256k1_v0_7_0_scalar sk; +int rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_keypair *keypair, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_ge pk; + rustsecp256k1_v0_8_0_scalar sk; int y_parity; int ret; @@ -261,23 +261,23 @@ int rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(const rustsecp256k1_v0_7_0_cont ARG_CHECK(keypair != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_7_0_keypair_load(ctx, &sk, &pk, keypair); + ret = rustsecp256k1_v0_8_0_keypair_load(ctx, &sk, &pk, keypair); memset(keypair, 0, sizeof(*keypair)); - y_parity = rustsecp256k1_v0_7_0_extrakeys_ge_even_y(&pk); + y_parity = rustsecp256k1_v0_8_0_extrakeys_ge_even_y(&pk); if (y_parity == 1) { - rustsecp256k1_v0_7_0_scalar_negate(&sk, &sk); + rustsecp256k1_v0_8_0_scalar_negate(&sk, &sk); } - ret &= rustsecp256k1_v0_7_0_ec_seckey_tweak_add_helper(&sk, tweak32); - ret &= rustsecp256k1_v0_7_0_ec_pubkey_tweak_add_helper(&pk, tweak32); + ret &= rustsecp256k1_v0_8_0_ec_seckey_tweak_add_helper(&sk, tweak32); + ret &= rustsecp256k1_v0_8_0_ec_pubkey_tweak_add_helper(&pk, tweak32); - rustsecp256k1_v0_7_0_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_8_0_declassify(ctx, &ret, sizeof(ret)); if (ret) { - rustsecp256k1_v0_7_0_keypair_save(keypair, &sk, &pk); + rustsecp256k1_v0_8_0_keypair_save(keypair, &sk, &pk); } - rustsecp256k1_v0_7_0_scalar_clear(&sk); + rustsecp256k1_v0_8_0_scalar_clear(&sk); return ret; } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h index 3782a1dcf..2d70b3e18 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h @@ -7,57 +7,57 @@ #ifndef SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H #define SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H -#include "src/modules/extrakeys/main_impl.h" #include "../../../include/secp256k1_extrakeys.h" +#include "main_impl.h" -static void test_exhaustive_extrakeys(const rustsecp256k1_v0_7_0_context *ctx, const 
rustsecp256k1_v0_7_0_ge* group) { - rustsecp256k1_v0_7_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_7_0_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_7_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; +static void test_exhaustive_extrakeys(const rustsecp256k1_v0_8_0_context *ctx, const rustsecp256k1_v0_8_0_ge* group) { + rustsecp256k1_v0_8_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; + rustsecp256k1_v0_8_0_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1]; + rustsecp256k1_v0_8_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; int parities[EXHAUSTIVE_TEST_ORDER - 1]; unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32]; int i; for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_7_0_fe fe; - rustsecp256k1_v0_7_0_scalar scalar_i; + rustsecp256k1_v0_8_0_fe fe; + rustsecp256k1_v0_8_0_scalar scalar_i; unsigned char buf[33]; int parity; - rustsecp256k1_v0_7_0_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_7_0_scalar_get_b32(buf, &scalar_i); + rustsecp256k1_v0_8_0_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_8_0_scalar_get_b32(buf, &scalar_i); /* Construct pubkey and keypair. */ - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair[i - 1], buf)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey[i - 1], buf)); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair[i - 1], buf)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey[i - 1], buf)); /* Construct serialized xonly_pubkey from keypair. */ - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1])); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1])); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); /* Parse the xonly_pubkey back and verify it matches the previously serialized value. */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1])); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1])); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); /* Construct the xonly_pubkey from the pubkey, and verify it matches the same. */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1])); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1])); CHECK(parity == parities[i - 1]); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); /* Compare the xonly_pubkey bytes against the precomputed group. 
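 * (Within the exhaustive tests, group[i] is the affine point i*G, so the
 * serialized x-only bytes must decode to a field element equal to
 * group[i].x, checked next.)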
*/ - rustsecp256k1_v0_7_0_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&fe, &group[i].x)); + rustsecp256k1_v0_8_0_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&fe, &group[i].x)); /* Check the parity against the precomputed group. */ fe = group[i].y; - rustsecp256k1_v0_7_0_fe_normalize_var(&fe); - CHECK(rustsecp256k1_v0_7_0_fe_is_odd(&fe) == parities[i - 1]); + rustsecp256k1_v0_8_0_fe_normalize_var(&fe); + CHECK(rustsecp256k1_v0_8_0_fe_is_odd(&fe) == parities[i - 1]); /* Verify that the higher half is identical to the lower half mirrored. */ if (i > EXHAUSTIVE_TEST_ORDER / 2) { - CHECK(rustsecp256k1_v0_7_0_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0); CHECK(parities[i - 1] == 1 - parities[EXHAUSTIVE_TEST_ORDER - i - 1]); } } diff --git a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h index 15cd880bd..69020ac01 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h @@ -9,19 +9,17 @@ #include "../../../include/secp256k1_extrakeys.h" -static rustsecp256k1_v0_7_0_context* api_test_context(int flags, int *ecount) { - rustsecp256k1_v0_7_0_context *ctx0 = rustsecp256k1_v0_7_0_context_create(flags); - rustsecp256k1_v0_7_0_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount); - return ctx0; +static void set_counting_callbacks(rustsecp256k1_v0_8_0_context *ctx0, int *ecount) { + rustsecp256k1_v0_8_0_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount); } void test_xonly_pubkey(void) { - rustsecp256k1_v0_7_0_pubkey pk; - rustsecp256k1_v0_7_0_xonly_pubkey xonly_pk, xonly_pk_tmp; - rustsecp256k1_v0_7_0_ge pk1; - rustsecp256k1_v0_7_0_ge pk2; - rustsecp256k1_v0_7_0_fe y; + rustsecp256k1_v0_8_0_pubkey pk; + rustsecp256k1_v0_8_0_xonly_pubkey xonly_pk, xonly_pk_tmp; + rustsecp256k1_v0_8_0_ge pk1; + rustsecp256k1_v0_8_0_ge pk2; + rustsecp256k1_v0_8_0_fe y; unsigned char sk[32]; unsigned char xy_sk[32]; unsigned char buf32[32]; @@ -31,110 +29,103 @@ void test_xonly_pubkey(void) { int i; int ecount; - rustsecp256k1_v0_7_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_7_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_7_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); - rustsecp256k1_v0_7_0_testrand256(sk); + set_counting_callbacks(ctx, &ecount); + + rustsecp256k1_v0_8_0_testrand256(sk); memset(ones32, 0xFF, 32); - rustsecp256k1_v0_7_0_testrand256(xy_sk); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(sign, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); + rustsecp256k1_v0_8_0_testrand256(xy_sk); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); /* Test xonly_pubkey_from_pubkey */ ecount = 0; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, 
&pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(sign, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(verify, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, NULL, &pk_parity, &pk) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, NULL, &pk_parity, &pk) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, NULL) == 0); CHECK(ecount == 2); memset(&pk, 0, sizeof(pk)); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 0); CHECK(ecount == 3); /* Choose a secret key such that the resulting pubkey and xonly_pubkey match. */ memset(sk, 0, sizeof(sk)); sk[0] = 1; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0); CHECK(pk_parity == 0); /* Choose a secret key such that pubkey and xonly_pubkey are each other's * negation. 
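 * (sk[0] = 2 happens to give a point with odd Y, so pk_parity is 1 and,
 * as the checks below confirm, the X coordinates match while Y is
 * negated.)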
*/ sk[0] = 2; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0); CHECK(pk_parity == 1); - rustsecp256k1_v0_7_0_pubkey_load(ctx, &pk1, &pk); - rustsecp256k1_v0_7_0_pubkey_load(ctx, &pk2, (rustsecp256k1_v0_7_0_pubkey *) &xonly_pk); - CHECK(rustsecp256k1_v0_7_0_fe_equal(&pk1.x, &pk2.x) == 1); - rustsecp256k1_v0_7_0_fe_negate(&y, &pk2.y, 1); - CHECK(rustsecp256k1_v0_7_0_fe_equal(&pk1.y, &y) == 1); + rustsecp256k1_v0_8_0_pubkey_load(ctx, &pk1, &pk); + rustsecp256k1_v0_8_0_pubkey_load(ctx, &pk2, (rustsecp256k1_v0_8_0_pubkey *) &xonly_pk); + CHECK(rustsecp256k1_v0_8_0_fe_equal(&pk1.x, &pk2.x) == 1); + rustsecp256k1_v0_8_0_fe_negate(&y, &pk2.y, 1); + CHECK(rustsecp256k1_v0_8_0_fe_equal(&pk1.y, &y) == 1); /* Test xonly_pubkey_serialize and xonly_pubkey_parse */ ecount = 0; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, NULL, &xonly_pk) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(none, buf32, NULL) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(buf32, zeros64, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, buf32, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(buf32, zeros64, 32) == 0); CHECK(ecount == 2); { /* A pubkey filled with 0s will fail to serialize due to pubkey_load * special casing. 
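 * (pubkey_load ARG_CHECKs an all-zero pubkey and fires the illegal
 * callback, which the ecount check after this block observes.)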
*/ - rustsecp256k1_v0_7_0_xonly_pubkey pk_tmp; + rustsecp256k1_v0_8_0_xonly_pubkey pk_tmp; memset(&pk_tmp, 0, sizeof(pk_tmp)); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(none, buf32, &pk_tmp) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, buf32, &pk_tmp) == 0); } /* pubkey_load called illegal callback */ CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(none, buf32, &xonly_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(none, NULL, buf32) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, NULL, buf32) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(none, &xonly_pk, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &xonly_pk, NULL) == 0); CHECK(ecount == 2); /* Serialization and parse roundtrip */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0); /* Test parsing invalid field elements */ memset(&xonly_pk, 1, sizeof(xonly_pk)); /* Overflowing field element */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &xonly_pk, ones32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); memset(&xonly_pk, 1, sizeof(xonly_pk)); /* There's no point with x-coordinate 0 on secp256k1 */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &xonly_pk, zeros64) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); /* If a random 32-byte string cannot be parsed with ec_pubkey_parse * (because, interpreted as an X coordinate, it does not correspond to a point on * the curve) then xonly_pubkey_parse should fail as well. 
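 * (Roughly half of all field elements are valid X coordinates on
 * secp256k1, so both branches of the loop below are taken with high
 * probability.)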
*/ for (i = 0; i < count; i++) { unsigned char rand33[33]; - rustsecp256k1_v0_7_0_testrand256(&rand33[1]); + rustsecp256k1_v0_8_0_testrand256(&rand33[1]); rand33[0] = SECP256K1_TAG_PUBKEY_EVEN; - if (!rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pk, rand33, 33)) { + if (!rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pk, rand33, 33)) { memset(&xonly_pk, 1, sizeof(xonly_pk)); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); } else { - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1); } } CHECK(ecount == 2); - - rustsecp256k1_v0_7_0_context_destroy(none); - rustsecp256k1_v0_7_0_context_destroy(sign); - rustsecp256k1_v0_7_0_context_destroy(verify); } void test_xonly_pubkey_comparison(void) { @@ -146,171 +137,160 @@ void test_xonly_pubkey_comparison(void) { 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c }; - rustsecp256k1_v0_7_0_xonly_pubkey pk1; - rustsecp256k1_v0_7_0_xonly_pubkey pk2; + rustsecp256k1_v0_8_0_xonly_pubkey pk1; + rustsecp256k1_v0_8_0_xonly_pubkey pk2; int ecount = 0; - rustsecp256k1_v0_7_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(none, &pk1, pk1_ser) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(none, &pk2, pk2_ser) == 1); + set_counting_callbacks(ctx, &ecount); + + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &pk1, pk1_ser) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &pk2, pk2_ser) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, NULL, &pk2) < 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, NULL, &pk2) < 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, &pk1, NULL) > 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &pk1, NULL) > 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, &pk1, &pk2) < 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, &pk2, &pk1) > 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, &pk1, &pk1) == 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, &pk2, &pk2) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &pk1, &pk1) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &pk2, &pk2) == 0); CHECK(ecount == 2); memset(&pk1, 0, sizeof(pk1)); /* illegal pubkey */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &pk1, &pk2) < 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, &pk1, &pk1) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &pk1, &pk1) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_cmp(none, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_cmp(ctx, &pk2, &pk1) > 0); CHECK(ecount == 6); - - rustsecp256k1_v0_7_0_context_destroy(none); } void test_xonly_pubkey_tweak(void) { unsigned char zeros64[64] = { 0 }; unsigned char overflows[32]; unsigned 
char sk[32]; - rustsecp256k1_v0_7_0_pubkey internal_pk; - rustsecp256k1_v0_7_0_xonly_pubkey internal_xonly_pk; - rustsecp256k1_v0_7_0_pubkey output_pk; + rustsecp256k1_v0_8_0_pubkey internal_pk; + rustsecp256k1_v0_8_0_xonly_pubkey internal_xonly_pk; + rustsecp256k1_v0_8_0_pubkey output_pk; int pk_parity; unsigned char tweak[32]; int i; int ecount; - rustsecp256k1_v0_7_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_7_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_7_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); + + set_counting_callbacks(ctx, &ecount); memset(overflows, 0xff, sizeof(overflows)); - rustsecp256k1_v0_7_0_testrand256(tweak); - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &internal_pk, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); + rustsecp256k1_v0_8_0_testrand256(tweak); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &internal_pk, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(none, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(sign, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, NULL, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, NULL, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, NULL, tweak) == 0); CHECK(ecount == 2); /* NULL internal_xonly_pk zeroes the output_pk */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, NULL) == 0); CHECK(ecount == 3); /* NULL tweak zeroes the output_pk */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); /* Invalid tweak zeroes the output_pk */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); /* A zero tweak is fine */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, 
&internal_xonly_pk, zeros64) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, zeros64) == 1); /* Fails if the resulting key was infinity */ for (i = 0; i < count; i++) { - rustsecp256k1_v0_7_0_scalar scalar_tweak; + rustsecp256k1_v0_8_0_scalar scalar_tweak; /* Because sk may be negated before adding, we need to try with tweak = * sk as well as tweak = -sk. */ - rustsecp256k1_v0_7_0_scalar_set_b32(&scalar_tweak, sk, NULL); - rustsecp256k1_v0_7_0_scalar_negate(&scalar_tweak, &scalar_tweak); - rustsecp256k1_v0_7_0_scalar_get_b32(tweak, &scalar_tweak); - CHECK((rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0) - || (rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + rustsecp256k1_v0_8_0_scalar_set_b32(&scalar_tweak, sk, NULL); + rustsecp256k1_v0_8_0_scalar_negate(&scalar_tweak, &scalar_tweak); + rustsecp256k1_v0_8_0_scalar_get_b32(tweak, &scalar_tweak); + CHECK((rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, sk) == 0) + || (rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 0)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); } /* Invalid pk with a valid tweak */ memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk)); - rustsecp256k1_v0_7_0_testrand256(tweak); + rustsecp256k1_v0_8_0_testrand256(tweak); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); - - rustsecp256k1_v0_7_0_context_destroy(none); - rustsecp256k1_v0_7_0_context_destroy(sign); - rustsecp256k1_v0_7_0_context_destroy(verify); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); } void test_xonly_pubkey_tweak_check(void) { unsigned char zeros64[64] = { 0 }; unsigned char overflows[32]; unsigned char sk[32]; - rustsecp256k1_v0_7_0_pubkey internal_pk; - rustsecp256k1_v0_7_0_xonly_pubkey internal_xonly_pk; - rustsecp256k1_v0_7_0_pubkey output_pk; - rustsecp256k1_v0_7_0_xonly_pubkey output_xonly_pk; + rustsecp256k1_v0_8_0_pubkey internal_pk; + rustsecp256k1_v0_8_0_xonly_pubkey internal_xonly_pk; + rustsecp256k1_v0_8_0_pubkey output_pk; + rustsecp256k1_v0_8_0_xonly_pubkey output_xonly_pk; unsigned char output_pk32[32]; unsigned char buf32[32]; int pk_parity; unsigned char tweak[32]; int ecount; - rustsecp256k1_v0_7_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_7_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_7_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); + + set_counting_callbacks(ctx, &ecount); memset(overflows, 0xff, sizeof(overflows)); - rustsecp256k1_v0_7_0_testrand256(tweak); - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &internal_pk, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); + rustsecp256k1_v0_8_0_testrand256(tweak); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &internal_pk, sk) == 1); + 
CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(verify, &output_xonly_pk, &pk_parity, &output_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, buf32, &output_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(none, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, buf32, &output_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(sign, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(verify, NULL, pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, NULL, pk_parity, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 1); /* invalid pk_parity value */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(verify, buf32, 2, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, buf32, 2, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, NULL, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, NULL, tweak) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, NULL) == 0); CHECK(ecount == 3); memset(tweak, 1, sizeof(tweak)); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, NULL, &internal_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, output_pk32, &output_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, NULL, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, output_pk32, &output_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 
1); /* Wrong pk_parity */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0); /* Wrong public key */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, buf32, &internal_xonly_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, buf32, &internal_xonly_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 0); /* Overflowing tweak not allowed */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); CHECK(ecount == 3); - - rustsecp256k1_v0_7_0_context_destroy(none); - rustsecp256k1_v0_7_0_context_destroy(sign); - rustsecp256k1_v0_7_0_context_destroy(verify); } /* Starts with an initial pubkey and recursively creates N_PUBKEYS - 1 @@ -319,29 +299,29 @@ void test_xonly_pubkey_tweak_check(void) { #define N_PUBKEYS 32 void test_xonly_pubkey_tweak_recursive(void) { unsigned char sk[32]; - rustsecp256k1_v0_7_0_pubkey pk[N_PUBKEYS]; + rustsecp256k1_v0_8_0_pubkey pk[N_PUBKEYS]; unsigned char pk_serialized[32]; unsigned char tweak[N_PUBKEYS - 1][32]; int i; - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pk[0], sk) == 1); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pk[0], sk) == 1); /* Add tweaks */ for (i = 0; i < N_PUBKEYS - 1; i++) { - rustsecp256k1_v0_7_0_xonly_pubkey xonly_pk; + rustsecp256k1_v0_8_0_xonly_pubkey xonly_pk; memset(tweak[i], i + 1, sizeof(tweak[i])); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i]) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(ctx, &pk[i + 1], &xonly_pk, tweak[i]) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i]) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &pk[i + 1], &xonly_pk, tweak[i]) == 1); } /* Verify tweaks */ for (i = N_PUBKEYS - 1; i > 0; i--) { - rustsecp256k1_v0_7_0_xonly_pubkey xonly_pk; + rustsecp256k1_v0_8_0_xonly_pubkey xonly_pk; int pk_parity; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk[i]) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, pk_serialized, &xonly_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i - 1]) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(ctx, pk_serialized, pk_parity, &xonly_pk, tweak[i - 1]) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk[i]) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, pk_serialized, &xonly_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, 
NULL, &pk[i - 1]) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, pk_serialized, pk_parity, &xonly_pk, tweak[i - 1]) == 1); } } #undef N_PUBKEYS @@ -351,232 +331,222 @@ void test_keypair(void) { unsigned char sk_tmp[32]; unsigned char zeros96[96] = { 0 }; unsigned char overflows[32]; - rustsecp256k1_v0_7_0_keypair keypair; - rustsecp256k1_v0_7_0_pubkey pk, pk_tmp; - rustsecp256k1_v0_7_0_xonly_pubkey xonly_pk, xonly_pk_tmp; + rustsecp256k1_v0_8_0_keypair keypair; + rustsecp256k1_v0_8_0_pubkey pk, pk_tmp; + rustsecp256k1_v0_8_0_xonly_pubkey xonly_pk, xonly_pk_tmp; int pk_parity, pk_parity_tmp; int ecount; - rustsecp256k1_v0_7_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_7_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_7_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); - rustsecp256k1_v0_7_0_context *sttc = rustsecp256k1_v0_7_0_context_clone(rustsecp256k1_v0_7_0_context_no_precomp); - rustsecp256k1_v0_7_0_context_set_error_callback(sttc, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(sttc, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context *sttc = rustsecp256k1_v0_8_0_context_clone(rustsecp256k1_v0_8_0_context_static); + + set_counting_callbacks(ctx, &ecount); + set_counting_callbacks(sttc, &ecount); CHECK(sizeof(zeros96) == sizeof(keypair)); memset(overflows, 0xFF, sizeof(overflows)); /* Test keypair_create */ ecount = 0; - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_keypair_create(none, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_keypair_create(verify, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_keypair_create(sign, NULL, sk) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, NULL, sk) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_create(sign, &keypair, NULL) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_keypair_create(sign, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_keypair_create(sttc, &keypair, sk) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(sttc, &keypair, sk) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(ecount == 3); /* Invalid secret key */ - CHECK(rustsecp256k1_v0_7_0_keypair_create(sign, &keypair, zeros96) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); - CHECK(rustsecp256k1_v0_7_0_keypair_create(sign, &keypair, overflows) == 0); - 
CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, zeros96) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, overflows) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); /* Test keypair_pub */ ecount = 0; - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_pub(none, &pk, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_pub(none, NULL, &keypair) == 0); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_pub(ctx, &pk, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_pub(ctx, NULL, &keypair) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_pub(none, &pk, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_pub(ctx, &pk, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); /* Using an invalid keypair is fine for keypair_pub */ memset(&keypair, 0, sizeof(keypair)); - CHECK(rustsecp256k1_v0_7_0_keypair_pub(none, &pk, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_pub(ctx, &pk, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); /* keypair holds the same pubkey as pubkey_create */ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(sign, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_create(sign, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_pub(none, &pk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_pub(ctx, &pk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0); /** Test keypair_xonly_pub **/ ecount = 0; - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xonly_pk, &pk_parity, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, NULL, &pk_parity, &keypair) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xonly_pk, NULL, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xonly_pk, &pk_parity, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); /* Using an invalid keypair will set the xonly_pk to 0 (first reset * xonly_pk). 
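 * (Once the keypair is zeroed, keypair_load fails and keypair_xonly_pub
 * zeroes its output, which the memcmp against zeros96 confirms.)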
*/ - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xonly_pk, &pk_parity, &keypair) == 1); memset(&keypair, 0, sizeof(keypair)); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xonly_pk, &pk_parity, &keypair) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); CHECK(ecount == 3); /** keypair holds the same xonly pubkey as pubkey_create **/ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(sign, &pk, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_create(sign, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pk, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0); CHECK(pk_parity == pk_parity_tmp); /* Test keypair_seckey */ ecount = 0; - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_sec(none, sk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_sec(none, NULL, &keypair) == 0); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_sec(ctx, sk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_sec(ctx, NULL, &keypair) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_sec(none, sk_tmp, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_sec(ctx, sk_tmp, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); /* keypair returns the same seckey it got */ - CHECK(rustsecp256k1_v0_7_0_keypair_create(sign, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_sec(none, sk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(sk, sk_tmp, sizeof(sk_tmp)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_sec(ctx, sk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sk, sk_tmp, sizeof(sk_tmp)) == 0); /* Using an invalid keypair is fine for keypair_seckey */ memset(&keypair, 0, sizeof(keypair)); - CHECK(rustsecp256k1_v0_7_0_keypair_sec(none, sk_tmp, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); - - rustsecp256k1_v0_7_0_context_destroy(none); - rustsecp256k1_v0_7_0_context_destroy(sign); - rustsecp256k1_v0_7_0_context_destroy(verify); - rustsecp256k1_v0_7_0_context_destroy(sttc); + CHECK(rustsecp256k1_v0_8_0_keypair_sec(ctx, sk_tmp, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0); + rustsecp256k1_v0_8_0_context_destroy(sttc); } void test_keypair_add(void) { 
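    /* Exercises rustsecp256k1_v0_8_0_keypair_xonly_tweak_add: NULL-argument
     * checks via ecount, an overflowing tweak zeroing the keypair, a zero
     * tweak succeeding, the tweak = sk / tweak = -sk infinity cases, partly
     * invalid keypairs, and agreement of the tweaked keypair with
     * xonly_pubkey_tweak_add and ec_pubkey_create. */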
unsigned char sk[32]; - rustsecp256k1_v0_7_0_keypair keypair; + rustsecp256k1_v0_8_0_keypair keypair; unsigned char overflows[32]; unsigned char zeros96[96] = { 0 }; unsigned char tweak[32]; int i; int ecount = 0; - rustsecp256k1_v0_7_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); - rustsecp256k1_v0_7_0_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); - rustsecp256k1_v0_7_0_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); + + set_counting_callbacks(ctx, &ecount); CHECK(sizeof(zeros96) == sizeof(keypair)); - rustsecp256k1_v0_7_0_testrand256(sk); - rustsecp256k1_v0_7_0_testrand256(tweak); + rustsecp256k1_v0_8_0_testrand256(sk); + rustsecp256k1_v0_8_0_testrand256(tweak); memset(overflows, 0xFF, 32); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(none, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(sign, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(verify, NULL, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, NULL, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, NULL) == 0); CHECK(ecount == 2); /* This does not set the keypair to zeroes */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0); /* Invalid tweak zeroes the keypair */ - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); /* A zero tweak is fine */ - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(ctx, &keypair, zeros96) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, zeros96) == 1); /* Fails if the resulting keypair was (sk=0, pk=infinity) */ for (i = 0; i < count; i++) { - rustsecp256k1_v0_7_0_scalar scalar_tweak; - rustsecp256k1_v0_7_0_keypair keypair_tmp; - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); + rustsecp256k1_v0_8_0_scalar scalar_tweak; + rustsecp256k1_v0_8_0_keypair keypair_tmp; + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); memcpy(&keypair_tmp, &keypair, sizeof(keypair)); /* Because sk may be negated before adding, we need to try with tweak = * sk as well as tweak = -sk. 
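 * (With odd Y the secret key is negated before the tweak is added, so
 * tweak = sk drives the sum to zero; with even Y, tweak = -sk does. One of
 * the two CHECKs below must therefore fail and zero its keypair.)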
*/ - rustsecp256k1_v0_7_0_scalar_set_b32(&scalar_tweak, sk, NULL); - rustsecp256k1_v0_7_0_scalar_negate(&scalar_tweak, &scalar_tweak); - rustsecp256k1_v0_7_0_scalar_get_b32(tweak, &scalar_tweak); - CHECK((rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0) - || (rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0 - || rustsecp256k1_v0_7_0_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0); + rustsecp256k1_v0_8_0_scalar_set_b32(&scalar_tweak, sk, NULL); + rustsecp256k1_v0_8_0_scalar_negate(&scalar_tweak, &scalar_tweak); + rustsecp256k1_v0_8_0_scalar_get_b32(tweak, &scalar_tweak); + CHECK((rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0) + || (rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0 + || rustsecp256k1_v0_8_0_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0); } /* Invalid keypair with a valid tweak */ memset(&keypair, 0, sizeof(keypair)); - rustsecp256k1_v0_7_0_testrand256(tweak); + rustsecp256k1_v0_8_0_testrand256(tweak); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); /* Only seckey part of keypair invalid */ - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); memset(&keypair, 0, 32); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 0); CHECK(ecount == 2); /* Only pubkey part of keypair invalid */ - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); memset(&keypair.data[32], 0, 64); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 0); CHECK(ecount == 3); /* Check that the keypair_tweak_add implementation is correct */ - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); for (i = 0; i < count; i++) { - rustsecp256k1_v0_7_0_xonly_pubkey internal_pk; - rustsecp256k1_v0_7_0_xonly_pubkey output_pk; - rustsecp256k1_v0_7_0_pubkey output_pk_xy; - rustsecp256k1_v0_7_0_pubkey output_pk_expected; + rustsecp256k1_v0_8_0_xonly_pubkey internal_pk; + rustsecp256k1_v0_8_0_xonly_pubkey output_pk; + rustsecp256k1_v0_8_0_pubkey output_pk_xy; + rustsecp256k1_v0_8_0_pubkey output_pk_expected; unsigned char pk32[32]; unsigned char sk32[32]; int pk_parity; - rustsecp256k1_v0_7_0_testrand256(tweak); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); + rustsecp256k1_v0_8_0_testrand256(tweak); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); + 
CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); /* Check that it passes xonly_pubkey_tweak_add_check */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, pk32, &output_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(ctx, pk32, pk_parity, &internal_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, pk32, &output_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, pk32, pk_parity, &internal_pk, tweak) == 1); /* Check that the resulting pubkey matches xonly_pubkey_tweak_add */ - CHECK(rustsecp256k1_v0_7_0_keypair_pub(ctx, &output_pk_xy, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_pub(ctx, &output_pk_xy, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); /* Check that the secret key in the keypair is tweaked correctly */ - CHECK(rustsecp256k1_v0_7_0_keypair_sec(none, sk32, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &output_pk_expected, sk32) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_sec(ctx, sk32, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &output_pk_expected, sk32) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); } - rustsecp256k1_v0_7_0_context_destroy(none); - rustsecp256k1_v0_7_0_context_destroy(sign); - rustsecp256k1_v0_7_0_context_destroy(verify); } void run_extrakeys_tests(void) { diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include index 0bbd25025..7975d6f31 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include @@ -1,4 +1,4 @@ -include_HEADERS += include/rustsecp256k1_v0_7_0_recovery.h +include_HEADERS += include/rustsecp256k1_v0_8_0_recovery.h noinst_HEADERS += src/modules/recovery/main_impl.h noinst_HEADERS += src/modules/recovery/tests_impl.h noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h index 806da3520..333891bf1 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h @@ -7,10 +7,10 @@ #ifndef SECP256K1_MODULE_RECOVERY_BENCH_H #define SECP256K1_MODULE_RECOVERY_BENCH_H -#include "../include/secp256k1_recovery.h" +#include "../../../include/secp256k1_recovery.h" typedef struct { - rustsecp256k1_v0_7_0_context *ctx; + rustsecp256k1_v0_8_0_context *ctx; unsigned char msg[32]; unsigned char sig[64]; } bench_recover_data; @@ -18,16 +18,16 @@ typedef struct { void bench_recover(void* arg, int iters) { int i; bench_recover_data *data = (bench_recover_data*)arg; - rustsecp256k1_v0_7_0_pubkey pubkey; + rustsecp256k1_v0_8_0_pubkey 
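The tweak loop above leans on the fact that keypair_xonly_tweak_add may negate the secret key (when the keypair's public key has odd Y) before adding the tweak, so the zero-key failure can be reached with either tweak = sk or tweak = -sk. As a usage reference only, here is a minimal sketch of the same x-only tweak flow through the public API; the function name `tweak_roundtrip` and the bare header path are illustrative assumptions, not part of the vendored sources:

```c
#include <secp256k1_extrakeys.h>  /* assumed include path */

/* Sketch: tweak a keypair, then confirm the tweaked x-only key with
 * xonly_pubkey_tweak_add_check. Returns 1 on success. */
int tweak_roundtrip(const unsigned char sk[32], const unsigned char tweak[32]) {
    rustsecp256k1_v0_8_0_context *c = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);
    rustsecp256k1_v0_8_0_keypair kp;
    rustsecp256k1_v0_8_0_xonly_pubkey internal, tweaked;
    unsigned char tweaked32[32];
    int parity, ok;

    ok = rustsecp256k1_v0_8_0_keypair_create(c, &kp, sk);
    ok = ok && rustsecp256k1_v0_8_0_keypair_xonly_pub(c, &internal, NULL, &kp);
    ok = ok && rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(c, &kp, tweak);
    ok = ok && rustsecp256k1_v0_8_0_keypair_xonly_pub(c, &tweaked, &parity, &kp);
    ok = ok && rustsecp256k1_v0_8_0_xonly_pubkey_serialize(c, tweaked32, &tweaked);
    ok = ok && rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(c, tweaked32, parity, &internal, tweak);
    rustsecp256k1_v0_8_0_context_destroy(c);
    return ok;
}
```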
diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include
index 0bbd25025..7975d6f31 100644
--- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include
+++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/Makefile.am.include
@@ -1,4 +1,4 @@
-include_HEADERS += include/rustsecp256k1_v0_7_0_recovery.h
+include_HEADERS += include/rustsecp256k1_v0_8_0_recovery.h
 noinst_HEADERS += src/modules/recovery/main_impl.h
 noinst_HEADERS += src/modules/recovery/tests_impl.h
 noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h
diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h
index 806da3520..333891bf1 100644
--- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h
+++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/bench_impl.h
@@ -7,10 +7,10 @@
 #ifndef SECP256K1_MODULE_RECOVERY_BENCH_H
 #define SECP256K1_MODULE_RECOVERY_BENCH_H
-#include "../include/secp256k1_recovery.h"
+#include "../../../include/secp256k1_recovery.h"
 typedef struct {
-    rustsecp256k1_v0_7_0_context *ctx;
+    rustsecp256k1_v0_8_0_context *ctx;
     unsigned char msg[32];
     unsigned char sig[64];
 } bench_recover_data;
@@ -18,16 +18,16 @@ typedef struct {
 void bench_recover(void* arg, int iters) {
     int i;
     bench_recover_data *data = (bench_recover_data*)arg;
-    rustsecp256k1_v0_7_0_pubkey pubkey;
+    rustsecp256k1_v0_8_0_pubkey pubkey;
     unsigned char pubkeyc[33];
     for (i = 0; i < iters; i++) {
         int j;
         size_t pubkeylen = 33;
-        rustsecp256k1_v0_7_0_ecdsa_recoverable_signature sig;
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
-        CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
+        rustsecp256k1_v0_8_0_ecdsa_recoverable_signature sig;
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
+        CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
         for (j = 0; j < 32; j++) {
             data->sig[j + 32] = data->msg[j];    /* Move former message to S. */
             data->msg[j] = data->sig[j];         /* Move former R to message. */
@@ -52,11 +52,11 @@ void run_recovery_bench(int iters, int argc, char** argv) {
     bench_recover_data data;
     int d = argc == 1;
-    data.ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_VERIFY);
+    data.ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);
     if (d || have_flag(argc, argv, "ecdsa") || have_flag(argc, argv, "recover") || have_flag(argc, argv, "ecdsa_recover")) run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, iters);
-    rustsecp256k1_v0_7_0_context_destroy(data.ctx);
+    rustsecp256k1_v0_8_0_context_destroy(data.ctx);
 }
 #endif /* SECP256K1_MODULE_RECOVERY_BENCH_H */
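Note the context change in run_recovery_bench: upstream 0.2.0 deprecates the SECP256K1_CONTEXT_SIGN and SECP256K1_CONTEXT_VERIFY flags, and a context created with SECP256K1_CONTEXT_NONE now supports signing, verification and recovery alike, which is why the benchmark drops SECP256K1_CONTEXT_VERIFY. A sketch of the new-style setup (the helper name `make_context` is an assumption; randomization is optional but recommended for contexts used to sign):

```c
#include <secp256k1.h>  /* assumed include path */

/* Sketch: 0.2.0-style context creation. One NONE context serves all
 * operations; context_randomize adds side-channel protection. */
rustsecp256k1_v0_8_0_context *make_context(const unsigned char seed32[32]) {
    rustsecp256k1_v0_8_0_context *c = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);
    if (c != NULL && !rustsecp256k1_v0_8_0_context_randomize(c, seed32)) {
        rustsecp256k1_v0_8_0_context_destroy(c);
        return NULL;
    }
    return c;
}
```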
diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h
index 6caeb6500..ad54dea37 100644
--- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h
+++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/main_impl.h
@@ -9,34 +9,34 @@
 #include "../../../include/secp256k1_recovery.h"
-static void rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_scalar* r, rustsecp256k1_v0_7_0_scalar* s, int* recid, const rustsecp256k1_v0_7_0_ecdsa_recoverable_signature* sig) {
+static void rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_load(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_scalar* r, rustsecp256k1_v0_8_0_scalar* s, int* recid, const rustsecp256k1_v0_8_0_ecdsa_recoverable_signature* sig) {
     (void)ctx;
-    if (sizeof(rustsecp256k1_v0_7_0_scalar) == 32) {
-        /* When the rustsecp256k1_v0_7_0_scalar type is exactly 32 byte, use its
-         * representation inside rustsecp256k1_v0_7_0_ecdsa_signature, as conversion is very fast.
-         * Note that rustsecp256k1_v0_7_0_ecdsa_signature_save must use the same representation.
+    if (sizeof(rustsecp256k1_v0_8_0_scalar) == 32) {
+        /* When the rustsecp256k1_v0_8_0_scalar type is exactly 32 byte, use its
+         * representation inside rustsecp256k1_v0_8_0_ecdsa_signature, as conversion is very fast.
+         * Note that rustsecp256k1_v0_8_0_ecdsa_signature_save must use the same representation.
          */
         memcpy(r, &sig->data[0], 32);
         memcpy(s, &sig->data[32], 32);
     } else {
-        rustsecp256k1_v0_7_0_scalar_set_b32(r, &sig->data[0], NULL);
-        rustsecp256k1_v0_7_0_scalar_set_b32(s, &sig->data[32], NULL);
+        rustsecp256k1_v0_8_0_scalar_set_b32(r, &sig->data[0], NULL);
+        rustsecp256k1_v0_8_0_scalar_set_b32(s, &sig->data[32], NULL);
     }
     *recid = sig->data[64];
 }
-static void rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_save(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_7_0_scalar* r, const rustsecp256k1_v0_7_0_scalar* s, int recid) {
-    if (sizeof(rustsecp256k1_v0_7_0_scalar) == 32) {
+static void rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_save(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature* sig, const rustsecp256k1_v0_8_0_scalar* r, const rustsecp256k1_v0_8_0_scalar* s, int recid) {
+    if (sizeof(rustsecp256k1_v0_8_0_scalar) == 32) {
         memcpy(&sig->data[0], r, 32);
         memcpy(&sig->data[32], s, 32);
     } else {
-        rustsecp256k1_v0_7_0_scalar_get_b32(&sig->data[0], r);
-        rustsecp256k1_v0_7_0_scalar_get_b32(&sig->data[32], s);
+        rustsecp256k1_v0_8_0_scalar_get_b32(&sig->data[0], r);
+        rustsecp256k1_v0_8_0_scalar_get_b32(&sig->data[32], s);
     }
     sig->data[64] = recid;
 }
-int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
-    rustsecp256k1_v0_7_0_scalar r, s;
+int rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
+    rustsecp256k1_v0_8_0_scalar r, s;
     int ret = 1;
     int overflow = 0;
@@ -45,110 +45,110 @@ int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(const rustsec
     ARG_CHECK(input64 != NULL);
     ARG_CHECK(recid >= 0 && recid <= 3);
-    rustsecp256k1_v0_7_0_scalar_set_b32(&r, &input64[0], &overflow);
+    rustsecp256k1_v0_8_0_scalar_set_b32(&r, &input64[0], &overflow);
     ret &= !overflow;
-    rustsecp256k1_v0_7_0_scalar_set_b32(&s, &input64[32], &overflow);
+    rustsecp256k1_v0_8_0_scalar_set_b32(&s, &input64[32], &overflow);
     ret &= !overflow;
     if (ret) {
-        rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
+        rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
     } else {
         memset(sig, 0, sizeof(*sig));
     }
     return ret;
 }
-int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_7_0_ecdsa_recoverable_signature* sig) {
-    rustsecp256k1_v0_7_0_scalar r, s;
+int rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output64, int *recid, const rustsecp256k1_v0_8_0_ecdsa_recoverable_signature* sig) {
+    rustsecp256k1_v0_8_0_scalar r, s;
     VERIFY_CHECK(ctx != NULL);
     ARG_CHECK(output64 != NULL);
     ARG_CHECK(sig != NULL);
     ARG_CHECK(recid != NULL);
-    rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
-    rustsecp256k1_v0_7_0_scalar_get_b32(&output64[0], &r);
-    rustsecp256k1_v0_7_0_scalar_get_b32(&output64[32], &s);
+    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
+    rustsecp256k1_v0_8_0_scalar_get_b32(&output64[0], &r);
+    rustsecp256k1_v0_8_0_scalar_get_b32(&output64[32], &s);
     return 1;
 }
-int rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature* sig, const rustsecp256k1_v0_7_0_ecdsa_recoverable_signature* sigin) {
-    rustsecp256k1_v0_7_0_scalar r, s;
+int rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature* sig, const rustsecp256k1_v0_8_0_ecdsa_recoverable_signature* sigin) {
+    rustsecp256k1_v0_8_0_scalar r, s;
     int recid;
     VERIFY_CHECK(ctx != NULL);
     ARG_CHECK(sig != NULL);
     ARG_CHECK(sigin != NULL);
-    rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
-    rustsecp256k1_v0_7_0_ecdsa_signature_save(sig, &r, &s);
+    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
+    rustsecp256k1_v0_8_0_ecdsa_signature_save(sig, &r, &s);
     return 1;
 }
-static int rustsecp256k1_v0_7_0_ecdsa_sig_recover(const rustsecp256k1_v0_7_0_scalar *sigr, const rustsecp256k1_v0_7_0_scalar* sigs, rustsecp256k1_v0_7_0_ge *pubkey, const rustsecp256k1_v0_7_0_scalar *message, int recid) {
+static int rustsecp256k1_v0_8_0_ecdsa_sig_recover(const rustsecp256k1_v0_8_0_scalar *sigr, const rustsecp256k1_v0_8_0_scalar* sigs, rustsecp256k1_v0_8_0_ge *pubkey, const rustsecp256k1_v0_8_0_scalar *message, int recid) {
     unsigned char brx[32];
-    rustsecp256k1_v0_7_0_fe fx;
-    rustsecp256k1_v0_7_0_ge x;
-    rustsecp256k1_v0_7_0_gej xj;
-    rustsecp256k1_v0_7_0_scalar rn, u1, u2;
-    rustsecp256k1_v0_7_0_gej qj;
+    rustsecp256k1_v0_8_0_fe fx;
+    rustsecp256k1_v0_8_0_ge x;
+    rustsecp256k1_v0_8_0_gej xj;
+    rustsecp256k1_v0_8_0_scalar rn, u1, u2;
+    rustsecp256k1_v0_8_0_gej qj;
     int r;
-    if (rustsecp256k1_v0_7_0_scalar_is_zero(sigr) || rustsecp256k1_v0_7_0_scalar_is_zero(sigs)) {
+    if (rustsecp256k1_v0_8_0_scalar_is_zero(sigr) || rustsecp256k1_v0_8_0_scalar_is_zero(sigs)) {
         return 0;
     }
-    rustsecp256k1_v0_7_0_scalar_get_b32(brx, sigr);
-    r = rustsecp256k1_v0_7_0_fe_set_b32(&fx, brx);
+    rustsecp256k1_v0_8_0_scalar_get_b32(brx, sigr);
+    r = rustsecp256k1_v0_8_0_fe_set_b32(&fx, brx);
     (void)r;
     VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */
     if (recid & 2) {
-        if (rustsecp256k1_v0_7_0_fe_cmp_var(&fx, &rustsecp256k1_v0_7_0_ecdsa_const_p_minus_order) >= 0) {
+        if (rustsecp256k1_v0_8_0_fe_cmp_var(&fx, &rustsecp256k1_v0_8_0_ecdsa_const_p_minus_order) >= 0) {
             return 0;
         }
-        rustsecp256k1_v0_7_0_fe_add(&fx, &rustsecp256k1_v0_7_0_ecdsa_const_order_as_fe);
+        rustsecp256k1_v0_8_0_fe_add(&fx, &rustsecp256k1_v0_8_0_ecdsa_const_order_as_fe);
     }
-    if (!rustsecp256k1_v0_7_0_ge_set_xo_var(&x, &fx, recid & 1)) {
+    if (!rustsecp256k1_v0_8_0_ge_set_xo_var(&x, &fx, recid & 1)) {
         return 0;
     }
-    rustsecp256k1_v0_7_0_gej_set_ge(&xj, &x);
-    rustsecp256k1_v0_7_0_scalar_inverse_var(&rn, sigr);
-    rustsecp256k1_v0_7_0_scalar_mul(&u1, &rn, message);
-    rustsecp256k1_v0_7_0_scalar_negate(&u1, &u1);
-    rustsecp256k1_v0_7_0_scalar_mul(&u2, &rn, sigs);
-    rustsecp256k1_v0_7_0_ecmult(&qj, &xj, &u2, &u1);
-    rustsecp256k1_v0_7_0_ge_set_gej_var(pubkey, &qj);
-    return !rustsecp256k1_v0_7_0_gej_is_infinity(&qj);
+    rustsecp256k1_v0_8_0_gej_set_ge(&xj, &x);
+    rustsecp256k1_v0_8_0_scalar_inverse_var(&rn, sigr);
+    rustsecp256k1_v0_8_0_scalar_mul(&u1, &rn, message);
+    rustsecp256k1_v0_8_0_scalar_negate(&u1, &u1);
+    rustsecp256k1_v0_8_0_scalar_mul(&u2, &rn, sigs);
+    rustsecp256k1_v0_8_0_ecmult(&qj, &xj, &u2, &u1);
+    rustsecp256k1_v0_8_0_ge_set_gej_var(pubkey, &qj);
+    return !rustsecp256k1_v0_8_0_gej_is_infinity(&qj);
 }
-int rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_7_0_nonce_function noncefp, const void* noncedata) {
-    rustsecp256k1_v0_7_0_scalar r, s;
+int rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_8_0_nonce_function noncefp, const void* noncedata) {
+    rustsecp256k1_v0_8_0_scalar r, s;
     int ret, recid;
     VERIFY_CHECK(ctx != NULL);
-    ARG_CHECK(rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+    ARG_CHECK(rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
     ARG_CHECK(msghash32 != NULL);
     ARG_CHECK(signature != NULL);
     ARG_CHECK(seckey != NULL);
-    ret = rustsecp256k1_v0_7_0_ecdsa_sign_inner(ctx, &r, &s, &recid, msghash32, seckey, noncefp, noncedata);
-    rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
+    ret = rustsecp256k1_v0_8_0_ecdsa_sign_inner(ctx, &r, &s, &recid, msghash32, seckey, noncefp, noncedata);
+    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
     return ret;
 }
-int rustsecp256k1_v0_7_0_ecdsa_recover(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey, const rustsecp256k1_v0_7_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32) {
-    rustsecp256k1_v0_7_0_ge q;
-    rustsecp256k1_v0_7_0_scalar r, s;
-    rustsecp256k1_v0_7_0_scalar m;
+int rustsecp256k1_v0_8_0_ecdsa_recover(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey, const rustsecp256k1_v0_8_0_ecdsa_recoverable_signature *signature, const unsigned char *msghash32) {
+    rustsecp256k1_v0_8_0_ge q;
+    rustsecp256k1_v0_8_0_scalar r, s;
+    rustsecp256k1_v0_8_0_scalar m;
     int recid;
     VERIFY_CHECK(ctx != NULL);
     ARG_CHECK(msghash32 != NULL);
     ARG_CHECK(signature != NULL);
     ARG_CHECK(pubkey != NULL);
-    rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
+    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
     VERIFY_CHECK(recid >= 0 && recid < 4);  /* should have been caught in parse_compact */
-    rustsecp256k1_v0_7_0_scalar_set_b32(&m, msghash32, NULL);
-    if (rustsecp256k1_v0_7_0_ecdsa_sig_recover(&r, &s, &q, &m, recid)) {
-        rustsecp256k1_v0_7_0_pubkey_save(pubkey, &q);
+    rustsecp256k1_v0_8_0_scalar_set_b32(&m, msghash32, NULL);
+    if (rustsecp256k1_v0_8_0_ecdsa_sig_recover(&r, &s, &q, &m, recid)) {
+        rustsecp256k1_v0_8_0_pubkey_save(pubkey, &q);
         return 1;
     } else {
         memset(pubkey, 0, sizeof(*pubkey));
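For reference, ecdsa_sig_recover above implements standard ECDSA public key recovery: bit 0 of the recovery id selects the parity of R.y, bit 1 says whether r overflowed the group order (so R.x = r + n), and the key is then computed as Q = r^-1 * (s*R - m*G), with u1 = -m*r^-1 and u2 = s*r^-1 fed to ecmult. A minimal sign/recover round trip over the public API wrapped by this file; the helper name `recover_roundtrip` and the bare header path are assumptions:

```c
#include <string.h>
#include <secp256k1_recovery.h>  /* assumed include path */

/* Sketch: sign with a recoverable signature, serialize, parse, recover,
 * and compare the recovered key against the real one. Returns 1 on match. */
int recover_roundtrip(const unsigned char msg32[32], const unsigned char seckey[32]) {
    rustsecp256k1_v0_8_0_context *c = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE);
    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature rsig;
    rustsecp256k1_v0_8_0_pubkey pk, rec;
    unsigned char sig64[64], ser1[33], ser2[33];
    size_t len1 = 33, len2 = 33;
    int recid, ok;

    ok = rustsecp256k1_v0_8_0_ec_pubkey_create(c, &pk, seckey);
    ok = ok && rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(c, &rsig, msg32, seckey, NULL, NULL);
    ok = ok && rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(c, sig64, &recid, &rsig);
    ok = ok && rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(c, &rsig, sig64, recid);
    ok = ok && rustsecp256k1_v0_8_0_ecdsa_recover(c, &rec, &rsig, msg32);
    /* Compare via serialized form, since the pubkey struct is opaque. */
    ok = ok && rustsecp256k1_v0_8_0_ec_pubkey_serialize(c, ser1, &len1, &pk, SECP256K1_EC_COMPRESSED);
    ok = ok && rustsecp256k1_v0_8_0_ec_pubkey_serialize(c, ser2, &len2, &rec, SECP256K1_EC_COMPRESSED);
    ok = ok && memcmp(ser1, ser2, 33) == 0;
    rustsecp256k1_v0_8_0_context_destroy(c);
    return ok;
}
```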
diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
index c4741e98b..f0bea0855 100644
--- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
+++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
@@ -7,10 +7,10 @@
 #ifndef SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
 #define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
-#include "src/modules/recovery/main_impl.h"
+#include "main_impl.h"
 #include "../../../include/secp256k1_recovery.h"
-void test_exhaustive_recovery_sign(const rustsecp256k1_v0_7_0_context *ctx, const rustsecp256k1_v0_7_0_ge *group) {
+void test_exhaustive_recovery_sign(const rustsecp256k1_v0_8_0_context *ctx, const rustsecp256k1_v0_8_0_ge *group) {
     int i, j, k;
     uint64_t iter = 0;
@@ -20,23 +20,23 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_7_0_context *ctx, cons
             if (skip_section(&iter)) continue;
             for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) {  /* nonce */
                 const int starting_k = k;
-                rustsecp256k1_v0_7_0_fe r_dot_y_normalized;
-                rustsecp256k1_v0_7_0_ecdsa_recoverable_signature rsig;
-                rustsecp256k1_v0_7_0_ecdsa_signature sig;
-                rustsecp256k1_v0_7_0_scalar sk, msg, r, s, expected_r;
+                rustsecp256k1_v0_8_0_fe r_dot_y_normalized;
+                rustsecp256k1_v0_8_0_ecdsa_recoverable_signature rsig;
+                rustsecp256k1_v0_8_0_ecdsa_signature sig;
+                rustsecp256k1_v0_8_0_scalar sk, msg, r, s, expected_r;
                 unsigned char sk32[32], msg32[32];
                 int expected_recid;
                 int recid;
                 int overflow;
-                rustsecp256k1_v0_7_0_scalar_set_int(&msg, i);
-                rustsecp256k1_v0_7_0_scalar_set_int(&sk, j);
-                rustsecp256k1_v0_7_0_scalar_get_b32(sk32, &sk);
-                rustsecp256k1_v0_7_0_scalar_get_b32(msg32, &msg);
+                rustsecp256k1_v0_8_0_scalar_set_int(&msg, i);
+                rustsecp256k1_v0_8_0_scalar_set_int(&sk, j);
+                rustsecp256k1_v0_8_0_scalar_get_b32(sk32, &sk);
+                rustsecp256k1_v0_8_0_scalar_get_b32(msg32, &msg);
-                rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_7_0_nonce_function_smallint, &k);
+                rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, rustsecp256k1_v0_8_0_nonce_function_smallint, &k);
                 /* Check directly */
-                rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
+                rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
                 r_from_k(&expected_r, group, k, &overflow);
                 CHECK(r == expected_r);
                 CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
@@ -50,18 +50,18 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_7_0_context *ctx, cons
                  * in the real group. */
                 expected_recid = overflow ? 2 : 0;
                 r_dot_y_normalized = group[k].y;
-                rustsecp256k1_v0_7_0_fe_normalize(&r_dot_y_normalized);
+                rustsecp256k1_v0_8_0_fe_normalize(&r_dot_y_normalized);
                 /* Also the recovery id is flipped depending if we hit the low-s branch */
                 if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
-                    expected_recid |= rustsecp256k1_v0_7_0_fe_is_odd(&r_dot_y_normalized);
+                    expected_recid |= rustsecp256k1_v0_8_0_fe_is_odd(&r_dot_y_normalized);
                 } else {
-                    expected_recid |= !rustsecp256k1_v0_7_0_fe_is_odd(&r_dot_y_normalized);
+                    expected_recid |= !rustsecp256k1_v0_8_0_fe_is_odd(&r_dot_y_normalized);
                 }
                 CHECK(recid == expected_recid);
                 /* Convert to a standard sig then check */
-                rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
-                rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, &sig);
+                rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+                rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, &sig);
                 /* Note that we compute expected_r *after* signing -- this is important
                  * because our nonce-computing function might change k during
                  * signing.
                 */
@@ -79,7 +79,7 @@ void test_exhaustive_recovery_sign(const rustsecp256k1_v0_7_0_context *ctx, cons
     }
 }
-void test_exhaustive_recovery_verify(const rustsecp256k1_v0_7_0_context *ctx, const rustsecp256k1_v0_7_0_ge *group) {
+void test_exhaustive_recovery_verify(const rustsecp256k1_v0_8_0_context *ctx, const rustsecp256k1_v0_8_0_ge *group) {
     /* This is essentially a copy of test_exhaustive_verify, with recovery added */
     int s, r, msg, key;
     uint64_t iter = 0;
@@ -87,41 +87,41 @@ void test_exhaustive_recovery_verify(const rustsecp256k1_v0_7_0_context *ctx, co
     for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
         for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
             for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
-                rustsecp256k1_v0_7_0_ge nonconst_ge;
-                rustsecp256k1_v0_7_0_ecdsa_recoverable_signature rsig;
-                rustsecp256k1_v0_7_0_ecdsa_signature sig;
-                rustsecp256k1_v0_7_0_pubkey pk;
-                rustsecp256k1_v0_7_0_scalar sk_s, msg_s, r_s, s_s;
-                rustsecp256k1_v0_7_0_scalar s_times_k_s, msg_plus_r_times_sk_s;
+                rustsecp256k1_v0_8_0_ge nonconst_ge;
+                rustsecp256k1_v0_8_0_ecdsa_recoverable_signature rsig;
+                rustsecp256k1_v0_8_0_ecdsa_signature sig;
+                rustsecp256k1_v0_8_0_pubkey pk;
+                rustsecp256k1_v0_8_0_scalar sk_s, msg_s, r_s, s_s;
+                rustsecp256k1_v0_8_0_scalar s_times_k_s, msg_plus_r_times_sk_s;
                 int recid = 0;
                 int k, should_verify;
                 unsigned char msg32[32];
                 if (skip_section(&iter)) continue;
-                rustsecp256k1_v0_7_0_scalar_set_int(&s_s, s);
-                rustsecp256k1_v0_7_0_scalar_set_int(&r_s, r);
-                rustsecp256k1_v0_7_0_scalar_set_int(&msg_s, msg);
-                rustsecp256k1_v0_7_0_scalar_set_int(&sk_s, key);
-                rustsecp256k1_v0_7_0_scalar_get_b32(msg32, &msg_s);
+                rustsecp256k1_v0_8_0_scalar_set_int(&s_s, s);
+                rustsecp256k1_v0_8_0_scalar_set_int(&r_s, r);
+                rustsecp256k1_v0_8_0_scalar_set_int(&msg_s, msg);
+                rustsecp256k1_v0_8_0_scalar_set_int(&sk_s, key);
+                rustsecp256k1_v0_8_0_scalar_get_b32(msg32, &msg_s);
                 /* Verify by hand */
                 /* Run through every k value that gives us this r and check that *one* works.
                  * Note there could be none, there could be multiple, ECDSA is weird. */
                 should_verify = 0;
                 for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
-                    rustsecp256k1_v0_7_0_scalar check_x_s;
+                    rustsecp256k1_v0_8_0_scalar check_x_s;
                     r_from_k(&check_x_s, group, k, NULL);
                     if (r_s == check_x_s) {
-                        rustsecp256k1_v0_7_0_scalar_set_int(&s_times_k_s, k);
-                        rustsecp256k1_v0_7_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
-                        rustsecp256k1_v0_7_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
-                        rustsecp256k1_v0_7_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
-                        should_verify |= rustsecp256k1_v0_7_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+                        rustsecp256k1_v0_8_0_scalar_set_int(&s_times_k_s, k);
+                        rustsecp256k1_v0_8_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+                        rustsecp256k1_v0_8_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+                        rustsecp256k1_v0_8_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+                        should_verify |= rustsecp256k1_v0_8_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
                     }
                 }
                 /* nb we have a "high s" rule */
-                should_verify &= !rustsecp256k1_v0_7_0_scalar_is_high(&s_s);
+                should_verify &= !rustsecp256k1_v0_8_0_scalar_is_high(&s_s);
                 /* We would like to try recovering the pubkey and checking that it matches,
                  * but pubkey recovery is impossible in the exhaustive tests (the reason
@@ -129,19 +129,19 @@ void test_exhaustive_recovery_verify(const rustsecp256k1_v0_7_0_context *ctx, co
                  * overlap between the sets, so there are no valid signatures).
                 */
                /* Verify by converting to a standard signature and calling verify */
-                rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
-                rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+                rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
+                rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
                 memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
-                rustsecp256k1_v0_7_0_pubkey_save(&pk, &nonconst_ge);
+                rustsecp256k1_v0_8_0_pubkey_save(&pk, &nonconst_ge);
                 CHECK(should_verify ==
-                      rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pk));
+                      rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pk));
             }
         }
     }
 }
-static void test_exhaustive_recovery(const rustsecp256k1_v0_7_0_context *ctx, const rustsecp256k1_v0_7_0_ge *group) {
+static void test_exhaustive_recovery(const rustsecp256k1_v0_8_0_context *ctx, const rustsecp256k1_v0_8_0_ge *group) {
     test_exhaustive_recovery_sign(ctx, group);
     test_exhaustive_recovery_verify(ctx, group);
 }
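The expected recovery id in the exhaustive signing test composes as follows: bit 1 records whether R.x overflowed the (tiny) group order when reduced to r, and bit 0 records the parity of R.y, flipped when the signer negated s to satisfy the low-s rule (negating s corresponds to negating the nonce, which flips R's y-coordinate). A standalone sketch of that expectation logic, mirroring the test above; the helper name is hypothetical:

```c
/* Sketch: how a recovery id is composed from signing-time data. */
static int expected_recid(int r_overflowed, int r_y_is_odd, int s_was_negated) {
    int recid = r_overflowed ? 2 : 0;                    /* bit 1: R.x >= group order */
    recid |= s_was_negated ? !r_y_is_odd : r_y_is_odd;   /* bit 0: parity of R.y */
    return recid;
}
```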
diff --git a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h
index 53bdddcca..d14b444eb 100644
--- a/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h
+++ b/secp256k1-sys/depend/secp256k1/src/modules/recovery/tests_impl.h
@@ -25,20 +25,16 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c
     }
     /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
     memset(nonce32, 1, 32);
-    return rustsecp256k1_v0_7_0_testrand_bits(1);
+    return rustsecp256k1_v0_8_0_testrand_bits(1);
 }
 void test_ecdsa_recovery_api(void) {
     /* Setup contexts that just count errors */
-    rustsecp256k1_v0_7_0_context *none = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_NONE);
-    rustsecp256k1_v0_7_0_context *sign = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN);
-    rustsecp256k1_v0_7_0_context *vrfy = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_VERIFY);
-    rustsecp256k1_v0_7_0_context *both = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
-    rustsecp256k1_v0_7_0_context *sttc = rustsecp256k1_v0_7_0_context_clone(rustsecp256k1_v0_7_0_context_no_precomp);
-    rustsecp256k1_v0_7_0_pubkey pubkey;
-    rustsecp256k1_v0_7_0_pubkey recpubkey;
-    rustsecp256k1_v0_7_0_ecdsa_signature normal_sig;
-    rustsecp256k1_v0_7_0_ecdsa_recoverable_signature recsig;
+    rustsecp256k1_v0_8_0_context *sttc = rustsecp256k1_v0_8_0_context_clone(rustsecp256k1_v0_8_0_context_static);
+    rustsecp256k1_v0_8_0_pubkey pubkey;
+    rustsecp256k1_v0_8_0_pubkey recpubkey;
+    rustsecp256k1_v0_8_0_ecdsa_signature normal_sig;
+    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature recsig;
     unsigned char privkey[32] = { 1 };
     unsigned char message[32] = { 2 };
     int32_t ecount = 0;
@@ -50,165 +46,143 @@ void test_ecdsa_recovery_api(void) {
         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
     };
-    rustsecp256k1_v0_7_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_error_callback(sttc, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
-    rustsecp256k1_v0_7_0_context_set_illegal_callback(sttc, counting_illegal_callback_fn, &ecount);
+    rustsecp256k1_v0_8_0_context_set_error_callback(ctx, counting_illegal_callback_fn, &ecount);
+    rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
+    rustsecp256k1_v0_8_0_context_set_error_callback(sttc, counting_illegal_callback_fn, &ecount);
+    rustsecp256k1_v0_8_0_context_set_illegal_callback(sttc, counting_illegal_callback_fn, &ecount);
     /* Construct and verify corresponding public key. */
-    CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, privkey) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, privkey) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
     /* Check bad contexts and NULLs for signing */
     ecount = 0;
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recsig, message, privkey, NULL, NULL) == 1);
     CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1);
-    CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 1);
-    CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
-    CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, NULL, message, privkey, NULL, NULL) == 0);
     CHECK(ecount == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recsig, NULL, privkey, NULL, NULL) == 0);
     CHECK(ecount == 2);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recsig, message, NULL, NULL, NULL) == 0);
     CHECK(ecount == 3);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(sttc, &recsig, message, privkey, NULL, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(sttc, &recsig, message, privkey, NULL, NULL) == 0);
     CHECK(ecount == 4);
     /* This will fail or succeed randomly, and in either case will not ARG_CHECK failure */
-    rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL);
+    rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recsig, message, privkey, recovery_test_nonce_function, NULL);
     CHECK(ecount == 4);
     /* These will all fail, but not in ARG_CHECK way */
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recsig, message, zero_privkey, NULL, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recsig, message, over_privkey, NULL, NULL) == 0);
     /* This one will succeed. */
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recsig, message, privkey, NULL, NULL) == 1);
     CHECK(ecount == 4);
     /* Check signing with a goofy nonce function */
     /* Check bad contexts and NULLs for recovery */
     ecount = 0;
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(none, &recpubkey, &recsig, message) == 1);
-    CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(sign, &recpubkey, &recsig, message) == 1);
-    CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1);
-    CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(both, &recpubkey, &recsig, message) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &recpubkey, &recsig, message) == 1);
     CHECK(ecount == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(both, NULL, &recsig, message) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, NULL, &recsig, message) == 0);
     CHECK(ecount == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(both, &recpubkey, NULL, message) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &recpubkey, NULL, message) == 0);
     CHECK(ecount == 2);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &recpubkey, &recsig, NULL) == 0);
     CHECK(ecount == 3);
     /* Check NULLs for conversion */
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &normal_sig, message, privkey, NULL, NULL) == 1);
     ecount = 0;
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(ctx, NULL, &recsig) == 0);
     CHECK(ecount == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(ctx, &normal_sig, NULL) == 0);
     CHECK(ecount == 2);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(ctx, &normal_sig, &recsig) == 1);
     /* Check NULLs for de/serialization */
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recsig, message, privkey, NULL, NULL) == 1);
     ecount = 0;
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(ctx, NULL, &recid, &recsig) == 0);
     CHECK(ecount == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, NULL, &recsig) == 0);
     CHECK(ecount == 2);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, NULL) == 0);
     CHECK(ecount == 3);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &recsig) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, NULL, sig, recid) == 0);
     CHECK(ecount == 4);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &recsig, NULL, recid) == 0);
     CHECK(ecount == 5);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &recsig, sig, -1) == 0);
     CHECK(ecount == 6);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &recsig, sig, 5) == 0);
     CHECK(ecount == 7);
     /* overflow in signature will fail but not affect ecount */
     memcpy(sig, over_privkey, 32);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &recsig, sig, recid) == 0);
     CHECK(ecount == 7);
     /* cleanup */
-    rustsecp256k1_v0_7_0_context_destroy(none);
-    rustsecp256k1_v0_7_0_context_destroy(sign);
-    rustsecp256k1_v0_7_0_context_destroy(vrfy);
-    rustsecp256k1_v0_7_0_context_destroy(both);
-    rustsecp256k1_v0_7_0_context_destroy(sttc);
+    rustsecp256k1_v0_8_0_context_destroy(sttc);
 }
 void test_ecdsa_recovery_end_to_end(void) {
     unsigned char extra[32] = {0x00};
     unsigned char privkey[32];
     unsigned char message[32];
-    rustsecp256k1_v0_7_0_ecdsa_signature signature[5];
-    rustsecp256k1_v0_7_0_ecdsa_recoverable_signature rsignature[5];
+    rustsecp256k1_v0_8_0_ecdsa_signature signature[5];
+    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature rsignature[5];
     unsigned char sig[74];
-    rustsecp256k1_v0_7_0_pubkey pubkey;
-    rustsecp256k1_v0_7_0_pubkey recpubkey;
+    rustsecp256k1_v0_8_0_pubkey pubkey;
+    rustsecp256k1_v0_8_0_pubkey recpubkey;
     int recid = 0;
     /* Generate a random key and message. */
     {
-        rustsecp256k1_v0_7_0_scalar msg, key;
+        rustsecp256k1_v0_8_0_scalar msg, key;
         random_scalar_order_test(&msg);
         random_scalar_order_test(&key);
-        rustsecp256k1_v0_7_0_scalar_get_b32(privkey, &key);
-        rustsecp256k1_v0_7_0_scalar_get_b32(message, &msg);
+        rustsecp256k1_v0_8_0_scalar_get_b32(privkey, &key);
+        rustsecp256k1_v0_8_0_scalar_get_b32(message, &msg);
     }
     /* Construct and verify corresponding public key. */
-    CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, privkey) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, privkey) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
     /* Serialize/parse compact and verify/recover.
     */
     extra[0] = 0;
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
     extra[31] = 1;
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
     extra[31] = 0;
     extra[0] = 1;
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
-    CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[4], &signature[0], 64) == 0);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+    CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[4], &signature[0], 64) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
     memset(&rsignature[4], 0, sizeof(rsignature[4]));
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
     /* Parse compact (with recovery id) and recover. */
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
-    CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
+    CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
     /* Serialize/destroy/parse signature and verify again.
     */
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
-    sig[rustsecp256k1_v0_7_0_testrand_bits(6)] += 1 + rustsecp256k1_v0_7_0_testrand_int(255);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
+    sig[rustsecp256k1_v0_8_0_testrand_bits(6)] += 1 + rustsecp256k1_v0_8_0_testrand_int(255);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
     /* Recover again */
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
-          rustsecp256k1_v0_7_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
+          rustsecp256k1_v0_8_0_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
 }
 /* Tests several edge cases. */
@@ -231,7 +205,7 @@ void test_ecdsa_recovery_edge_cases(void) {
         0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86,
         0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57
     };
-    rustsecp256k1_v0_7_0_pubkey pubkey;
+    rustsecp256k1_v0_8_0_pubkey pubkey;
     /* signature (r,s) = (4,4), which can be recovered with all 4 recids. */
     const unsigned char sigb64[64] = {
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -243,19 +217,19 @@ void test_ecdsa_recovery_edge_cases(void) {
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
     };
-    rustsecp256k1_v0_7_0_pubkey pubkeyb;
-    rustsecp256k1_v0_7_0_ecdsa_recoverable_signature rsig;
-    rustsecp256k1_v0_7_0_ecdsa_signature sig;
+    rustsecp256k1_v0_8_0_pubkey pubkeyb;
+    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature rsig;
+    rustsecp256k1_v0_8_0_ecdsa_signature sig;
     int recid;
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
-    CHECK(!rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
-    CHECK(!rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
-    CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
-    CHECK(!rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
+    CHECK(!rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
+    CHECK(!rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+    CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
+    CHECK(!rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
     for (recid = 0; recid < 4; recid++) {
         int i;
@@ -300,40 +274,40 @@ void test_ecdsa_recovery_edge_cases(void) {
             0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E,
             0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04
         };
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
         for (recid2 = 0; recid2 < 4; recid2++) {
-            rustsecp256k1_v0_7_0_pubkey pubkey2b;
-            CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
-            CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
+            rustsecp256k1_v0_8_0_pubkey pubkey2b;
+            CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
+            CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
             /* Verifying with (order + r,4) should always fail. */
-            CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1);
-            CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+            CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1);
+            CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
         }
         /* DER parsing tests. */
        /* Zero length r/s. */
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
        /* Leading zeros.
        */
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
        sigbderalt3[4] = 1;
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
        sigbderalt4[7] = 1;
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
        /* Damage signature. */
        sigbder[7]++;
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
        sigbder[7]--;
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
        for(i = 0; i < 8; i++) {
            int c;
            unsigned char orig = sigbder[i];
@@ -343,7 +317,7 @@ void test_ecdsa_recovery_edge_cases(void) {
                    continue;
                }
                sigbder[i] = c;
-                CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+                CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
            }
            sigbder[i] = orig;
        }
@@ -363,25 +337,25 @@ void test_ecdsa_recovery_edge_cases(void) {
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
        };
-        rustsecp256k1_v0_7_0_pubkey pubkeyc;
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
+        rustsecp256k1_v0_8_0_pubkey pubkeyc;
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
        sigcder[4] = 0;
        sigc64[31] = 0;
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
        sigcder[4] = 1;
        sigcder[7] = 0;
        sigc64[31] = 1;
        sigc64[63] = 0;
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
-        CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+        CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
    }
 }
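test_ecdsa_recovery_api now exercises only two contexts: the ordinary ctx (with every flag other than NONE deprecated, one context covers the old none/sign/vrfy/both cases) and a clone of rustsecp256k1_v0_8_0_context_static, which replaces context_no_precomp in this release and cannot sign. A sketch of the failure mode the test asserts; the helper and callback names are assumptions:

```c
#include <secp256k1_recovery.h>  /* assumed include path */

static void quiet_callback(const char *msg, void *data) { (void)msg; (void)data; }

/* Sketch: signing with (a clone of) the static context fails its ARG_CHECK
 * because that context has no ecmult_gen table. Returns 1 if the call was
 * rejected as expected. */
int static_context_cannot_sign(const unsigned char msg32[32], const unsigned char seckey[32]) {
    rustsecp256k1_v0_8_0_context *sttc = rustsecp256k1_v0_8_0_context_clone(rustsecp256k1_v0_8_0_context_static);
    rustsecp256k1_v0_8_0_ecdsa_recoverable_signature rsig;
    int rejected;
    rustsecp256k1_v0_8_0_context_set_illegal_callback(sttc, quiet_callback, NULL);
    rejected = rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(sttc, &rsig, msg32, seckey, NULL, NULL) == 0;
    rustsecp256k1_v0_8_0_context_destroy(sttc);
    return rejected;
}
```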
@@ void bench_schnorrsig_verify(void* arg, int iters) { int i; for (i = 0; i < iters; i++) { - rustsecp256k1_v0_7_0_xonly_pubkey pk; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], MSGLEN, &pk)); + rustsecp256k1_v0_8_0_xonly_pubkey pk; + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], MSGLEN, &pk)); } } @@ -50,8 +50,8 @@ void run_schnorrsig_bench(int iters, int argc, char** argv) { bench_schnorrsig_data data; int d = argc == 1; - data.ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_VERIFY | SECP256K1_CONTEXT_SIGN); - data.keypairs = (const rustsecp256k1_v0_7_0_keypair **)malloc(iters * sizeof(rustsecp256k1_v0_7_0_keypair *)); + data.ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); + data.keypairs = (const rustsecp256k1_v0_8_0_keypair **)malloc(iters * sizeof(rustsecp256k1_v0_8_0_keypair *)); data.pk = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); data.msgs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); data.sigs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); @@ -61,9 +61,9 @@ void run_schnorrsig_bench(int iters, int argc, char** argv) { unsigned char sk[32]; unsigned char *msg = (unsigned char *)malloc(MSGLEN); unsigned char *sig = (unsigned char *)malloc(64); - rustsecp256k1_v0_7_0_keypair *keypair = (rustsecp256k1_v0_7_0_keypair *)malloc(sizeof(*keypair)); + rustsecp256k1_v0_8_0_keypair *keypair = (rustsecp256k1_v0_8_0_keypair *)malloc(sizeof(*keypair)); unsigned char *pk_char = (unsigned char *)malloc(32); - rustsecp256k1_v0_7_0_xonly_pubkey pk; + rustsecp256k1_v0_8_0_xonly_pubkey pk; msg[0] = sk[0] = i; msg[1] = sk[1] = i >> 8; msg[2] = sk[2] = i >> 16; @@ -76,10 +76,10 @@ void run_schnorrsig_bench(int iters, int argc, char** argv) { data.msgs[i] = msg; data.sigs[i] = sig; - CHECK(rustsecp256k1_v0_7_0_keypair_create(data.ctx, keypair, sk)); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(data.ctx, sig, msg, MSGLEN, keypair, NULL)); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(data.ctx, &pk, NULL, keypair)); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(data.ctx, keypair, sk)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(data.ctx, sig, msg, MSGLEN, keypair, NULL)); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(data.ctx, &pk, NULL, keypair)); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1); } if (d || have_flag(argc, argv, "schnorrsig") || have_flag(argc, argv, "sign") || have_flag(argc, argv, "schnorrsig_sign")) run_benchmark("schnorrsig_sign", bench_schnorrsig_sign, NULL, NULL, (void *) &data, 10, iters); @@ -91,12 +91,14 @@ void run_schnorrsig_bench(int iters, int argc, char** argv) { free((void *)data.msgs[i]); free((void *)data.sigs[i]); } - free(data.keypairs); - free(data.pk); - free(data.msgs); - free(data.sigs); - rustsecp256k1_v0_7_0_context_destroy(data.ctx); + /* Casting to (void *) avoids a stupid warning in MSVC. 
*/ + free((void *)data.keypairs); + free((void *)data.pk); + free((void *)data.msgs); + free((void *)data.sigs); + + rustsecp256k1_v0_8_0_context_destroy(data.ctx); } #endif diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h index 78c1742d5..7d59e3c80 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h @@ -13,8 +13,8 @@ /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/nonce")||SHA256("BIP0340/nonce"). */ -static void rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged(rustsecp256k1_v0_7_0_sha256 *sha) { - rustsecp256k1_v0_7_0_sha256_initialize(sha); +static void rustsecp256k1_v0_8_0_nonce_function_bip340_sha256_tagged(rustsecp256k1_v0_8_0_sha256 *sha) { + rustsecp256k1_v0_8_0_sha256_initialize(sha); sha->s[0] = 0x46615b35ul; sha->s[1] = 0xf4bfbff7ul; sha->s[2] = 0x9f8dc671ul; @@ -29,8 +29,8 @@ static void rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged(rustsecp256 /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/aux")||SHA256("BIP0340/aux"). */ -static void rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged_aux(rustsecp256k1_v0_7_0_sha256 *sha) { - rustsecp256k1_v0_7_0_sha256_initialize(sha); +static void rustsecp256k1_v0_8_0_nonce_function_bip340_sha256_tagged_aux(rustsecp256k1_v0_8_0_sha256 *sha) { + rustsecp256k1_v0_8_0_sha256_initialize(sha); sha->s[0] = 0x24dd3219ul; sha->s[1] = 0x4eba7e70ul; sha->s[2] = 0xca0fabb9ul; @@ -50,7 +50,7 @@ static const unsigned char bip340_algo[13] = "BIP0340/nonce"; static const unsigned char schnorrsig_extraparams_magic[4] = SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC; static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { - rustsecp256k1_v0_7_0_sha256 sha; + rustsecp256k1_v0_8_0_sha256 sha; unsigned char masked_key[32]; int i; @@ -59,9 +59,9 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms } if (data != NULL) { - rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged_aux(&sha); - rustsecp256k1_v0_7_0_sha256_write(&sha, data, 32); - rustsecp256k1_v0_7_0_sha256_finalize(&sha, masked_key); + rustsecp256k1_v0_8_0_nonce_function_bip340_sha256_tagged_aux(&sha); + rustsecp256k1_v0_8_0_sha256_write(&sha, data, 32); + rustsecp256k1_v0_8_0_sha256_finalize(&sha, masked_key); for (i = 0; i < 32; i++) { masked_key[i] ^= key32[i]; } @@ -82,26 +82,26 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms * algorithms. If this nonce function is used in BIP-340 signing as defined * in the spec, an optimized tagging implementation is used. 
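 * (A BIP-340 tagged hash is SHA256(SHA256(tag) || SHA256(tag) || data). The two tag hashes always fill the first 64-byte block, so their SHA256 midstate can be precomputed once; the hard-coded state above is that midstate for "BIP0340/nonce".)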
*/ if (algolen == sizeof(bip340_algo) - && rustsecp256k1_v0_7_0_memcmp_var(algo, bip340_algo, algolen) == 0) { - rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged(&sha); + && rustsecp256k1_v0_8_0_memcmp_var(algo, bip340_algo, algolen) == 0) { + rustsecp256k1_v0_8_0_nonce_function_bip340_sha256_tagged(&sha); } else { - rustsecp256k1_v0_7_0_sha256_initialize_tagged(&sha, algo, algolen); + rustsecp256k1_v0_8_0_sha256_initialize_tagged(&sha, algo, algolen); } /* Hash masked-key||pk||msg using the tagged hash as per the spec */ - rustsecp256k1_v0_7_0_sha256_write(&sha, masked_key, 32); - rustsecp256k1_v0_7_0_sha256_write(&sha, xonly_pk32, 32); - rustsecp256k1_v0_7_0_sha256_write(&sha, msg, msglen); - rustsecp256k1_v0_7_0_sha256_finalize(&sha, nonce32); + rustsecp256k1_v0_8_0_sha256_write(&sha, masked_key, 32); + rustsecp256k1_v0_8_0_sha256_write(&sha, xonly_pk32, 32); + rustsecp256k1_v0_8_0_sha256_write(&sha, msg, msglen); + rustsecp256k1_v0_8_0_sha256_finalize(&sha, nonce32); return 1; } -const rustsecp256k1_v0_7_0_nonce_function_hardened rustsecp256k1_v0_7_0_nonce_function_bip340 = nonce_function_bip340; +const rustsecp256k1_v0_8_0_nonce_function_hardened rustsecp256k1_v0_8_0_nonce_function_bip340 = nonce_function_bip340; /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/challenge")||SHA256("BIP0340/challenge"). */ -static void rustsecp256k1_v0_7_0_schnorrsig_sha256_tagged(rustsecp256k1_v0_7_0_sha256 *sha) { - rustsecp256k1_v0_7_0_sha256_initialize(sha); +static void rustsecp256k1_v0_8_0_schnorrsig_sha256_tagged(rustsecp256k1_v0_8_0_sha256 *sha) { + rustsecp256k1_v0_8_0_sha256_initialize(sha); sha->s[0] = 0x9cecba11ul; sha->s[1] = 0x23925381ul; sha->s[2] = 0x11679112ul; @@ -113,113 +113,117 @@ static void rustsecp256k1_v0_7_0_schnorrsig_sha256_tagged(rustsecp256k1_v0_7_0_s sha->bytes = 64; } -static void rustsecp256k1_v0_7_0_schnorrsig_challenge(rustsecp256k1_v0_7_0_scalar* e, const unsigned char *r32, const unsigned char *msg, size_t msglen, const unsigned char *pubkey32) +static void rustsecp256k1_v0_8_0_schnorrsig_challenge(rustsecp256k1_v0_8_0_scalar* e, const unsigned char *r32, const unsigned char *msg, size_t msglen, const unsigned char *pubkey32) { unsigned char buf[32]; - rustsecp256k1_v0_7_0_sha256 sha; + rustsecp256k1_v0_8_0_sha256 sha; /* tagged hash(r.x, pk.x, msg) */ - rustsecp256k1_v0_7_0_schnorrsig_sha256_tagged(&sha); - rustsecp256k1_v0_7_0_sha256_write(&sha, r32, 32); - rustsecp256k1_v0_7_0_sha256_write(&sha, pubkey32, 32); - rustsecp256k1_v0_7_0_sha256_write(&sha, msg, msglen); - rustsecp256k1_v0_7_0_sha256_finalize(&sha, buf); + rustsecp256k1_v0_8_0_schnorrsig_sha256_tagged(&sha); + rustsecp256k1_v0_8_0_sha256_write(&sha, r32, 32); + rustsecp256k1_v0_8_0_sha256_write(&sha, pubkey32, 32); + rustsecp256k1_v0_8_0_sha256_write(&sha, msg, msglen); + rustsecp256k1_v0_8_0_sha256_finalize(&sha, buf); /* Set scalar e to the challenge hash modulo the curve order as per * BIP340. 
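 * (No separate reduction step is needed: scalar_set_b32 below reduces the 32-byte value modulo the group order, and passing NULL for the overflow flag simply discards whether wrapping occurred.)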
*/ - rustsecp256k1_v0_7_0_scalar_set_b32(e, buf, NULL); + rustsecp256k1_v0_8_0_scalar_set_b32(e, buf, NULL); } -static int rustsecp256k1_v0_7_0_schnorrsig_sign_internal(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_7_0_keypair *keypair, rustsecp256k1_v0_7_0_nonce_function_hardened noncefp, void *ndata) { - rustsecp256k1_v0_7_0_scalar sk; - rustsecp256k1_v0_7_0_scalar e; - rustsecp256k1_v0_7_0_scalar k; - rustsecp256k1_v0_7_0_gej rj; - rustsecp256k1_v0_7_0_ge pk; - rustsecp256k1_v0_7_0_ge r; +static int rustsecp256k1_v0_8_0_schnorrsig_sign_internal(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_8_0_keypair *keypair, rustsecp256k1_v0_8_0_nonce_function_hardened noncefp, void *ndata) { + rustsecp256k1_v0_8_0_scalar sk; + rustsecp256k1_v0_8_0_scalar e; + rustsecp256k1_v0_8_0_scalar k; + rustsecp256k1_v0_8_0_gej rj; + rustsecp256k1_v0_8_0_ge pk; + rustsecp256k1_v0_8_0_ge r; unsigned char buf[32] = { 0 }; unsigned char pk_buf[32]; unsigned char seckey[32]; int ret = 1; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(sig64 != NULL); ARG_CHECK(msg != NULL || msglen == 0); ARG_CHECK(keypair != NULL); if (noncefp == NULL) { - noncefp = rustsecp256k1_v0_7_0_nonce_function_bip340; + noncefp = rustsecp256k1_v0_8_0_nonce_function_bip340; } - ret &= rustsecp256k1_v0_7_0_keypair_load(ctx, &sk, &pk, keypair); + ret &= rustsecp256k1_v0_8_0_keypair_load(ctx, &sk, &pk, keypair); /* Because we are signing for a x-only pubkey, the secret key is negated * before signing if the point corresponding to the secret key does not * have an even Y. */ - if (rustsecp256k1_v0_7_0_fe_is_odd(&pk.y)) { - rustsecp256k1_v0_7_0_scalar_negate(&sk, &sk); + if (rustsecp256k1_v0_8_0_fe_is_odd(&pk.y)) { + rustsecp256k1_v0_8_0_scalar_negate(&sk, &sk); } - rustsecp256k1_v0_7_0_scalar_get_b32(seckey, &sk); - rustsecp256k1_v0_7_0_fe_get_b32(pk_buf, &pk.x); + rustsecp256k1_v0_8_0_scalar_get_b32(seckey, &sk); + rustsecp256k1_v0_8_0_fe_get_b32(pk_buf, &pk.x); ret &= !!noncefp(buf, msg, msglen, seckey, pk_buf, bip340_algo, sizeof(bip340_algo), ndata); - rustsecp256k1_v0_7_0_scalar_set_b32(&k, buf, NULL); - ret &= !rustsecp256k1_v0_7_0_scalar_is_zero(&k); - rustsecp256k1_v0_7_0_scalar_cmov(&k, &rustsecp256k1_v0_7_0_scalar_one, !ret); + rustsecp256k1_v0_8_0_scalar_set_b32(&k, buf, NULL); + ret &= !rustsecp256k1_v0_8_0_scalar_is_zero(&k); + rustsecp256k1_v0_8_0_scalar_cmov(&k, &rustsecp256k1_v0_8_0_scalar_one, !ret); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k); - rustsecp256k1_v0_7_0_ge_set_gej(&r, &rj); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k); + rustsecp256k1_v0_8_0_ge_set_gej(&r, &rj); /* We declassify r to allow using it as a branch point. This is fine * because r is not a secret. 
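 * (declassify should be a no-op in ordinary builds; it appears to matter only under constant-time analysis, where it marks r as non-secret so the branch below is not flagged.)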
*/ - rustsecp256k1_v0_7_0_declassify(ctx, &r, sizeof(r)); - rustsecp256k1_v0_7_0_fe_normalize_var(&r.y); - if (rustsecp256k1_v0_7_0_fe_is_odd(&r.y)) { - rustsecp256k1_v0_7_0_scalar_negate(&k, &k); + rustsecp256k1_v0_8_0_declassify(ctx, &r, sizeof(r)); + rustsecp256k1_v0_8_0_fe_normalize_var(&r.y); + if (rustsecp256k1_v0_8_0_fe_is_odd(&r.y)) { + rustsecp256k1_v0_8_0_scalar_negate(&k, &k); } - rustsecp256k1_v0_7_0_fe_normalize_var(&r.x); - rustsecp256k1_v0_7_0_fe_get_b32(&sig64[0], &r.x); + rustsecp256k1_v0_8_0_fe_normalize_var(&r.x); + rustsecp256k1_v0_8_0_fe_get_b32(&sig64[0], &r.x); - rustsecp256k1_v0_7_0_schnorrsig_challenge(&e, &sig64[0], msg, msglen, pk_buf); - rustsecp256k1_v0_7_0_scalar_mul(&e, &e, &sk); - rustsecp256k1_v0_7_0_scalar_add(&e, &e, &k); - rustsecp256k1_v0_7_0_scalar_get_b32(&sig64[32], &e); + rustsecp256k1_v0_8_0_schnorrsig_challenge(&e, &sig64[0], msg, msglen, pk_buf); + rustsecp256k1_v0_8_0_scalar_mul(&e, &e, &sk); + rustsecp256k1_v0_8_0_scalar_add(&e, &e, &k); + rustsecp256k1_v0_8_0_scalar_get_b32(&sig64[32], &e); - rustsecp256k1_v0_7_0_memczero(sig64, 64, !ret); - rustsecp256k1_v0_7_0_scalar_clear(&k); - rustsecp256k1_v0_7_0_scalar_clear(&sk); + rustsecp256k1_v0_8_0_memczero(sig64, 64, !ret); + rustsecp256k1_v0_8_0_scalar_clear(&k); + rustsecp256k1_v0_8_0_scalar_clear(&sk); memset(seckey, 0, sizeof(seckey)); return ret; } -int rustsecp256k1_v0_7_0_schnorrsig_sign(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_7_0_keypair *keypair, const unsigned char *aux_rand32) { +int rustsecp256k1_v0_8_0_schnorrsig_sign32(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_8_0_keypair *keypair, const unsigned char *aux_rand32) { /* We cast away const from the passed aux_rand32 argument since we know the default nonce function does not modify it. 
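 * (schnorrsig_sign32 is the new name for signing exactly 32-byte messages; the old schnorrsig_sign name is kept below as a deprecated alias that simply forwards here.)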
*/ - return rustsecp256k1_v0_7_0_schnorrsig_sign_internal(ctx, sig64, msg32, 32, keypair, rustsecp256k1_v0_7_0_nonce_function_bip340, (unsigned char*)aux_rand32); + return rustsecp256k1_v0_8_0_schnorrsig_sign_internal(ctx, sig64, msg32, 32, keypair, rustsecp256k1_v0_8_0_nonce_function_bip340, (unsigned char*)aux_rand32); } -int rustsecp256k1_v0_7_0_schnorrsig_sign_custom(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_7_0_keypair *keypair, rustsecp256k1_v0_7_0_schnorrsig_extraparams *extraparams) { - rustsecp256k1_v0_7_0_nonce_function_hardened noncefp = NULL; +int rustsecp256k1_v0_8_0_schnorrsig_sign(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1_v0_8_0_keypair *keypair, const unsigned char *aux_rand32) { + return rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig64, msg32, keypair, aux_rand32); +} + +int rustsecp256k1_v0_8_0_schnorrsig_sign_custom(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_8_0_keypair *keypair, rustsecp256k1_v0_8_0_schnorrsig_extraparams *extraparams) { + rustsecp256k1_v0_8_0_nonce_function_hardened noncefp = NULL; void *ndata = NULL; VERIFY_CHECK(ctx != NULL); if (extraparams != NULL) { - ARG_CHECK(rustsecp256k1_v0_7_0_memcmp_var(extraparams->magic, + ARG_CHECK(rustsecp256k1_v0_8_0_memcmp_var(extraparams->magic, schnorrsig_extraparams_magic, sizeof(extraparams->magic)) == 0); noncefp = extraparams->noncefp; ndata = extraparams->ndata; } - return rustsecp256k1_v0_7_0_schnorrsig_sign_internal(ctx, sig64, msg, msglen, keypair, noncefp, ndata); + return rustsecp256k1_v0_8_0_schnorrsig_sign_internal(ctx, sig64, msg, msglen, keypair, noncefp, ndata); } -int rustsecp256k1_v0_7_0_schnorrsig_verify(const rustsecp256k1_v0_7_0_context* ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_7_0_xonly_pubkey *pubkey) { - rustsecp256k1_v0_7_0_scalar s; - rustsecp256k1_v0_7_0_scalar e; - rustsecp256k1_v0_7_0_gej rj; - rustsecp256k1_v0_7_0_ge pk; - rustsecp256k1_v0_7_0_gej pkj; - rustsecp256k1_v0_7_0_fe rx; - rustsecp256k1_v0_7_0_ge r; +int rustsecp256k1_v0_8_0_schnorrsig_verify(const rustsecp256k1_v0_8_0_context* ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1_v0_8_0_xonly_pubkey *pubkey) { + rustsecp256k1_v0_8_0_scalar s; + rustsecp256k1_v0_8_0_scalar e; + rustsecp256k1_v0_8_0_gej rj; + rustsecp256k1_v0_8_0_ge pk; + rustsecp256k1_v0_8_0_gej pkj; + rustsecp256k1_v0_8_0_fe rx; + rustsecp256k1_v0_8_0_ge r; unsigned char buf[32]; int overflow; @@ -228,36 +232,36 @@ int rustsecp256k1_v0_7_0_schnorrsig_verify(const rustsecp256k1_v0_7_0_context* c ARG_CHECK(msg != NULL || msglen == 0); ARG_CHECK(pubkey != NULL); - if (!rustsecp256k1_v0_7_0_fe_set_b32(&rx, &sig64[0])) { + if (!rustsecp256k1_v0_8_0_fe_set_b32(&rx, &sig64[0])) { return 0; } - rustsecp256k1_v0_7_0_scalar_set_b32(&s, &sig64[32], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&s, &sig64[32], &overflow); if (overflow) { return 0; } - if (!rustsecp256k1_v0_7_0_xonly_pubkey_load(ctx, &pk, pubkey)) { + if (!rustsecp256k1_v0_8_0_xonly_pubkey_load(ctx, &pk, pubkey)) { return 0; } /* Compute e. 
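 * (e is the BIP-340 challenge: the "BIP0340/challenge" tagged hash of r.x || pk.x || msg, reduced modulo the group order.)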
*/ - rustsecp256k1_v0_7_0_fe_get_b32(buf, &pk.x); - rustsecp256k1_v0_7_0_schnorrsig_challenge(&e, &sig64[0], msg, msglen, buf); + rustsecp256k1_v0_8_0_fe_get_b32(buf, &pk.x); + rustsecp256k1_v0_8_0_schnorrsig_challenge(&e, &sig64[0], msg, msglen, buf); /* Compute rj = s*G + (-e)*pkj */ - rustsecp256k1_v0_7_0_scalar_negate(&e, &e); - rustsecp256k1_v0_7_0_gej_set_ge(&pkj, &pk); - rustsecp256k1_v0_7_0_ecmult(&rj, &pkj, &e, &s); + rustsecp256k1_v0_8_0_scalar_negate(&e, &e); + rustsecp256k1_v0_8_0_gej_set_ge(&pkj, &pk); + rustsecp256k1_v0_8_0_ecmult(&rj, &pkj, &e, &s); - rustsecp256k1_v0_7_0_ge_set_gej_var(&r, &rj); - if (rustsecp256k1_v0_7_0_ge_is_infinity(&r)) { + rustsecp256k1_v0_8_0_ge_set_gej_var(&r, &rj); + if (rustsecp256k1_v0_8_0_ge_is_infinity(&r)) { return 0; } - rustsecp256k1_v0_7_0_fe_normalize_var(&r.y); - return !rustsecp256k1_v0_7_0_fe_is_odd(&r.y) && - rustsecp256k1_v0_7_0_fe_equal_var(&rx, &r.x); + rustsecp256k1_v0_8_0_fe_normalize_var(&r.y); + return !rustsecp256k1_v0_8_0_fe_is_odd(&r.y) && + rustsecp256k1_v0_8_0_fe_equal_var(&rx, &r.x); } #endif diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h index 4862849e6..2f29dc2c7 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h @@ -8,7 +8,7 @@ #define SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H #include "../../../include/secp256k1_schnorrsig.h" -#include "src/modules/schnorrsig/main_impl.h" +#include "main_impl.h" static const unsigned char invalid_pubkey_bytes[][32] = { /* 0 */ @@ -58,12 +58,12 @@ static const unsigned char invalid_pubkey_bytes[][32] = { #define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0])) -static int rustsecp256k1_v0_7_0_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg, +static int rustsecp256k1_v0_8_0_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void* data) { - rustsecp256k1_v0_7_0_scalar s; + rustsecp256k1_v0_8_0_scalar s; int *idata = data; (void)msg; (void)msglen; @@ -71,12 +71,12 @@ static int rustsecp256k1_v0_7_0_hardened_nonce_function_smallint(unsigned char * (void)xonly_pk32; (void)algo; (void)algolen; - rustsecp256k1_v0_7_0_scalar_set_int(&s, *idata); - rustsecp256k1_v0_7_0_scalar_get_b32(nonce32, &s); + rustsecp256k1_v0_8_0_scalar_set_int(&s, *idata); + rustsecp256k1_v0_8_0_scalar_get_b32(nonce32, &s); return 1; } -static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_7_0_context *ctx, const rustsecp256k1_v0_7_0_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) { +static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_8_0_context *ctx, const rustsecp256k1_v0_8_0_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) { int d; uint64_t iter = 0; /* Iterate over the possible public keys to verify against (through their corresponding DL d). */ @@ -102,10 +102,10 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_7_0_context } /* Randomly generate messages until all challenges have been hit. 
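 * (In exhaustive-test builds the group order is tiny, so random messages quickly hit every possible challenge value; e_done[] tracks which values have been covered.)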
*/ while (e_count_done < EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_7_0_scalar e; + rustsecp256k1_v0_8_0_scalar e; unsigned char msg32[32]; - rustsecp256k1_v0_7_0_testrand256(msg32); - rustsecp256k1_v0_7_0_schnorrsig_challenge(&e, sig64, msg32, sizeof(msg32), pk32); + rustsecp256k1_v0_8_0_testrand256(msg32); + rustsecp256k1_v0_8_0_schnorrsig_challenge(&e, sig64, msg32, sizeof(msg32), pk32); /* Only do work if we hit a challenge we haven't tried before. */ if (!e_done[e]) { /* Iterate over the possible valid last 32 bytes in the signature. @@ -114,16 +114,16 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_7_0_context for (s = 0; s <= EXHAUSTIVE_TEST_ORDER + 1; ++s) { int expect_valid, valid; if (s <= EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_7_0_scalar s_s; - rustsecp256k1_v0_7_0_scalar_set_int(&s_s, s); - rustsecp256k1_v0_7_0_scalar_get_b32(sig64 + 32, &s_s); + rustsecp256k1_v0_8_0_scalar s_s; + rustsecp256k1_v0_8_0_scalar_set_int(&s_s, s); + rustsecp256k1_v0_8_0_scalar_get_b32(sig64 + 32, &s_s); expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER && (s_s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER); } else { - rustsecp256k1_v0_7_0_testrand256(sig64 + 32); + rustsecp256k1_v0_8_0_testrand256(sig64 + 32); expect_valid = 0; } - valid = rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &pubkeys[d - 1]); + valid = rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &pubkeys[d - 1]); CHECK(valid == expect_valid); count_valid += valid; } @@ -138,10 +138,10 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1_v0_7_0_context } } -static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_7_0_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1_v0_7_0_keypair* keypairs, const int* parities) { +static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_8_0_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1_v0_8_0_keypair* keypairs, const int* parities) { int d, k; uint64_t iter = 0; - rustsecp256k1_v0_7_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + rustsecp256k1_v0_8_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; /* Loop over keys. */ for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) { @@ -155,25 +155,25 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_7_0_context * unsigned char sig64[64]; int actual_k = k; if (skip_section(&iter)) continue; - extraparams.noncefp = rustsecp256k1_v0_7_0_hardened_nonce_function_smallint; + extraparams.noncefp = rustsecp256k1_v0_8_0_hardened_nonce_function_smallint; extraparams.ndata = &k; if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k; /* Generate random messages until all challenges have been tried. */ while (e_count_done < EXHAUSTIVE_TEST_ORDER) { - rustsecp256k1_v0_7_0_scalar e; - rustsecp256k1_v0_7_0_testrand256(msg32); - rustsecp256k1_v0_7_0_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, sizeof(msg32), xonly_pubkey_bytes[d - 1]); + rustsecp256k1_v0_8_0_scalar e; + rustsecp256k1_v0_8_0_testrand256(msg32); + rustsecp256k1_v0_8_0_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, sizeof(msg32), xonly_pubkey_bytes[d - 1]); /* Only do work if we hit a challenge we haven't tried before. 
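 * (With the nonce k and key d known, the only valid signature scalar is s = k + e*d mod n, which is exactly what expected_s computes below.)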
*/ if (!e_done[e]) { - rustsecp256k1_v0_7_0_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; + rustsecp256k1_v0_8_0_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; unsigned char expected_s_bytes[32]; - rustsecp256k1_v0_7_0_scalar_get_b32(expected_s_bytes, &expected_s); + rustsecp256k1_v0_8_0_scalar_get_b32(expected_s_bytes, &expected_s); /* Invoke the real function to construct a signature. */ - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(ctx, sig64, msg32, sizeof(msg32), &keypairs[d - 1], &extraparams)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig64, msg32, sizeof(msg32), &keypairs[d - 1], &extraparams)); /* The first 32 bytes must match the xonly pubkey for the specified k. */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); /* The last 32 bytes must match the expected s value. */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0); /* Don't retry other messages that result in the same challenge. */ e_done[e] = 1; ++e_count_done; @@ -183,28 +183,28 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1_v0_7_0_context * } } -static void test_exhaustive_schnorrsig(const rustsecp256k1_v0_7_0_context *ctx) { - rustsecp256k1_v0_7_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; - rustsecp256k1_v0_7_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; +static void test_exhaustive_schnorrsig(const rustsecp256k1_v0_8_0_context *ctx) { + rustsecp256k1_v0_8_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; + rustsecp256k1_v0_8_0_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; int parity[EXHAUSTIVE_TEST_ORDER - 1]; unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32]; unsigned i; /* Verify that all invalid_pubkey_bytes are actually invalid. */ for (i = 0; i < NUM_INVALID_KEYS; ++i) { - rustsecp256k1_v0_7_0_xonly_pubkey pk; - CHECK(!rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i])); + rustsecp256k1_v0_8_0_xonly_pubkey pk; + CHECK(!rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i])); } /* Construct keypairs and xonly-pubkeys for the entire group. 
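 * (Secret keys 1 through n-1 enumerate every point in the small test group; parity[] records each pubkey's Y parity, which the x-only serialization drops.)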
*/ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; ++i) { - rustsecp256k1_v0_7_0_scalar scalar_i; + rustsecp256k1_v0_8_0_scalar scalar_i; unsigned char buf[32]; - rustsecp256k1_v0_7_0_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_7_0_scalar_get_b32(buf, &scalar_i); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair[i - 1], buf)); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1])); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); + rustsecp256k1_v0_8_0_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_8_0_scalar_get_b32(buf, &scalar_i); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair[i - 1], buf)); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1])); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); } test_exhaustive_schnorrsig_sign(ctx, xonly_pubkey_bytes, keypair, parity); diff --git a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h index 0f7e54e18..77f3e8d2f 100644 --- a/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h @@ -15,20 +15,20 @@ void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes, size_t msglen, size_t algolen) { unsigned char nonces[2][32]; CHECK(nonce_function_bip340(nonces[0], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1); - rustsecp256k1_v0_7_0_testrand_flip(args[n_flip], n_bytes); + rustsecp256k1_v0_8_0_testrand_flip(args[n_flip], n_bytes); CHECK(nonce_function_bip340(nonces[1], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonces[0], nonces[1], 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonces[0], nonces[1], 32) != 0); } /* Tests for the equality of two sha256 structs. This function only produces a * correct result if an integer multiple of 64 many bytes have been written * into the hash functions. */ -void test_sha256_eq(const rustsecp256k1_v0_7_0_sha256 *sha1, const rustsecp256k1_v0_7_0_sha256 *sha2) { +void test_sha256_eq(const rustsecp256k1_v0_8_0_sha256 *sha1, const rustsecp256k1_v0_8_0_sha256 *sha2) { /* Is buffer fully consumed? */ CHECK((sha1->bytes & 0x3F) == 0); CHECK(sha1->bytes == sha2->bytes); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0); } void run_nonce_function_bip340_tests(void) { @@ -36,8 +36,8 @@ void run_nonce_function_bip340_tests(void) { unsigned char aux_tag[11] = "BIP0340/aux"; unsigned char algo[13] = "BIP0340/nonce"; size_t algolen = sizeof(algo); - rustsecp256k1_v0_7_0_sha256 sha; - rustsecp256k1_v0_7_0_sha256 sha_optimized; + rustsecp256k1_v0_8_0_sha256 sha; + rustsecp256k1_v0_8_0_sha256 sha_optimized; unsigned char nonce[32], nonce_z[32]; unsigned char msg[32]; size_t msglen = sizeof(msg); @@ -48,23 +48,23 @@ void run_nonce_function_bip340_tests(void) { int i; /* Check that hash initialized by - * rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged has the expected + * rustsecp256k1_v0_8_0_nonce_function_bip340_sha256_tagged has the expected * state. 
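 * (The check recomputes the tag from scratch with sha256_initialize_tagged, so a stale hard-coded midstate would fail test_sha256_eq.)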
*/ - rustsecp256k1_v0_7_0_sha256_initialize_tagged(&sha, tag, sizeof(tag)); - rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged(&sha_optimized); + rustsecp256k1_v0_8_0_sha256_initialize_tagged(&sha, tag, sizeof(tag)); + rustsecp256k1_v0_8_0_nonce_function_bip340_sha256_tagged(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); /* Check that hash initialized by - * rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged_aux has the expected + * rustsecp256k1_v0_8_0_nonce_function_bip340_sha256_tagged_aux has the expected * state. */ - rustsecp256k1_v0_7_0_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag)); - rustsecp256k1_v0_7_0_nonce_function_bip340_sha256_tagged_aux(&sha_optimized); + rustsecp256k1_v0_8_0_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag)); + rustsecp256k1_v0_8_0_nonce_function_bip340_sha256_tagged_aux(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); - rustsecp256k1_v0_7_0_testrand256(msg); - rustsecp256k1_v0_7_0_testrand256(key); - rustsecp256k1_v0_7_0_testrand256(pk); - rustsecp256k1_v0_7_0_testrand256(aux_rand); + rustsecp256k1_v0_8_0_testrand256(msg); + rustsecp256k1_v0_8_0_testrand256(key); + rustsecp256k1_v0_8_0_testrand256(pk); + rustsecp256k1_v0_8_0_testrand256(aux_rand); /* Check that a bitflip in an argument results in different nonces. */ args[0] = msg; @@ -87,31 +87,31 @@ void run_nonce_function_bip340_tests(void) { CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, NULL, 0, NULL) == 0); CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); /* Other algo is fine */ - rustsecp256k1_v0_7_0_testrand_bytes_test(algo, algolen); + rustsecp256k1_v0_8_0_testrand_bytes_test(algo, algolen); CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); for (i = 0; i < count; i++) { unsigned char nonce2[32]; - uint32_t offset = rustsecp256k1_v0_7_0_testrand_int(msglen - 1); + uint32_t offset = rustsecp256k1_v0_8_0_testrand_int(msglen - 1); size_t msglen_tmp = (msglen + offset) % msglen; size_t algolen_tmp; /* Different msglen gives different nonce */ CHECK(nonce_function_bip340(nonce2, msg, msglen_tmp, key, pk, algo, algolen, NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce, nonce2, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce, nonce2, 32) != 0); /* Different algolen gives different nonce */ - offset = rustsecp256k1_v0_7_0_testrand_int(algolen - 1); + offset = rustsecp256k1_v0_8_0_testrand_int(algolen - 1); algolen_tmp = (algolen + offset) % algolen; CHECK(nonce_function_bip340(nonce2, msg, msglen, key, pk, algo, algolen_tmp, NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce, nonce2, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce, nonce2, 32) != 0); } /* NULL aux_rand argument is allowed, and identical to passing all zero aux_rand. 
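 * (NULL just selects the same behaviour as an all-zero aux_rand; BIP-340 nevertheless recommends supplying 32 bytes of fresh randomness when available.)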
*/ memset(aux_rand, 0, 32); CHECK(nonce_function_bip340(nonce_z, msg, msglen, key, pk, algo, algolen, &aux_rand) == 1); CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce_z, nonce, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce_z, nonce, 32) == 0); } void test_schnorrsig_api(void) { @@ -119,123 +119,97 @@ void test_schnorrsig_api(void) { unsigned char sk2[32]; unsigned char sk3[32]; unsigned char msg[32]; - rustsecp256k1_v0_7_0_keypair keypairs[3]; - rustsecp256k1_v0_7_0_keypair invalid_keypair = {{ 0 }}; - rustsecp256k1_v0_7_0_xonly_pubkey pk[3]; - rustsecp256k1_v0_7_0_xonly_pubkey zero_pk; + rustsecp256k1_v0_8_0_keypair keypairs[3]; + rustsecp256k1_v0_8_0_keypair invalid_keypair = {{ 0 }}; + rustsecp256k1_v0_8_0_xonly_pubkey pk[3]; + rustsecp256k1_v0_8_0_xonly_pubkey zero_pk; unsigned char sig[64]; - rustsecp256k1_v0_7_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; - rustsecp256k1_v0_7_0_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL}; + rustsecp256k1_v0_8_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + rustsecp256k1_v0_8_0_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL}; /** setup **/ - rustsecp256k1_v0_7_0_context *none = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_NONE); - rustsecp256k1_v0_7_0_context *sign = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN); - rustsecp256k1_v0_7_0_context *vrfy = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_VERIFY); - rustsecp256k1_v0_7_0_context *both = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - rustsecp256k1_v0_7_0_context *sttc = rustsecp256k1_v0_7_0_context_clone(rustsecp256k1_v0_7_0_context_no_precomp); + rustsecp256k1_v0_8_0_context *sttc = rustsecp256k1_v0_8_0_context_clone(rustsecp256k1_v0_8_0_context_static); int ecount; - rustsecp256k1_v0_7_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_error_callback(sttc, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(sttc, counting_illegal_callback_fn, &ecount); - - rustsecp256k1_v0_7_0_testrand256(sk1); - rustsecp256k1_v0_7_0_testrand256(sk2); - rustsecp256k1_v0_7_0_testrand256(sk3); - rustsecp256k1_v0_7_0_testrand256(msg); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypairs[0], sk1) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypairs[1], sk2) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypairs[2], sk3) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &pk[0], NULL, &keypairs[0]) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &pk[1], NULL, &keypairs[1]) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, 
&pk[2], NULL, &keypairs[2]) == 1); + rustsecp256k1_v0_8_0_context_set_error_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_error_callback(sttc, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(sttc, counting_illegal_callback_fn, &ecount); + + rustsecp256k1_v0_8_0_testrand256(sk1); + rustsecp256k1_v0_8_0_testrand256(sk2); + rustsecp256k1_v0_8_0_testrand256(sk3); + rustsecp256k1_v0_8_0_testrand256(msg); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypairs[0], sk1) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypairs[1], sk2) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypairs[2], sk3) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &pk[0], NULL, &keypairs[0]) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &pk[1], NULL, &keypairs[1]) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &pk[2], NULL, &keypairs[2]) == 1); memset(&zero_pk, 0, sizeof(zero_pk)); /** main test body **/ ecount = 0; - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, msg, &keypairs[0], NULL) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, NULL, msg, &keypairs[0], NULL) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, NULL, &keypairs[0], NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(sign, sig, msg, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, msg, NULL, NULL) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, msg, &invalid_keypair, NULL) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(sttc, sig, msg, &keypairs[0], NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(sttc, sig, msg, &keypairs[0], NULL) == 0); CHECK(ecount == 5); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(none, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(vrfy, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sign, NULL, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, NULL, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sign, sig, NULL, sizeof(msg), &keypairs[0], &extraparams) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, NULL, sizeof(msg), &keypairs[0], 
&extraparams) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sign, sig, NULL, 0, &keypairs[0], &extraparams) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, NULL, 0, &keypairs[0], &extraparams) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), NULL, &extraparams) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), NULL, &extraparams) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &invalid_keypair, &extraparams) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &invalid_keypair, &extraparams) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypairs[0], NULL) == 1); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(sttc, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(sttc, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 6); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(none, sig, msg, sizeof(msg), &pk[0]) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(sign, sig, msg, sizeof(msg), &pk[0]) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), &pk[0]) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, msg, &keypairs[0], NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk[0]) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(vrfy, NULL, msg, sizeof(msg), &pk[0]) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, NULL, msg, sizeof(msg), &pk[0]) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(vrfy, sig, NULL, sizeof(msg), &pk[0]) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, NULL, sizeof(msg), &pk[0]) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(vrfy, sig, NULL, 0, &pk[0]) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, NULL, 0, &pk[0]) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), NULL) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), &zero_pk) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &zero_pk) == 0); CHECK(ecount == 4); - rustsecp256k1_v0_7_0_context_destroy(none); - rustsecp256k1_v0_7_0_context_destroy(sign); - rustsecp256k1_v0_7_0_context_destroy(vrfy); - rustsecp256k1_v0_7_0_context_destroy(both); - rustsecp256k1_v0_7_0_context_destroy(sttc); + rustsecp256k1_v0_8_0_context_destroy(sttc); } -/* Checks that hash initialized by rustsecp256k1_v0_7_0_schnorrsig_sha256_tagged has the +/* Checks that hash initialized by 
rustsecp256k1_v0_8_0_schnorrsig_sha256_tagged has the * expected state. */ void test_schnorrsig_sha256_tagged(void) { unsigned char tag[17] = "BIP0340/challenge"; - rustsecp256k1_v0_7_0_sha256 sha; - rustsecp256k1_v0_7_0_sha256 sha_optimized; + rustsecp256k1_v0_8_0_sha256 sha; + rustsecp256k1_v0_8_0_sha256 sha_optimized; - rustsecp256k1_v0_7_0_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag)); - rustsecp256k1_v0_7_0_schnorrsig_sha256_tagged(&sha_optimized); + rustsecp256k1_v0_8_0_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag)); + rustsecp256k1_v0_8_0_schnorrsig_sha256_tagged(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); } @@ -243,26 +217,26 @@ void test_schnorrsig_sha256_tagged(void) { * Signs the message and checks that it's the same as expected_sig. */ void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, const unsigned char *aux_rand, const unsigned char *msg32, const unsigned char *expected_sig) { unsigned char sig[64]; - rustsecp256k1_v0_7_0_keypair keypair; - rustsecp256k1_v0_7_0_xonly_pubkey pk, pk_expected; + rustsecp256k1_v0_8_0_keypair keypair; + rustsecp256k1_v0_8_0_xonly_pubkey pk, pk_expected; - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk)); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(ctx, sig, msg32, &keypair, aux_rand)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(sig, expected_sig, 64) == 0); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, msg32, &keypair, aux_rand)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sig, expected_sig, 64) == 0); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized)); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig, msg32, 32, &pk)); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized)); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg32, 32, &pk)); } /* Helper function for schnorrsig_bip_vectors * Checks that both verify and verify_batch (TODO) return the same value as expected. */ void test_schnorrsig_bip_vectors_check_verify(const unsigned char *pk_serialized, const unsigned char *msg32, const unsigned char *sig, int expected) { - rustsecp256k1_v0_7_0_xonly_pubkey pk; + rustsecp256k1_v0_8_0_xonly_pubkey pk; - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &pk, pk_serialized)); - CHECK(expected == rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig, msg32, 32, &pk)); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &pk, pk_serialized)); + CHECK(expected == rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg32, 32, &pk)); } /* Test vectors according to BIP-340 ("Schnorr Signatures for secp256k1"). 
See @@ -458,9 +432,9 @@ void test_schnorrsig_bip_vectors(void) { 0xEB, 0x98, 0x98, 0xAE, 0x79, 0xB9, 0x76, 0x87, 0x66, 0xE4, 0xFA, 0xA0, 0x4A, 0x2D, 0x4A, 0x34 }; - rustsecp256k1_v0_7_0_xonly_pubkey pk_parsed; + rustsecp256k1_v0_8_0_xonly_pubkey pk_parsed; /* No need to check the signature of the test vector as parsing the pubkey already fails */ - CHECK(!rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &pk_parsed, pk)); + CHECK(!rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &pk_parsed, pk)); } { /* Test vector 6 */ @@ -678,9 +652,9 @@ void test_schnorrsig_bip_vectors(void) { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30 }; - rustsecp256k1_v0_7_0_xonly_pubkey pk_parsed; + rustsecp256k1_v0_8_0_xonly_pubkey pk_parsed; /* No need to check the signature of the test vector as parsing the pubkey already fails */ - CHECK(!rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &pk_parsed, pk)); + CHECK(!rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &pk_parsed, pk)); } } @@ -727,45 +701,48 @@ static int nonce_function_overflowing(unsigned char *nonce32, const unsigned cha void test_schnorrsig_sign(void) { unsigned char sk[32]; - rustsecp256k1_v0_7_0_xonly_pubkey pk; - rustsecp256k1_v0_7_0_keypair keypair; + rustsecp256k1_v0_8_0_xonly_pubkey pk; + rustsecp256k1_v0_8_0_keypair keypair; const unsigned char msg[32] = "this is a msg for a schnorrsig.."; unsigned char sig[64]; unsigned char sig2[64]; unsigned char zeros64[64] = { 0 }; - rustsecp256k1_v0_7_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + rustsecp256k1_v0_8_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; unsigned char aux_rand[32]; - rustsecp256k1_v0_7_0_testrand256(sk); - rustsecp256k1_v0_7_0_testrand256(aux_rand); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk)); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); + rustsecp256k1_v0_8_0_testrand256(sk); + rustsecp256k1_v0_8_0_testrand256(aux_rand); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk)); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, msg, &keypair, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); + /* Check that deprecated alias gives the same result */ + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign(ctx, sig2, msg, &keypair, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sig, sig2, sizeof(sig)) == 0); /* Test different nonce functions */ - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); memset(sig, 1, sizeof(sig)); extraparams.noncefp = nonce_function_failing; - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sig, zeros64, 
sizeof(sig)) == 0); memset(&sig, 1, sizeof(sig)); extraparams.noncefp = nonce_function_0; - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); memset(&sig, 1, sizeof(sig)); extraparams.noncefp = nonce_function_overflowing; - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); /* When using the default nonce function, schnorrsig_sign_custom produces * the same result as schnorrsig_sign with aux_rand = extraparams.ndata */ extraparams.noncefp = NULL; extraparams.ndata = aux_rand; - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(ctx, sig2, msg, &keypair, extraparams.ndata) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(sig, sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig2, msg, &keypair, extraparams.ndata) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(sig, sig2, sizeof(sig)) == 0); } #define N_SIGS 3 @@ -777,84 +754,84 @@ void test_schnorrsig_sign_verify(void) { unsigned char msg[N_SIGS][32]; unsigned char sig[N_SIGS][64]; size_t i; - rustsecp256k1_v0_7_0_keypair keypair; - rustsecp256k1_v0_7_0_xonly_pubkey pk; - rustsecp256k1_v0_7_0_scalar s; + rustsecp256k1_v0_8_0_keypair keypair; + rustsecp256k1_v0_8_0_xonly_pubkey pk; + rustsecp256k1_v0_8_0_scalar s; - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk)); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk)); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); for (i = 0; i < N_SIGS; i++) { - rustsecp256k1_v0_7_0_testrand256(msg[i]); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL)); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[i], msg[i], sizeof(msg[i]), &pk)); + rustsecp256k1_v0_8_0_testrand256(msg[i]); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig[i], msg[i], &keypair, NULL)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[i], msg[i], sizeof(msg[i]), &pk)); } { /* Flip a few bits in the signature and in the message and check that * verify and verify_batch (TODO) fail */ - size_t sig_idx = rustsecp256k1_v0_7_0_testrand_int(N_SIGS); - size_t byte_idx = rustsecp256k1_v0_7_0_testrand_bits(5); - unsigned char xorbyte = rustsecp256k1_v0_7_0_testrand_int(254)+1; + size_t sig_idx = rustsecp256k1_v0_8_0_testrand_int(N_SIGS); + size_t byte_idx = rustsecp256k1_v0_8_0_testrand_bits(5); + unsigned char xorbyte = rustsecp256k1_v0_8_0_testrand_int(254)+1; sig[sig_idx][byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); + 
CHECK(!rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); sig[sig_idx][byte_idx] ^= xorbyte; - byte_idx = rustsecp256k1_v0_7_0_testrand_bits(5); + byte_idx = rustsecp256k1_v0_8_0_testrand_bits(5); sig[sig_idx][32+byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); + CHECK(!rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); sig[sig_idx][32+byte_idx] ^= xorbyte; - byte_idx = rustsecp256k1_v0_7_0_testrand_bits(5); + byte_idx = rustsecp256k1_v0_8_0_testrand_bits(5); msg[sig_idx][byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); + CHECK(!rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); msg[sig_idx][byte_idx] ^= xorbyte; /* Check that above bitflips have been reversed correctly */ - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); } /* Test overflowing s */ - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL)); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig[0], msg[0], &keypair, NULL)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); memset(&sig[0][32], 0xFF, 32); - CHECK(!rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); + CHECK(!rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); /* Test negative s */ - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL)); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); - rustsecp256k1_v0_7_0_scalar_set_b32(&s, &sig[0][32], NULL); - rustsecp256k1_v0_7_0_scalar_negate(&s, &s); - rustsecp256k1_v0_7_0_scalar_get_b32(&sig[0][32], &s); - CHECK(!rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig[0], msg[0], &keypair, NULL)); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); + rustsecp256k1_v0_8_0_scalar_set_b32(&s, &sig[0][32], NULL); + rustsecp256k1_v0_8_0_scalar_negate(&s, &s); + rustsecp256k1_v0_8_0_scalar_get_b32(&sig[0][32], &s); + CHECK(!rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); /* The empty message can be signed & verified */ - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(ctx, sig[0], NULL, 0, &keypair, NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[0], NULL, 0, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig[0], NULL, 0, &keypair, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[0], NULL, 0, &pk) == 1); { /* Test varying message lengths */ unsigned char msg_large[32 * 8]; - uint32_t msglen = rustsecp256k1_v0_7_0_testrand_int(sizeof(msg_large)); + uint32_t msglen = rustsecp256k1_v0_8_0_testrand_int(sizeof(msg_large)); for (i = 0; i < sizeof(msg_large); i += 32) { - rustsecp256k1_v0_7_0_testrand256(&msg_large[i]); + rustsecp256k1_v0_8_0_testrand256(&msg_large[i]); } - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign_custom(ctx, sig[0], msg_large, msglen, 
&keypair, NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign_custom(ctx, sig[0], msg_large, msglen, &keypair, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 1); /* Verification for a random wrong message length fails */ msglen = (msglen + (sizeof(msg_large) - 1)) % sizeof(msg_large); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 0); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 0); } } #undef N_SIGS void test_schnorrsig_taproot(void) { unsigned char sk[32]; - rustsecp256k1_v0_7_0_keypair keypair; - rustsecp256k1_v0_7_0_xonly_pubkey internal_pk; + rustsecp256k1_v0_8_0_keypair keypair; + rustsecp256k1_v0_8_0_xonly_pubkey internal_pk; unsigned char internal_pk_bytes[32]; - rustsecp256k1_v0_7_0_xonly_pubkey output_pk; + rustsecp256k1_v0_8_0_xonly_pubkey output_pk; unsigned char output_pk_bytes[32]; unsigned char tweak[32]; int pk_parity; @@ -862,27 +839,27 @@ void test_schnorrsig_taproot(void) { unsigned char sig[64]; /* Create output key */ - rustsecp256k1_v0_7_0_testrand256(sk); - CHECK(rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, sk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); + rustsecp256k1_v0_8_0_testrand256(sk); + CHECK(rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, sk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); /* In actual taproot the tweak would be hash of internal_pk */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, tweak, &internal_pk) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); - CHECK(rustsecp256k1_v0_7_0_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, tweak, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1); /* Key spend */ - rustsecp256k1_v0_7_0_testrand256(msg); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL) == 1); + rustsecp256k1_v0_8_0_testrand256(msg); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, msg, &keypair, NULL) == 1); /* Verify key spend */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1); - CHECK(rustsecp256k1_v0_7_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &output_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1); + CHECK(rustsecp256k1_v0_8_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &output_pk) == 1); /* Script spend */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1); /* Verify script spend */ - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_parse(ctx, &internal_pk, internal_pk_bytes) == 1); - CHECK(rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check(ctx, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1); + CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_parse(ctx, &internal_pk, internal_pk_bytes) == 1); + 
CHECK(rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check(ctx, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1); } void run_schnorrsig_tests(void) { diff --git a/secp256k1-sys/depend/secp256k1/src/precompute_ecmult.c b/secp256k1-sys/depend/secp256k1/src/precompute_ecmult.c index 275041ce6..3500baf94 100644 --- a/secp256k1-sys/depend/secp256k1/src/precompute_ecmult.c +++ b/secp256k1-sys/depend/secp256k1/src/precompute_ecmult.c @@ -14,18 +14,21 @@ #endif #include "../include/secp256k1.h" + #include "assumptions.h" #include "util.h" + #include "field_impl.h" #include "group_impl.h" +#include "int128_impl.h" #include "ecmult.h" #include "ecmult_compute_table_impl.h" -static void print_table(FILE *fp, const char *name, int window_g, const rustsecp256k1_v0_7_0_ge_storage* table) { +static void print_table(FILE *fp, const char *name, int window_g, const rustsecp256k1_v0_8_0_ge_storage* table) { int j; int i; - fprintf(fp, "const rustsecp256k1_v0_7_0_ge_storage %s[ECMULT_TABLE_SIZE(WINDOW_G)] = {\n", name); + fprintf(fp, "const rustsecp256k1_v0_8_0_ge_storage %s[ECMULT_TABLE_SIZE(WINDOW_G)] = {\n", name); fprintf(fp, " S(%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32 ",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32",%"PRIx32")\n", SECP256K1_GE_STORAGE_CONST_GET(table[0])); @@ -44,13 +47,13 @@ static void print_table(FILE *fp, const char *name, int window_g, const rustsecp } static void print_two_tables(FILE *fp, int window_g) { - rustsecp256k1_v0_7_0_ge_storage* table = malloc(ECMULT_TABLE_SIZE(window_g) * sizeof(rustsecp256k1_v0_7_0_ge_storage)); - rustsecp256k1_v0_7_0_ge_storage* table_128 = malloc(ECMULT_TABLE_SIZE(window_g) * sizeof(rustsecp256k1_v0_7_0_ge_storage)); + rustsecp256k1_v0_8_0_ge_storage* table = malloc(ECMULT_TABLE_SIZE(window_g) * sizeof(rustsecp256k1_v0_8_0_ge_storage)); + rustsecp256k1_v0_8_0_ge_storage* table_128 = malloc(ECMULT_TABLE_SIZE(window_g) * sizeof(rustsecp256k1_v0_8_0_ge_storage)); - rustsecp256k1_v0_7_0_ecmult_compute_two_tables(table, table_128, window_g, &rustsecp256k1_v0_7_0_ge_const_g); + rustsecp256k1_v0_8_0_ecmult_compute_two_tables(table, table_128, window_g, &rustsecp256k1_v0_8_0_ge_const_g); - print_table(fp, "rustsecp256k1_v0_7_0_pre_g", window_g, table); - print_table(fp, "rustsecp256k1_v0_7_0_pre_g_128", window_g, table_128); + print_table(fp, "rustsecp256k1_v0_8_0_pre_g", window_g, table); + print_table(fp, "rustsecp256k1_v0_8_0_pre_g_128", window_g, table_128); free(table); free(table_128); @@ -68,8 +71,8 @@ int main(void) { } fprintf(fp, "/* This file was automatically generated by precompute_ecmult. 
*/\n"); - fprintf(fp, "/* This file contains an array rustsecp256k1_v0_7_0_pre_g with odd multiples of the base point G and\n"); - fprintf(fp, " * an array rustsecp256k1_v0_7_0_pre_g_128 with odd multiples of 2^128*G for accelerating the computation of a*P + b*G.\n"); + fprintf(fp, "/* This file contains an array rustsecp256k1_v0_8_0_pre_g with odd multiples of the base point G and\n"); + fprintf(fp, " * an array rustsecp256k1_v0_8_0_pre_g_128 with odd multiples of 2^128*G for accelerating the computation of a*P + b*G.\n"); fprintf(fp, " */\n"); fprintf(fp, "#if defined HAVE_CONFIG_H\n"); fprintf(fp, "# include \"libsecp256k1-config.h\"\n"); diff --git a/secp256k1-sys/depend/secp256k1/src/precompute_ecmult_gen.c b/secp256k1-sys/depend/secp256k1/src/precompute_ecmult_gen.c index bc0997eb7..be686365c 100644 --- a/secp256k1-sys/depend/secp256k1/src/precompute_ecmult_gen.c +++ b/secp256k1-sys/depend/secp256k1/src/precompute_ecmult_gen.c @@ -8,9 +8,12 @@ #include #include "../include/secp256k1.h" + #include "assumptions.h" #include "util.h" + #include "group.h" +#include "int128_impl.h" #include "ecmult_gen.h" #include "ecmult_gen_compute_table_impl.h" @@ -41,15 +44,15 @@ int main(int argc, char **argv) { fprintf(fp, "# error Cannot compile precomputed_ecmult_gen.c in exhaustive test mode\n"); fprintf(fp, "#endif /* EXHAUSTIVE_TEST_ORDER */\n"); fprintf(fp, "#define S(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) SECP256K1_GE_STORAGE_CONST(0x##a##u,0x##b##u,0x##c##u,0x##d##u,0x##e##u,0x##f##u,0x##g##u,0x##h##u,0x##i##u,0x##j##u,0x##k##u,0x##l##u,0x##m##u,0x##n##u,0x##o##u,0x##p##u)\n"); - fprintf(fp, "const rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)] = {\n"); + fprintf(fp, "const rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)] = {\n"); for (bits = 2; bits <= 8; bits *= 2) { int g = ECMULT_GEN_PREC_G(bits); int n = ECMULT_GEN_PREC_N(bits); int inner, outer; - rustsecp256k1_v0_7_0_ge_storage* table = checked_malloc(&default_error_callback, n * g * sizeof(rustsecp256k1_v0_7_0_ge_storage)); - rustsecp256k1_v0_7_0_ecmult_gen_compute_table(table, &rustsecp256k1_v0_7_0_ge_const_g, bits); + rustsecp256k1_v0_8_0_ge_storage* table = checked_malloc(&default_error_callback, n * g * sizeof(rustsecp256k1_v0_8_0_ge_storage)); + rustsecp256k1_v0_8_0_ecmult_gen_compute_table(table, &rustsecp256k1_v0_8_0_ge_const_g, bits); fprintf(fp, "#if ECMULT_GEN_PREC_BITS == %d\n", bits); for(outer = 0; outer != n; outer++) { diff --git a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.c b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.c index 11424cd27..98172a780 100644 --- a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.c +++ b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.c @@ -1,6 +1,6 @@ /* This file was automatically generated by precompute_ecmult. */ -/* This file contains an array rustsecp256k1_v0_7_0_pre_g with odd multiples of the base point G and - * an array rustsecp256k1_v0_7_0_pre_g_128 with odd multiples of 2^128*G for accelerating the computation of a*P + b*G. +/* This file contains an array rustsecp256k1_v0_8_0_pre_g with odd multiples of the base point G and + * an array rustsecp256k1_v0_8_0_pre_g_128 with odd multiples of 2^128*G for accelerating the computation of a*P + b*G. 
*/ #if defined HAVE_CONFIG_H # include "libsecp256k1-config.h" @@ -17,7 +17,7 @@ # error Cannot compile precomputed_ecmult.c in exhaustive test mode #endif /* EXHAUSTIVE_TEST_ORDER */ #define WINDOW_G ECMULT_WINDOW_SIZE -const rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)] = { +const rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)] = { S(79be667e,f9dcbbac,55a06295,ce870b07,29bfcdb,2dce28d9,59f2815b,16f81798,483ada77,26a3c465,5da4fbfc,e1108a8,fd17b448,a6855419,9c47d08f,fb10d4b8) #if WINDOW_G > 2 ,S(f9308a01,9258c310,49344f85,f89d5229,b531c845,836f99b0,8601f113,bce036f9,388f7b0f,632de814,fe337e6,2a37f356,6500a999,34c2231b,6cb9fd75,84b8e672) @@ -8237,7 +8237,7 @@ const rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_pre_g[ECMULT_TABLE_SI ,S(1e70619c,381a6adc,e5d925e0,c9c74f97,3c02ff64,ff2662d7,34efc485,d2bce895,c923f771,f543ffed,42935c28,8474aaaf,80a46ad4,3c579ce0,bb5e663d,668b24b3) #endif }; -const rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)] = { +const rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)] = { S(8f68b9d2,f63b5f33,9239c1ad,981f162e,e88c5678,723ea335,1b7b444c,9ec4c0da,662a9f2d,ba063986,de1d90c2,b6be215d,bbea2cfe,95510bfd,f23cbf79,501fff82) #if WINDOW_G > 2 ,S(38381dbe,2e509f22,8ba93363,f2451f08,fd845cb3,51d954be,18e2b8ed,d23809fa,e4a32d0a,fb917dc,b09405a5,520eb1cc,3681fccb,32d8f24d,bd707518,331fed52) diff --git a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.h b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.h index 96a562be9..78c5d0ec8 100644 --- a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.h +++ b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult.h @@ -20,12 +20,12 @@ extern "C" { # else # error No known generator for the specified exhaustive test group order. 
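
The bare hex limbs in the S(...) entries above carry no 0x prefix and no leading zeros because the macro glues those on via token pasting. The same trick in isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* As in the generated tables' S(...) macro: a bare hex limb `a`
     * becomes the integer constant 0x<a>u via the ## operator. */
    #define LIMB(a) 0x##a##u

    int main(void) {
        uint32_t x = LIMB(79be667e); /* first limb of the generator's x */
        printf("0x%08x\n", (unsigned)x);
        return 0;
    }
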
# endif -static rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; -static rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; +static rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; +static rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; #else /* !defined(EXHAUSTIVE_TEST_ORDER) */ # define WINDOW_G ECMULT_WINDOW_SIZE -extern const rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; -extern const rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; +extern const rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; +extern const rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; #endif /* defined(EXHAUSTIVE_TEST_ORDER) */ #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.c b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.c index eeb8fb94f..b71977b53 100644 --- a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.c +++ b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.c @@ -11,7 +11,7 @@ # error Cannot compile precomputed_ecmult_gen.c in exhaustive test mode #endif /* EXHAUSTIVE_TEST_ORDER */ #define S(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) SECP256K1_GE_STORAGE_CONST(0x##a##u,0x##b##u,0x##c##u,0x##d##u,0x##e##u,0x##f##u,0x##g##u,0x##h##u,0x##i##u,0x##j##u,0x##k##u,0x##l##u,0x##m##u,0x##n##u,0x##o##u,0x##p##u) -const rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)] = { +const rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)] = { #if ECMULT_GEN_PREC_BITS == 2 {S(3a9ed373,6eed3eec,9aeb5ac0,21b54652,56817b1f,8de6cd0,fbcee548,ba044bb5,7bcc5928,bdc9c023,dfc663b8,9e4f6969,ab751798,8e600ec1,d242010c,45c7974a), S(e44d7675,c3cb2857,4e133c01,a74f4afc,5ce684f8,4a789711,603f7c4f,50abef58,25bcb62f,fe2e2ce2,196ad86c,a006e20,8c64d21b,b25320a3,b5574b9c,1e1bfb4b), diff --git a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.h b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.h index 3c83602cb..35aa0c2a7 100644 --- a/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.h +++ b/secp256k1-sys/depend/secp256k1/src/precomputed_ecmult_gen.h @@ -14,9 +14,9 @@ extern "C" { #include "group.h" #include "ecmult_gen.h" #ifdef EXHAUSTIVE_TEST_ORDER -static rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)]; +static rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)]; #else -extern const rustsecp256k1_v0_7_0_ge_storage rustsecp256k1_v0_7_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)]; +extern const rustsecp256k1_v0_8_0_ge_storage rustsecp256k1_v0_8_0_ecmult_gen_prec_table[ECMULT_GEN_PREC_N(ECMULT_GEN_PREC_BITS)][ECMULT_GEN_PREC_G(ECMULT_GEN_PREC_BITS)]; #endif /* defined(EXHAUSTIVE_TEST_ORDER) */ #ifdef __cplusplus diff --git a/secp256k1-sys/depend/secp256k1/src/scalar.h b/secp256k1-sys/depend/secp256k1/src/scalar.h index b2e6a9fd4..4ed4c8c8d 
100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar.h @@ -24,82 +24,82 @@ #endif /** Clear a scalar to prevent the leak of sensitive data. */ -static void rustsecp256k1_v0_7_0_scalar_clear(rustsecp256k1_v0_7_0_scalar *r); +static void rustsecp256k1_v0_8_0_scalar_clear(rustsecp256k1_v0_8_0_scalar *r); /** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */ -static unsigned int rustsecp256k1_v0_7_0_scalar_get_bits(const rustsecp256k1_v0_7_0_scalar *a, unsigned int offset, unsigned int count); +static unsigned int rustsecp256k1_v0_8_0_scalar_get_bits(const rustsecp256k1_v0_8_0_scalar *a, unsigned int offset, unsigned int count); /** Access bits from a scalar. Not constant time. */ -static unsigned int rustsecp256k1_v0_7_0_scalar_get_bits_var(const rustsecp256k1_v0_7_0_scalar *a, unsigned int offset, unsigned int count); +static unsigned int rustsecp256k1_v0_8_0_scalar_get_bits_var(const rustsecp256k1_v0_8_0_scalar *a, unsigned int offset, unsigned int count); /** Set a scalar from a big endian byte array. The scalar will be reduced modulo group order `n`. * In: bin: pointer to a 32-byte array. * Out: r: scalar to be set. * overflow: non-zero if the scalar was bigger or equal to `n` before reduction, zero otherwise (can be NULL). */ -static void rustsecp256k1_v0_7_0_scalar_set_b32(rustsecp256k1_v0_7_0_scalar *r, const unsigned char *bin, int *overflow); +static void rustsecp256k1_v0_8_0_scalar_set_b32(rustsecp256k1_v0_8_0_scalar *r, const unsigned char *bin, int *overflow); /** Set a scalar from a big endian byte array and returns 1 if it is a valid * seckey and 0 otherwise. */ -static int rustsecp256k1_v0_7_0_scalar_set_b32_seckey(rustsecp256k1_v0_7_0_scalar *r, const unsigned char *bin); +static int rustsecp256k1_v0_8_0_scalar_set_b32_seckey(rustsecp256k1_v0_8_0_scalar *r, const unsigned char *bin); /** Set a scalar to an unsigned integer. */ -static void rustsecp256k1_v0_7_0_scalar_set_int(rustsecp256k1_v0_7_0_scalar *r, unsigned int v); +static void rustsecp256k1_v0_8_0_scalar_set_int(rustsecp256k1_v0_8_0_scalar *r, unsigned int v); /** Convert a scalar to a byte array. */ -static void rustsecp256k1_v0_7_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_7_0_scalar* a); +static void rustsecp256k1_v0_8_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_8_0_scalar* a); /** Add two scalars together (modulo the group order). Returns whether it overflowed. */ -static int rustsecp256k1_v0_7_0_scalar_add(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b); +static int rustsecp256k1_v0_8_0_scalar_add(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b); /** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */ -static void rustsecp256k1_v0_7_0_scalar_cadd_bit(rustsecp256k1_v0_7_0_scalar *r, unsigned int bit, int flag); +static void rustsecp256k1_v0_8_0_scalar_cadd_bit(rustsecp256k1_v0_8_0_scalar *r, unsigned int bit, int flag); /** Multiply two scalars (modulo the group order). 
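
The "reduced modulo group order n" behaviour documented for scalar_set_b32 above comes down to one identity: in arithmetic that wraps at 2^256, subtracting n is the same as adding the complement n_c = 2^256 - n and letting the carry fall off. A toy of the same idea at 8-bit width (the modulus 251 is purely illustrative):

    #include <stdint.h>
    #include <assert.h>

    int main(void) {
        /* With n = 251, n_c = 2^8 - n = 5; for every r in [n, 2^8) the
         * wrapped sum r + n_c equals the reduced value r - n. */
        uint8_t n = 251, n_c = (uint8_t)(0 - n); /* 2^8 - n, mod 2^8 */
        unsigned r;
        for (r = n; r < 256; r++) {
            assert((uint8_t)(r + n_c) == r - n);
        }
        return 0;
    }
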
*/ -static void rustsecp256k1_v0_7_0_scalar_mul(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b); +static void rustsecp256k1_v0_8_0_scalar_mul(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b); /** Shift a scalar right by some amount strictly between 0 and 16, returning * the low bits that were shifted off */ -static int rustsecp256k1_v0_7_0_scalar_shr_int(rustsecp256k1_v0_7_0_scalar *r, int n); +static int rustsecp256k1_v0_8_0_scalar_shr_int(rustsecp256k1_v0_8_0_scalar *r, int n); /** Compute the inverse of a scalar (modulo the group order). */ -static void rustsecp256k1_v0_7_0_scalar_inverse(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a); +static void rustsecp256k1_v0_8_0_scalar_inverse(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a); /** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */ -static void rustsecp256k1_v0_7_0_scalar_inverse_var(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a); +static void rustsecp256k1_v0_8_0_scalar_inverse_var(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a); /** Compute the complement of a scalar (modulo the group order). */ -static void rustsecp256k1_v0_7_0_scalar_negate(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a); +static void rustsecp256k1_v0_8_0_scalar_negate(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a); /** Check whether a scalar equals zero. */ -static int rustsecp256k1_v0_7_0_scalar_is_zero(const rustsecp256k1_v0_7_0_scalar *a); +static int rustsecp256k1_v0_8_0_scalar_is_zero(const rustsecp256k1_v0_8_0_scalar *a); /** Check whether a scalar equals one. */ -static int rustsecp256k1_v0_7_0_scalar_is_one(const rustsecp256k1_v0_7_0_scalar *a); +static int rustsecp256k1_v0_8_0_scalar_is_one(const rustsecp256k1_v0_8_0_scalar *a); /** Check whether a scalar, considered as an nonnegative integer, is even. */ -static int rustsecp256k1_v0_7_0_scalar_is_even(const rustsecp256k1_v0_7_0_scalar *a); +static int rustsecp256k1_v0_8_0_scalar_is_even(const rustsecp256k1_v0_8_0_scalar *a); /** Check whether a scalar is higher than the group order divided by 2. */ -static int rustsecp256k1_v0_7_0_scalar_is_high(const rustsecp256k1_v0_7_0_scalar *a); +static int rustsecp256k1_v0_8_0_scalar_is_high(const rustsecp256k1_v0_8_0_scalar *a); /** Conditionally negate a number, in constant time. * Returns -1 if the number was negated, 1 otherwise */ -static int rustsecp256k1_v0_7_0_scalar_cond_negate(rustsecp256k1_v0_7_0_scalar *a, int flag); +static int rustsecp256k1_v0_8_0_scalar_cond_negate(rustsecp256k1_v0_8_0_scalar *a, int flag); /** Compare two scalars. */ -static int rustsecp256k1_v0_7_0_scalar_eq(const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b); +static int rustsecp256k1_v0_8_0_scalar_eq(const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b); /** Find r1 and r2 such that r1+r2*2^128 = k. 
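
The r1 + r2*2^128 = k split documented above is pure limb surgery: in the 4x64 representation, r1 takes d[0..1] and r2 takes d[2..3], as the implementation further down shows. Scaled to a 64-bit k with a 32-bit split point:

    #include <stdint.h>
    #include <assert.h>

    int main(void) {
        /* Write k = r1 + r2 * 2^32 by moving limbs, then recombine. */
        uint64_t k = 0x0123456789ABCDEFull;
        uint64_t r1 = k & 0xFFFFFFFFull; /* low half, d[0..1] in the real code */
        uint64_t r2 = k >> 32;           /* high half, d[2..3] in the real code */
        assert(r1 + (r2 << 32) == k);
        return 0;
    }
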
*/ -static void rustsecp256k1_v0_7_0_scalar_split_128(rustsecp256k1_v0_7_0_scalar *r1, rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *k); +static void rustsecp256k1_v0_8_0_scalar_split_128(rustsecp256k1_v0_8_0_scalar *r1, rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *k); /** Find r1 and r2 such that r1+r2*lambda = k, - * where r1 and r2 or their negations are maximum 128 bits long (see rustsecp256k1_v0_7_0_ge_mul_lambda). */ -static void rustsecp256k1_v0_7_0_scalar_split_lambda(rustsecp256k1_v0_7_0_scalar *r1, rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *k); + * where r1 and r2 or their negations are maximum 128 bits long (see rustsecp256k1_v0_8_0_ge_mul_lambda). */ +static void rustsecp256k1_v0_8_0_scalar_split_lambda(rustsecp256k1_v0_8_0_scalar *r1, rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *k); /** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */ -static void rustsecp256k1_v0_7_0_scalar_mul_shift_var(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b, unsigned int shift); +static void rustsecp256k1_v0_8_0_scalar_mul_shift_var(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b, unsigned int shift); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ -static void rustsecp256k1_v0_7_0_scalar_cmov(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, int flag); +static void rustsecp256k1_v0_8_0_scalar_cmov(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, int flag); #endif /* SECP256K1_SCALAR_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h b/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h index 069f2b86e..7d0542e45 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_4x64.h @@ -12,7 +12,7 @@ /** A scalar modulo the group order of the secp256k1 curve. */ typedef struct { uint64_t d[4]; -} rustsecp256k1_v0_7_0_scalar; +} rustsecp256k1_v0_8_0_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}} diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h index 605bcf599..f43b85e99 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_4x64_impl.h @@ -7,6 +7,7 @@ #ifndef SECP256K1_SCALAR_REPR_IMPL_H #define SECP256K1_SCALAR_REPR_IMPL_H +#include "int128.h" #include "modinv64_impl.h" /* Limbs of the secp256k1 order. 
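
The new "int128.h" include above is the visible tip of the main upstream change in this file: raw uint128_t expressions are replaced by rustsecp256k1_v0_8_0_u128_* helper calls, so the same code can also be backed by a two-limb struct where no native 128-bit type exists. A minimal sketch of such a struct backend (illustrative names, a fixed 64-bit shift, and only the helpers needed for the additions below; not the vendored implementation):

    #include <stdint.h>
    #include <assert.h>

    typedef struct { uint64_t lo, hi; } u128;

    static void u128_from_u64(u128 *r, uint64_t a) { r->lo = a; r->hi = 0; }

    static void u128_accum_u64(u128 *r, uint64_t a) {
        r->lo += a;
        r->hi += (r->lo < a); /* carry out of the low word */
    }

    static uint64_t u128_to_u64(const u128 *a) { return a->lo; }
    static uint64_t u128_hi_u64(const u128 *a) { return a->hi; }
    static void u128_rshift64(u128 *r) { r->lo = r->hi; r->hi = 0; }

    int main(void) {
        u128 t;
        u128_from_u64(&t, UINT64_MAX);
        u128_accum_u64(&t, 1); /* must carry into the high word */
        assert(u128_to_u64(&t) == 0 && u128_hi_u64(&t) == 1);
        u128_rshift64(&t);
        assert(u128_to_u64(&t) == 1);
        return 0;
    }
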
*/ @@ -26,37 +27,37 @@ #define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) #define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL) -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_clear(rustsecp256k1_v0_7_0_scalar *r) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_scalar_clear(rustsecp256k1_v0_8_0_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_set_int(rustsecp256k1_v0_7_0_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_scalar_set_int(rustsecp256k1_v0_8_0_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_7_0_scalar_get_bits(const rustsecp256k1_v0_7_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_8_0_scalar_get_bits(const rustsecp256k1_v0_8_0_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_7_0_scalar_get_bits_var(const rustsecp256k1_v0_7_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_8_0_scalar_get_bits_var(const rustsecp256k1_v0_8_0_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 6 == offset >> 6) { - return rustsecp256k1_v0_7_0_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_8_0_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 6) + 1 < 4); return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1); } } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_check_overflow(const rustsecp256k1_v0_7_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_check_overflow(const rustsecp256k1_v0_8_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. 
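
The yes/no accumulation in scalar_check_overflow above is a branch-free lexicographic compare against the order, scanned from the most significant limb down; once either flag is set, later limbs cannot flip it. A self-contained version over arbitrary limbs (with masking on every line, which the real code can partly drop because the order's top limbs are all ones):

    #include <stdint.h>
    #include <assert.h>

    /* Branch-free "a >= n?" over four 64-bit limbs, a[3] most significant;
     * equality counts as overflow, matching scalar_set_b32's contract. */
    static int ge_n(const uint64_t a[4], const uint64_t n[4]) {
        int yes = 0;
        int no = 0;
        no  |= (a[3] < n[3]);
        yes |= (a[3] > n[3]) & ~no;
        no  |= (a[2] < n[2]) & ~yes;
        yes |= (a[2] > n[2]) & ~no;
        no  |= (a[1] < n[1]) & ~yes;
        yes |= (a[1] > n[1]) & ~no;
        yes |= (a[0] >= n[0]) & ~no;
        return yes;
    }

    int main(void) {
        const uint64_t n[4]  = {5, 0, 0, 1}; /* illustrative modulus */
        const uint64_t lo[4] = {4, 0, 0, 1}; /* n - 1 */
        const uint64_t eq[4] = {5, 0, 0, 1}; /* n     */
        const uint64_t hi[4] = {0, 0, 0, 2}; /* > n   */
        assert(!ge_n(lo, n) && ge_n(eq, n) && ge_n(hi, n));
        return 0;
    }
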
*/ @@ -68,94 +69,110 @@ SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_check_overflow(const rus return yes; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_reduce(rustsecp256k1_v0_7_0_scalar *r, unsigned int overflow) { - uint128_t t; +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_reduce(rustsecp256k1_v0_8_0_scalar *r, unsigned int overflow) { + rustsecp256k1_v0_8_0_uint128 t; VERIFY_CHECK(overflow <= 1); - t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0; - r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1; - r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2; - r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint64_t)r->d[3]; - r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; + rustsecp256k1_v0_8_0_u128_from_u64(&t, r->d[0]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, overflow * SECP256K1_N_C_0); + r->d[0] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[1]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, overflow * SECP256K1_N_C_1); + r->d[1] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[2]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, overflow * SECP256K1_N_C_2); + r->d[2] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[3]); + r->d[3] = rustsecp256k1_v0_8_0_u128_to_u64(&t); return overflow; } -static int rustsecp256k1_v0_7_0_scalar_add(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) { +static int rustsecp256k1_v0_8_0_scalar_add(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) { int overflow; - uint128_t t = (uint128_t)a->d[0] + b->d[0]; - r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)a->d[1] + b->d[1]; - r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)a->d[2] + b->d[2]; - r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)a->d[3] + b->d[3]; - r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - overflow = t + rustsecp256k1_v0_7_0_scalar_check_overflow(r); + rustsecp256k1_v0_8_0_uint128 t; + rustsecp256k1_v0_8_0_u128_from_u64(&t, a->d[0]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, b->d[0]); + r->d[0] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, a->d[1]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, b->d[1]); + r->d[1] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, a->d[2]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, b->d[2]); + r->d[2] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, a->d[3]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, b->d[3]); + r->d[3] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + overflow = rustsecp256k1_v0_8_0_u128_to_u64(&t) + rustsecp256k1_v0_8_0_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); - rustsecp256k1_v0_7_0_scalar_reduce(r, overflow); + rustsecp256k1_v0_8_0_scalar_reduce(r, overflow); return overflow; } -static void rustsecp256k1_v0_7_0_scalar_cadd_bit(rustsecp256k1_v0_7_0_scalar *r, unsigned int bit, int flag) { - uint128_t t; +static void 
rustsecp256k1_v0_8_0_scalar_cadd_bit(rustsecp256k1_v0_8_0_scalar *r, unsigned int bit, int flag) { + rustsecp256k1_v0_8_0_uint128 t; VERIFY_CHECK(bit < 256); bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */ - t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F)); - r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F)); - r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F)); - r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); - r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; + rustsecp256k1_v0_8_0_u128_from_u64(&t, r->d[0]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F)); + r->d[0] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[1]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F)); + r->d[1] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[2]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F)); + r->d[2] = rustsecp256k1_v0_8_0_u128_to_u64(&t); rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[3]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); + r->d[3] = rustsecp256k1_v0_8_0_u128_to_u64(&t); #ifdef VERIFY - VERIFY_CHECK((t >> 64) == 0); - VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_u128_hi_u64(&t) == 0); #endif } -static void rustsecp256k1_v0_7_0_scalar_set_b32(rustsecp256k1_v0_7_0_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_8_0_scalar_set_b32(rustsecp256k1_v0_8_0_scalar *r, const unsigned char *b32, int *overflow) { int over; r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56; r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56; r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56; r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56; - over = rustsecp256k1_v0_7_0_scalar_reduce(r, rustsecp256k1_v0_7_0_scalar_check_overflow(r)); + over = rustsecp256k1_v0_8_0_scalar_reduce(r, rustsecp256k1_v0_8_0_scalar_check_overflow(r)); if (overflow) { *overflow = over; } } -static void rustsecp256k1_v0_7_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_7_0_scalar* a) { +static void rustsecp256k1_v0_8_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_8_0_scalar* a) { bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3]; bin[8] = a->d[2] >> 56; 
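
scalar_get_b32 above (and scalar_set_b32 before it) is plain big-endian (de)serialization: the least significant limb d[0] occupies the last eight bytes, most significant byte first within each limb. A one-limb round trip:

    #include <stdint.h>
    #include <assert.h>

    int main(void) {
        uint64_t d = 0x0123456789ABCDEFull, back = 0;
        unsigned char bin[8];
        int i;
        for (i = 0; i < 8; i++) {
            bin[i] = (unsigned char)(d >> (56 - 8 * i)); /* big-endian out */
        }
        for (i = 0; i < 8; i++) {
            back = back << 8 | bin[i];                   /* and back in */
        }
        assert(back == d && bin[0] == 0x01 && bin[7] == 0xEF);
        return 0;
    }
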
bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2]; bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1]; bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_zero(const rustsecp256k1_v0_7_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_zero(const rustsecp256k1_v0_8_0_scalar *a) { return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0; } -static void rustsecp256k1_v0_7_0_scalar_negate(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a) { - uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_7_0_scalar_is_zero(a) == 0); - uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1; - r->d[0] = t & nonzero; t >>= 64; - t += (uint128_t)(~a->d[1]) + SECP256K1_N_1; - r->d[1] = t & nonzero; t >>= 64; - t += (uint128_t)(~a->d[2]) + SECP256K1_N_2; - r->d[2] = t & nonzero; t >>= 64; - t += (uint128_t)(~a->d[3]) + SECP256K1_N_3; - r->d[3] = t & nonzero; -} - -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_one(const rustsecp256k1_v0_7_0_scalar *a) { +static void rustsecp256k1_v0_8_0_scalar_negate(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a) { + uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (rustsecp256k1_v0_8_0_scalar_is_zero(a) == 0); + rustsecp256k1_v0_8_0_uint128 t; + rustsecp256k1_v0_8_0_u128_from_u64(&t, ~a->d[0]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, SECP256K1_N_0 + 1); + r->d[0] = rustsecp256k1_v0_8_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, ~a->d[1]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, SECP256K1_N_1); + r->d[1] = rustsecp256k1_v0_8_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, ~a->d[2]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, SECP256K1_N_2); + r->d[2] = rustsecp256k1_v0_8_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, ~a->d[3]); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, SECP256K1_N_3); + r->d[3] = rustsecp256k1_v0_8_0_u128_to_u64(&t) & nonzero; +} + +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_one(const rustsecp256k1_v0_8_0_scalar *a) { return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0; } -static int rustsecp256k1_v0_7_0_scalar_is_high(const rustsecp256k1_v0_7_0_scalar *a) { +static int rustsecp256k1_v0_8_0_scalar_is_high(const rustsecp256k1_v0_8_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_H_3); @@ -167,19 +184,24 @@ static int rustsecp256k1_v0_7_0_scalar_is_high(const rustsecp256k1_v0_7_0_scalar return yes; } -static int rustsecp256k1_v0_7_0_scalar_cond_negate(rustsecp256k1_v0_7_0_scalar *r, int flag) { +static int rustsecp256k1_v0_8_0_scalar_cond_negate(rustsecp256k1_v0_8_0_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_7_0_scalar_negate */ + * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_8_0_scalar_negate */ uint64_t mask = !flag - 1; - uint64_t nonzero = 
(rustsecp256k1_v0_7_0_scalar_is_zero(r) != 0) - 1; - uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); - r->d[0] = t & nonzero; t >>= 64; - t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); - r->d[1] = t & nonzero; t >>= 64; - t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); - r->d[2] = t & nonzero; t >>= 64; - t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); - r->d[3] = t & nonzero; + uint64_t nonzero = (rustsecp256k1_v0_8_0_scalar_is_zero(r) != 0) - 1; + rustsecp256k1_v0_8_0_uint128 t; + rustsecp256k1_v0_8_0_u128_from_u64(&t, r->d[0] ^ mask); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask); + r->d[0] = rustsecp256k1_v0_8_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[1] ^ mask); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, SECP256K1_N_1 & mask); + r->d[1] = rustsecp256k1_v0_8_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[2] ^ mask); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, SECP256K1_N_2 & mask); + r->d[2] = rustsecp256k1_v0_8_0_u128_to_u64(&t) & nonzero; rustsecp256k1_v0_8_0_u128_rshift(&t, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, r->d[3] ^ mask); + rustsecp256k1_v0_8_0_u128_accum_u64(&t, SECP256K1_N_3 & mask); + r->d[3] = rustsecp256k1_v0_8_0_u128_to_u64(&t) & nonzero; return 2 * (mask == 0) - 1; } @@ -189,9 +211,10 @@ static int rustsecp256k1_v0_7_0_scalar_cond_negate(rustsecp256k1_v0_7_0_scalar * #define muladd(a,b) { \ uint64_t tl, th; \ { \ - uint128_t t = (uint128_t)a * b; \ - th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = t; \ + rustsecp256k1_v0_8_0_uint128 t; \ + rustsecp256k1_v0_8_0_u128_mul(&t, a, b); \ + th = rustsecp256k1_v0_8_0_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \ + tl = rustsecp256k1_v0_8_0_u128_to_u64(&t); \ } \ c0 += tl; /* overflow is handled on the next line */ \ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ @@ -204,9 +227,10 @@ static int rustsecp256k1_v0_7_0_scalar_cond_negate(rustsecp256k1_v0_7_0_scalar * #define muladd_fast(a,b) { \ uint64_t tl, th; \ { \ - uint128_t t = (uint128_t)a * b; \ - th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = t; \ + rustsecp256k1_v0_8_0_uint128 t; \ + rustsecp256k1_v0_8_0_u128_mul(&t, a, b); \ + th = rustsecp256k1_v0_8_0_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \ + tl = rustsecp256k1_v0_8_0_u128_to_u64(&t); \ } \ c0 += tl; /* overflow is handled on the next line */ \ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ @@ -247,7 +271,7 @@ static int rustsecp256k1_v0_7_0_scalar_cond_negate(rustsecp256k1_v0_7_0_scalar * VERIFY_CHECK(c2 == 0); \ } -static void rustsecp256k1_v0_7_0_scalar_reduce_512(rustsecp256k1_v0_7_0_scalar *r, const uint64_t *l) { +static void rustsecp256k1_v0_8_0_scalar_reduce_512(rustsecp256k1_v0_8_0_scalar *r, const uint64_t *l) { #ifdef USE_ASM_X86_64 /* Reduce 512 bits into 385. 
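
The muladd/muladd_fast macros above maintain a three-limb accumulator (c0, c1, c2) and fold each 64x64-bit product in with explicit carry tests, exactly what the new u128 helpers express for the product itself. The same scheme scaled down to 16-bit limbs, where the final value can be checked against native 64-bit multiplication:

    #include <stdint.h>
    #include <assert.h>

    /* Add x*y into the (c0, c1, c2) accumulator, c0 lowest. */
    #define MULADD(x, y) do { \
        uint32_t t = (uint32_t)(x) * (y); \
        uint16_t th = (uint16_t)(t >> 16), tl = (uint16_t)t; \
        c0 = (uint16_t)(c0 + tl); th += (c0 < tl); /* th cannot overflow */ \
        c1 = (uint16_t)(c1 + th); c2 += (c1 < th); \
    } while (0)
    /* Pop the low 16 bits and shift the accumulator down one limb. */
    #define EXTRACT(out) do { (out) = c0; c0 = c1; c1 = c2; c2 = 0; } while (0)

    int main(void) {
        uint32_t a = 0xDEADBEEFu, b = 0xCAFEF00Du;
        uint16_t al = (uint16_t)a, ah = (uint16_t)(a >> 16);
        uint16_t bl = (uint16_t)b, bh = (uint16_t)(b >> 16);
        uint16_t c0 = 0, c1 = 0, c2 = 0, l[4];

        MULADD(al, bl);                 EXTRACT(l[0]); /* column 0 */
        MULADD(al, bh); MULADD(ah, bl); EXTRACT(l[1]); /* column 1 */
        MULADD(ah, bh);                 EXTRACT(l[2]); /* column 2 */
        EXTRACT(l[3]);                                 /* final carry */

        assert(((uint64_t)l[3] << 48 | (uint64_t)l[2] << 32 |
                (uint64_t)l[1] << 16 | l[0]) == (uint64_t)a * b);
        return 0;
    }
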
*/ uint64_t m0, m1, m2, m3, m4, m5, m6; @@ -484,8 +508,8 @@ static void rustsecp256k1_v0_7_0_scalar_reduce_512(rustsecp256k1_v0_7_0_scalar * : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1) : "rax", "rdx", "r8", "r9", "r10", "cc", "memory"); #else - uint128_t c; - uint64_t c0, c1, c2; + rustsecp256k1_v0_8_0_uint128 c128; + uint64_t c, c0, c1, c2; uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7]; uint64_t m0, m1, m2, m3, m4, m5; uint32_t m6; @@ -542,21 +566,25 @@ static void rustsecp256k1_v0_7_0_scalar_reduce_512(rustsecp256k1_v0_7_0_scalar * /* Reduce 258 bits into 256. */ /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */ - c = p0 + (uint128_t)SECP256K1_N_C_0 * p4; - r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; - c += p1 + (uint128_t)SECP256K1_N_C_1 * p4; - r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; - c += p2 + (uint128_t)p4; - r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; - c += p3; - r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; + rustsecp256k1_v0_8_0_u128_from_u64(&c128, p0); + rustsecp256k1_v0_8_0_u128_accum_mul(&c128, SECP256K1_N_C_0, p4); + r->d[0] = rustsecp256k1_v0_8_0_u128_to_u64(&c128); rustsecp256k1_v0_8_0_u128_rshift(&c128, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&c128, p1); + rustsecp256k1_v0_8_0_u128_accum_mul(&c128, SECP256K1_N_C_1, p4); + r->d[1] = rustsecp256k1_v0_8_0_u128_to_u64(&c128); rustsecp256k1_v0_8_0_u128_rshift(&c128, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&c128, p2); + rustsecp256k1_v0_8_0_u128_accum_u64(&c128, p4); + r->d[2] = rustsecp256k1_v0_8_0_u128_to_u64(&c128); rustsecp256k1_v0_8_0_u128_rshift(&c128, 64); + rustsecp256k1_v0_8_0_u128_accum_u64(&c128, p3); + r->d[3] = rustsecp256k1_v0_8_0_u128_to_u64(&c128); + c = rustsecp256k1_v0_8_0_u128_hi_u64(&c128); #endif /* Final reduction of r. 
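
The r[0..3] = p[0..3] + p4 * SECP256K1_N_C fold above works because a limb at bit position 256 stands for a multiple of 2^256, and 2^256 is congruent to N_C = 2^256 - n modulo the order n. The identity is exhaustively checkable at toy width:

    #include <assert.h>

    int main(void) {
        /* 4-bit words, modulus n = 13, so n_c = 2^4 - n = 3:
         * (hi*2^4 + lo) mod n == (hi*n_c + lo) mod n for all words. */
        unsigned n = 13, n_c = 16 - n, hi, lo;
        for (hi = 0; hi < 16; hi++) {
            for (lo = 0; lo < 16; lo++) {
                assert((hi * 16 + lo) % n == (hi * n_c + lo) % n);
            }
        }
        return 0;
    }
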
*/ - rustsecp256k1_v0_7_0_scalar_reduce(r, c + rustsecp256k1_v0_7_0_scalar_check_overflow(r)); + rustsecp256k1_v0_8_0_scalar_reduce(r, c + rustsecp256k1_v0_8_0_scalar_check_overflow(r)); } -static void rustsecp256k1_v0_7_0_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) { +static void rustsecp256k1_v0_8_0_scalar_mul_512(uint64_t l[8], const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) { #ifdef USE_ASM_X86_64 const uint64_t *pb = b->d; __asm__ __volatile__( @@ -730,13 +758,13 @@ static void rustsecp256k1_v0_7_0_scalar_mul_512(uint64_t l[8], const rustsecp256 #undef extract #undef extract_fast -static void rustsecp256k1_v0_7_0_scalar_mul(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) { +static void rustsecp256k1_v0_8_0_scalar_mul(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) { uint64_t l[8]; - rustsecp256k1_v0_7_0_scalar_mul_512(l, a, b); - rustsecp256k1_v0_7_0_scalar_reduce_512(r, l); + rustsecp256k1_v0_8_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_8_0_scalar_reduce_512(r, l); } -static int rustsecp256k1_v0_7_0_scalar_shr_int(rustsecp256k1_v0_7_0_scalar *r, int n) { +static int rustsecp256k1_v0_8_0_scalar_shr_int(rustsecp256k1_v0_8_0_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); @@ -748,7 +776,7 @@ static int rustsecp256k1_v0_7_0_scalar_shr_int(rustsecp256k1_v0_7_0_scalar *r, i return ret; } -static void rustsecp256k1_v0_7_0_scalar_split_128(rustsecp256k1_v0_7_0_scalar *r1, rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *k) { +static void rustsecp256k1_v0_8_0_scalar_split_128(rustsecp256k1_v0_8_0_scalar *r1, rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; r1->d[2] = 0; @@ -759,17 +787,17 @@ static void rustsecp256k1_v0_7_0_scalar_split_128(rustsecp256k1_v0_7_0_scalar *r r2->d[3] = 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_eq(const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_eq(const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_mul_shift_var(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b, unsigned int shift) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_scalar_mul_shift_var(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b, unsigned int shift) { uint64_t l[8]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; VERIFY_CHECK(shift >= 256); - rustsecp256k1_v0_7_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_8_0_scalar_mul_512(l, a, b); shiftlimbs = shift >> 6; shiftlow = shift & 0x3F; shifthigh = 64 - shiftlow; @@ -777,10 +805,10 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_mul_shift_var(rustsecp2 r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[3] = shift < 320 ? 
(l[3 + shiftlimbs] >> shiftlow) : 0; - rustsecp256k1_v0_7_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); + rustsecp256k1_v0_8_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_scalar_cmov(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_scalar_cmov(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, int flag) { uint64_t mask0, mask1; VG_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = flag + ~((uint64_t)0); @@ -791,10 +819,10 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_scalar_cmov(rustsecp256k1_v0_7 r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1); } -static void rustsecp256k1_v0_7_0_scalar_from_signed62(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_modinv64_signed62 *a) { +static void rustsecp256k1_v0_8_0_scalar_from_signed62(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_modinv64_signed62 *a) { const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4]; - /* The output from rustsecp256k1_v0_7_0_modinv64{_var} should be normalized to range [0,modulus), and + /* The output from rustsecp256k1_v0_8_0_modinv64{_var} should be normalized to range [0,modulus), and * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4). */ VERIFY_CHECK(a0 >> 62 == 0); @@ -809,16 +837,16 @@ static void rustsecp256k1_v0_7_0_scalar_from_signed62(rustsecp256k1_v0_7_0_scala r->d[3] = a3 >> 6 | a4 << 56; #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_check_overflow(r) == 0); #endif } -static void rustsecp256k1_v0_7_0_scalar_to_signed62(rustsecp256k1_v0_7_0_modinv64_signed62 *r, const rustsecp256k1_v0_7_0_scalar *a) { +static void rustsecp256k1_v0_8_0_scalar_to_signed62(rustsecp256k1_v0_8_0_modinv64_signed62 *r, const rustsecp256k1_v0_8_0_scalar *a) { const uint64_t M62 = UINT64_MAX >> 2; const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3]; #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_check_overflow(a) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_check_overflow(a) == 0); #endif r->v[0] = a0 & M62; @@ -828,40 +856,40 @@ static void rustsecp256k1_v0_7_0_scalar_to_signed62(rustsecp256k1_v0_7_0_modinv6 r->v[4] = a3 >> 56; } -static const rustsecp256k1_v0_7_0_modinv64_modinfo rustsecp256k1_v0_7_0_const_modinfo_scalar = { +static const rustsecp256k1_v0_8_0_modinv64_modinfo rustsecp256k1_v0_8_0_const_modinfo_scalar = { {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}}, 0x34F20099AA774EC1LL }; -static void rustsecp256k1_v0_7_0_scalar_inverse(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *x) { - rustsecp256k1_v0_7_0_modinv64_signed62 s; +static void rustsecp256k1_v0_8_0_scalar_inverse(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *x) { + rustsecp256k1_v0_8_0_modinv64_signed62 s; #ifdef VERIFY - int zero_in = rustsecp256k1_v0_7_0_scalar_is_zero(x); + int zero_in = rustsecp256k1_v0_8_0_scalar_is_zero(x); #endif - rustsecp256k1_v0_7_0_scalar_to_signed62(&s, x); - rustsecp256k1_v0_7_0_modinv64(&s, &rustsecp256k1_v0_7_0_const_modinfo_scalar); - rustsecp256k1_v0_7_0_scalar_from_signed62(r, &s); + rustsecp256k1_v0_8_0_scalar_to_signed62(&s, x); + rustsecp256k1_v0_8_0_modinv64(&s, &rustsecp256k1_v0_8_0_const_modinfo_scalar); + rustsecp256k1_v0_8_0_scalar_from_signed62(r, &s); 
#ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(r) == zero_in); + VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(r) == zero_in); #endif } -static void rustsecp256k1_v0_7_0_scalar_inverse_var(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *x) { - rustsecp256k1_v0_7_0_modinv64_signed62 s; +static void rustsecp256k1_v0_8_0_scalar_inverse_var(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *x) { + rustsecp256k1_v0_8_0_modinv64_signed62 s; #ifdef VERIFY - int zero_in = rustsecp256k1_v0_7_0_scalar_is_zero(x); + int zero_in = rustsecp256k1_v0_8_0_scalar_is_zero(x); #endif - rustsecp256k1_v0_7_0_scalar_to_signed62(&s, x); - rustsecp256k1_v0_7_0_modinv64_var(&s, &rustsecp256k1_v0_7_0_const_modinfo_scalar); - rustsecp256k1_v0_7_0_scalar_from_signed62(r, &s); + rustsecp256k1_v0_8_0_scalar_to_signed62(&s, x); + rustsecp256k1_v0_8_0_modinv64_var(&s, &rustsecp256k1_v0_8_0_const_modinfo_scalar); + rustsecp256k1_v0_8_0_scalar_from_signed62(r, &s); #ifdef VERIFY - VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(r) == zero_in); + VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(r) == zero_in); #endif } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_even(const rustsecp256k1_v0_7_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_even(const rustsecp256k1_v0_8_0_scalar *a) { return !(a->d[0] & 1); } diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h b/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h index 247ac53a5..8c783ffd3 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_8x32.h @@ -12,7 +12,7 @@ /** A scalar modulo the group order of the secp256k1 curve. */ typedef struct { uint32_t d[8]; -} rustsecp256k1_v0_7_0_scalar; +} rustsecp256k1_v0_8_0_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}} diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h index f913f77cc..ac6b80e9b 100644 --- a/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/scalar_8x32_impl.h @@ -36,7 +36,7 @@ #define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL) -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_clear(rustsecp256k1_v0_7_0_scalar *r) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_scalar_clear(rustsecp256k1_v0_8_0_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; @@ -47,7 +47,7 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_clear(rustsecp256k1_v0_ r->d[7] = 0; } -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_set_int(rustsecp256k1_v0_7_0_scalar *r, unsigned int v) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_scalar_set_int(rustsecp256k1_v0_8_0_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; @@ -58,23 +58,23 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_set_int(rustsecp256k1_v r->d[7] = 0; } -SECP256K1_INLINE static unsigned int rustsecp256k1_v0_7_0_scalar_get_bits(const rustsecp256k1_v0_7_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_8_0_scalar_get_bits(const rustsecp256k1_v0_8_0_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); } -SECP256K1_INLINE static unsigned int 
rustsecp256k1_v0_7_0_scalar_get_bits_var(const rustsecp256k1_v0_7_0_scalar *a, unsigned int offset, unsigned int count) { +SECP256K1_INLINE static unsigned int rustsecp256k1_v0_8_0_scalar_get_bits_var(const rustsecp256k1_v0_8_0_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 5 == offset >> 5) { - return rustsecp256k1_v0_7_0_scalar_get_bits(a, offset, count); + return rustsecp256k1_v0_8_0_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 5) + 1 < 8); return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1); } } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_check_overflow(const rustsecp256k1_v0_7_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_check_overflow(const rustsecp256k1_v0_8_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */ @@ -92,7 +92,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_check_overflow(const rus return yes; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_reduce(rustsecp256k1_v0_7_0_scalar *r, uint32_t overflow) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_reduce(rustsecp256k1_v0_8_0_scalar *r, uint32_t overflow) { uint64_t t; VERIFY_CHECK(overflow <= 1); t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0; @@ -114,7 +114,7 @@ SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_reduce(rustsecp256k1_v0_ return overflow; } -static int rustsecp256k1_v0_7_0_scalar_add(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) { +static int rustsecp256k1_v0_8_0_scalar_add(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) { int overflow; uint64_t t = (uint64_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; @@ -132,13 +132,13 @@ static int rustsecp256k1_v0_7_0_scalar_add(rustsecp256k1_v0_7_0_scalar *r, const r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[7] + b->d[7]; r->d[7] = t & 0xFFFFFFFFULL; t >>= 32; - overflow = t + rustsecp256k1_v0_7_0_scalar_check_overflow(r); + overflow = t + rustsecp256k1_v0_8_0_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); - rustsecp256k1_v0_7_0_scalar_reduce(r, overflow); + rustsecp256k1_v0_8_0_scalar_reduce(r, overflow); return overflow; } -static void rustsecp256k1_v0_7_0_scalar_cadd_bit(rustsecp256k1_v0_7_0_scalar *r, unsigned int bit, int flag) { +static void rustsecp256k1_v0_8_0_scalar_cadd_bit(rustsecp256k1_v0_8_0_scalar *r, unsigned int bit, int flag) { uint64_t t; VERIFY_CHECK(bit < 256); bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */ @@ -160,11 +160,11 @@ static void rustsecp256k1_v0_7_0_scalar_cadd_bit(rustsecp256k1_v0_7_0_scalar *r, r->d[7] = t & 0xFFFFFFFFULL; #ifdef VERIFY VERIFY_CHECK((t >> 32) == 0); - VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_check_overflow(r) == 0); #endif } -static void rustsecp256k1_v0_7_0_scalar_set_b32(rustsecp256k1_v0_7_0_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_8_0_scalar_set_b32(rustsecp256k1_v0_8_0_scalar *r, const unsigned char *b32, int *overflow) { int over; r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24; r->d[1] = (uint32_t)b32[27] | 
(uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24; @@ -174,13 +174,13 @@ static void rustsecp256k1_v0_7_0_scalar_set_b32(rustsecp256k1_v0_7_0_scalar *r, r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24; r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24; r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24; - over = rustsecp256k1_v0_7_0_scalar_reduce(r, rustsecp256k1_v0_7_0_scalar_check_overflow(r)); + over = rustsecp256k1_v0_8_0_scalar_reduce(r, rustsecp256k1_v0_8_0_scalar_check_overflow(r)); if (overflow) { *overflow = over; } } -static void rustsecp256k1_v0_7_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_7_0_scalar* a) { +static void rustsecp256k1_v0_8_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_8_0_scalar* a) { bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7]; bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6]; bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5]; @@ -191,12 +191,12 @@ static void rustsecp256k1_v0_7_0_scalar_get_b32(unsigned char *bin, const rustse bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_zero(const rustsecp256k1_v0_7_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_zero(const rustsecp256k1_v0_8_0_scalar *a) { return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } -static void rustsecp256k1_v0_7_0_scalar_negate(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a) { - uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_7_0_scalar_is_zero(a) == 0); +static void rustsecp256k1_v0_8_0_scalar_negate(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a) { + uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_8_0_scalar_is_zero(a) == 0); uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1; r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[1]) + SECP256K1_N_1; @@ -215,11 +215,11 @@ static void rustsecp256k1_v0_7_0_scalar_negate(rustsecp256k1_v0_7_0_scalar *r, c r->d[7] = t & nonzero; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_one(const rustsecp256k1_v0_7_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_one(const rustsecp256k1_v0_8_0_scalar *a) { return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } -static int rustsecp256k1_v0_7_0_scalar_is_high(const rustsecp256k1_v0_7_0_scalar *a) { +static int rustsecp256k1_v0_8_0_scalar_is_high(const rustsecp256k1_v0_8_0_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_H_7); @@ -237,11 +237,11 @@ static int rustsecp256k1_v0_7_0_scalar_is_high(const rustsecp256k1_v0_7_0_scalar return yes; } -static int rustsecp256k1_v0_7_0_scalar_cond_negate(rustsecp256k1_v0_7_0_scalar *r, int flag) { +static int rustsecp256k1_v0_8_0_scalar_cond_negate(rustsecp256k1_v0_8_0_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_7_0_scalar_negate */ + * if we are flag = 1, mask = 11...11 and this is identical to rustsecp256k1_v0_8_0_scalar_negate */ uint32_t mask = !flag - 1; - uint32_t nonzero = 0xFFFFFFFFUL * 
(rustsecp256k1_v0_7_0_scalar_is_zero(r) == 0); + uint32_t nonzero = 0xFFFFFFFFUL * (rustsecp256k1_v0_8_0_scalar_is_zero(r) == 0); uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); @@ -326,7 +326,7 @@ static int rustsecp256k1_v0_7_0_scalar_cond_negate(rustsecp256k1_v0_7_0_scalar * VERIFY_CHECK(c2 == 0); \ } -static void rustsecp256k1_v0_7_0_scalar_reduce_512(rustsecp256k1_v0_7_0_scalar *r, const uint32_t *l) { +static void rustsecp256k1_v0_8_0_scalar_reduce_512(rustsecp256k1_v0_8_0_scalar *r, const uint32_t *l) { uint64_t c; uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15]; uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12; @@ -465,10 +465,10 @@ static void rustsecp256k1_v0_7_0_scalar_reduce_512(rustsecp256k1_v0_7_0_scalar * r->d[7] = c & 0xFFFFFFFFUL; c >>= 32; /* Final reduction of r. */ - rustsecp256k1_v0_7_0_scalar_reduce(r, c + rustsecp256k1_v0_7_0_scalar_check_overflow(r)); + rustsecp256k1_v0_8_0_scalar_reduce(r, c + rustsecp256k1_v0_8_0_scalar_check_overflow(r)); } -static void rustsecp256k1_v0_7_0_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) { +static void rustsecp256k1_v0_8_0_scalar_mul_512(uint32_t *l, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; @@ -563,13 +563,13 @@ static void rustsecp256k1_v0_7_0_scalar_mul_512(uint32_t *l, const rustsecp256k1 #undef extract #undef extract_fast -static void rustsecp256k1_v0_7_0_scalar_mul(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) { +static void rustsecp256k1_v0_8_0_scalar_mul(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) { uint32_t l[16]; - rustsecp256k1_v0_7_0_scalar_mul_512(l, a, b); - rustsecp256k1_v0_7_0_scalar_reduce_512(r, l); + rustsecp256k1_v0_8_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_8_0_scalar_reduce_512(r, l); } -static int rustsecp256k1_v0_7_0_scalar_shr_int(rustsecp256k1_v0_7_0_scalar *r, int n) { +static int rustsecp256k1_v0_8_0_scalar_shr_int(rustsecp256k1_v0_8_0_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); @@ -585,7 +585,7 @@ static int rustsecp256k1_v0_7_0_scalar_shr_int(rustsecp256k1_v0_7_0_scalar *r, i return ret; } -static void rustsecp256k1_v0_7_0_scalar_split_128(rustsecp256k1_v0_7_0_scalar *r1, rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *k) { +static void rustsecp256k1_v0_8_0_scalar_split_128(rustsecp256k1_v0_8_0_scalar *r1, rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; r1->d[2] = k->d[2]; @@ -604,17 +604,17 @@ static void rustsecp256k1_v0_7_0_scalar_split_128(rustsecp256k1_v0_7_0_scalar *r r2->d[7] = 0; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_eq(const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_eq(const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; } -SECP256K1_INLINE static void 
rustsecp256k1_v0_7_0_scalar_mul_shift_var(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b, unsigned int shift) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_scalar_mul_shift_var(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b, unsigned int shift) { uint32_t l[16]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; VERIFY_CHECK(shift >= 256); - rustsecp256k1_v0_7_0_scalar_mul_512(l, a, b); + rustsecp256k1_v0_8_0_scalar_mul_512(l, a, b); shiftlimbs = shift >> 5; shiftlow = shift & 0x1F; shifthigh = 32 - shiftlow; @@ -626,10 +626,10 @@ SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_mul_shift_var(rustsecp2 r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0; - rustsecp256k1_v0_7_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); + rustsecp256k1_v0_8_0_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); } -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_scalar_cmov(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_scalar_cmov(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = flag + ~((uint32_t)0); @@ -644,11 +644,11 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_scalar_cmov(rustsecp256k1_v0_7 r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1); } -static void rustsecp256k1_v0_7_0_scalar_from_signed30(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_modinv32_signed30 *a) { +static void rustsecp256k1_v0_8_0_scalar_from_signed30(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_modinv32_signed30 *a) { const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4], a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8]; - /* The output from rustsecp256k1_v0_7_0_modinv32{_var} should be normalized to range [0,modulus), and + /* The output from rustsecp256k1_v0_8_0_modinv32{_var} should be normalized to range [0,modulus), and * have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8). 
 */
     VERIFY_CHECK(a0 >> 30 == 0);
@@ -671,17 +671,17 @@ static void rustsecp256k1_v0_7_0_scalar_from_signed30(rustsecp256k1_v0_7_0_scala
     r->d[7] = a7 >> 14 | a8 << 16;
 
 #ifdef VERIFY
-    VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_check_overflow(r) == 0);
+    VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_check_overflow(r) == 0);
 #endif
 }
 
-static void rustsecp256k1_v0_7_0_scalar_to_signed30(rustsecp256k1_v0_7_0_modinv32_signed30 *r, const rustsecp256k1_v0_7_0_scalar *a) {
+static void rustsecp256k1_v0_8_0_scalar_to_signed30(rustsecp256k1_v0_8_0_modinv32_signed30 *r, const rustsecp256k1_v0_8_0_scalar *a) {
     const uint32_t M30 = UINT32_MAX >> 2;
     const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3], a4 = a->d[4],
                    a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];
 
 #ifdef VERIFY
-    VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_check_overflow(a) == 0);
+    VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_check_overflow(a) == 0);
 #endif
 
     r->v[0] = a0 & M30;
@@ -695,40 +695,40 @@ static void rustsecp256k1_v0_7_0_scalar_to_signed30(rustsecp256k1_v0_7_0_modinv3
     r->v[8] = a7 >> 16;
 }
 
-static const rustsecp256k1_v0_7_0_modinv32_modinfo rustsecp256k1_v0_7_0_const_modinfo_scalar = {
+static const rustsecp256k1_v0_8_0_modinv32_modinfo rustsecp256k1_v0_8_0_const_modinfo_scalar = {
     {{0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, 0, 0, 0, 65536}},
     0x2A774EC1L
 };
 
-static void rustsecp256k1_v0_7_0_scalar_inverse(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *x) {
-    rustsecp256k1_v0_7_0_modinv32_signed30 s;
+static void rustsecp256k1_v0_8_0_scalar_inverse(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *x) {
+    rustsecp256k1_v0_8_0_modinv32_signed30 s;
 #ifdef VERIFY
-    int zero_in = rustsecp256k1_v0_7_0_scalar_is_zero(x);
+    int zero_in = rustsecp256k1_v0_8_0_scalar_is_zero(x);
 #endif
-    rustsecp256k1_v0_7_0_scalar_to_signed30(&s, x);
-    rustsecp256k1_v0_7_0_modinv32(&s, &rustsecp256k1_v0_7_0_const_modinfo_scalar);
-    rustsecp256k1_v0_7_0_scalar_from_signed30(r, &s);
+    rustsecp256k1_v0_8_0_scalar_to_signed30(&s, x);
+    rustsecp256k1_v0_8_0_modinv32(&s, &rustsecp256k1_v0_8_0_const_modinfo_scalar);
+    rustsecp256k1_v0_8_0_scalar_from_signed30(r, &s);
 #ifdef VERIFY
-    VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(r) == zero_in);
+    VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(r) == zero_in);
 #endif
 }
 
-static void rustsecp256k1_v0_7_0_scalar_inverse_var(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *x) {
-    rustsecp256k1_v0_7_0_modinv32_signed30 s;
+static void rustsecp256k1_v0_8_0_scalar_inverse_var(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *x) {
+    rustsecp256k1_v0_8_0_modinv32_signed30 s;
 #ifdef VERIFY
-    int zero_in = rustsecp256k1_v0_7_0_scalar_is_zero(x);
+    int zero_in = rustsecp256k1_v0_8_0_scalar_is_zero(x);
 #endif
-    rustsecp256k1_v0_7_0_scalar_to_signed30(&s, x);
-    rustsecp256k1_v0_7_0_modinv32_var(&s, &rustsecp256k1_v0_7_0_const_modinfo_scalar);
-    rustsecp256k1_v0_7_0_scalar_from_signed30(r, &s);
+    rustsecp256k1_v0_8_0_scalar_to_signed30(&s, x);
+    rustsecp256k1_v0_8_0_modinv32_var(&s, &rustsecp256k1_v0_8_0_const_modinfo_scalar);
+    rustsecp256k1_v0_8_0_scalar_from_signed30(r, &s);
 #ifdef VERIFY
-    VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(r) == zero_in);
+    VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(r) == zero_in);
 #endif
 }
 
-SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_even(const rustsecp256k1_v0_7_0_scalar *a) {
+SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_even(const rustsecp256k1_v0_8_0_scalar *a) {
     return !(a->d[0] & 1);
 }
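The 32-bit scalar code above avoids secret-dependent branches by expanding a flag into an all-zeros or all-ones mask (see `scalar_cmov` and `scalar_cond_negate`). A minimal standalone sketch of that idiom, with hypothetical names and values that are not part of the vendored code:

```c
#include <stdint.h>
#include <stdio.h>

/* Branch-free select: returns a when flag == 0, b when flag == 1.
 * mask0 is 0xFFFFFFFF for flag == 0 and 0x00000000 for flag == 1,
 * mirroring the mask0/mask1 construction in scalar_cmov above. */
static uint32_t ct_select(uint32_t a, uint32_t b, int flag) {
    uint32_t mask0 = flag + ~((uint32_t)0); /* flag=1 -> 0x00000000, flag=0 -> 0xFFFFFFFF */
    uint32_t mask1 = ~mask0;
    return (a & mask0) | (b & mask1);
}

int main(void) {
    printf("%u\n", (unsigned)ct_select(7, 9, 0)); /* prints 7 */
    printf("%u\n", (unsigned)ct_select(7, 9, 1)); /* prints 9 */
    return 0;
}
```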
diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_impl.h
index 2d7a1c8e5..3a37f1ad1 100644
--- a/secp256k1-sys/depend/secp256k1/src/scalar_impl.h
+++ b/secp256k1-sys/depend/secp256k1/src/scalar_impl.h
@@ -28,13 +28,13 @@
 #error "Please select wide multiplication implementation"
 #endif
 
-static const rustsecp256k1_v0_7_0_scalar rustsecp256k1_v0_7_0_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
-static const rustsecp256k1_v0_7_0_scalar rustsecp256k1_v0_7_0_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+static const rustsecp256k1_v0_8_0_scalar rustsecp256k1_v0_8_0_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+static const rustsecp256k1_v0_8_0_scalar rustsecp256k1_v0_8_0_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
 
-static int rustsecp256k1_v0_7_0_scalar_set_b32_seckey(rustsecp256k1_v0_7_0_scalar *r, const unsigned char *bin) {
+static int rustsecp256k1_v0_8_0_scalar_set_b32_seckey(rustsecp256k1_v0_8_0_scalar *r, const unsigned char *bin) {
     int overflow;
-    rustsecp256k1_v0_7_0_scalar_set_b32(r, bin, &overflow);
-    return (!overflow) & (!rustsecp256k1_v0_7_0_scalar_is_zero(r));
+    rustsecp256k1_v0_8_0_scalar_set_b32(r, bin, &overflow);
+    return (!overflow) & (!rustsecp256k1_v0_8_0_scalar_is_zero(r));
 }
 
 /* These parameters are generated using sage/gen_exhaustive_groups.sage. */
@@ -53,7 +53,7 @@ static int rustsecp256k1_v0_7_0_scalar_set_b32_seckey(rustsecp256k1_v0_7_0_scala
  * nontrivial to get full test coverage for the exhaustive tests. We therefore
  * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
  */
-static void rustsecp256k1_v0_7_0_scalar_split_lambda(rustsecp256k1_v0_7_0_scalar *r1, rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *k) {
+static void rustsecp256k1_v0_8_0_scalar_split_lambda(rustsecp256k1_v0_8_0_scalar *r1, rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *k) {
     *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
     *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
 }
@@ -61,13 +61,13 @@ static void rustsecp256k1_v0_7_0_scalar_split_lambda(rustsecp256k1_v0_7_0_scalar
 /**
  * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
  * lambda is:
  */
-static const rustsecp256k1_v0_7_0_scalar rustsecp256k1_v0_7_0_const_lambda = SECP256K1_SCALAR_CONST(
+static const rustsecp256k1_v0_8_0_scalar rustsecp256k1_v0_8_0_const_lambda = SECP256K1_SCALAR_CONST(
     0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
     0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
 );
 
 #ifdef VERIFY
-static void rustsecp256k1_v0_7_0_scalar_split_lambda_verify(const rustsecp256k1_v0_7_0_scalar *r1, const rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *k);
+static void rustsecp256k1_v0_8_0_scalar_split_lambda_verify(const rustsecp256k1_v0_8_0_scalar *r1, const rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *k);
 #endif
 
 /*
@@ -120,44 +120,44 @@ static void rustsecp256k1_v0_7_0_scalar_split_lambda_verify(const rustsecp256k1_
  *
  * See proof below.
*/ -static void rustsecp256k1_v0_7_0_scalar_split_lambda(rustsecp256k1_v0_7_0_scalar *r1, rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *k) { - rustsecp256k1_v0_7_0_scalar c1, c2; - static const rustsecp256k1_v0_7_0_scalar minus_b1 = SECP256K1_SCALAR_CONST( +static void rustsecp256k1_v0_8_0_scalar_split_lambda(rustsecp256k1_v0_8_0_scalar *r1, rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *k) { + rustsecp256k1_v0_8_0_scalar c1, c2; + static const rustsecp256k1_v0_8_0_scalar minus_b1 = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL, 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL ); - static const rustsecp256k1_v0_7_0_scalar minus_b2 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_8_0_scalar minus_b2 = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL ); - static const rustsecp256k1_v0_7_0_scalar g1 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_8_0_scalar g1 = SECP256K1_SCALAR_CONST( 0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL, 0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL ); - static const rustsecp256k1_v0_7_0_scalar g2 = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_8_0_scalar g2 = SECP256K1_SCALAR_CONST( 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL, 0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL ); VERIFY_CHECK(r1 != k); VERIFY_CHECK(r2 != k); /* these _var calls are constant time since the shift amount is constant */ - rustsecp256k1_v0_7_0_scalar_mul_shift_var(&c1, k, &g1, 384); - rustsecp256k1_v0_7_0_scalar_mul_shift_var(&c2, k, &g2, 384); - rustsecp256k1_v0_7_0_scalar_mul(&c1, &c1, &minus_b1); - rustsecp256k1_v0_7_0_scalar_mul(&c2, &c2, &minus_b2); - rustsecp256k1_v0_7_0_scalar_add(r2, &c1, &c2); - rustsecp256k1_v0_7_0_scalar_mul(r1, r2, &rustsecp256k1_v0_7_0_const_lambda); - rustsecp256k1_v0_7_0_scalar_negate(r1, r1); - rustsecp256k1_v0_7_0_scalar_add(r1, r1, k); + rustsecp256k1_v0_8_0_scalar_mul_shift_var(&c1, k, &g1, 384); + rustsecp256k1_v0_8_0_scalar_mul_shift_var(&c2, k, &g2, 384); + rustsecp256k1_v0_8_0_scalar_mul(&c1, &c1, &minus_b1); + rustsecp256k1_v0_8_0_scalar_mul(&c2, &c2, &minus_b2); + rustsecp256k1_v0_8_0_scalar_add(r2, &c1, &c2); + rustsecp256k1_v0_8_0_scalar_mul(r1, r2, &rustsecp256k1_v0_8_0_const_lambda); + rustsecp256k1_v0_8_0_scalar_negate(r1, r1); + rustsecp256k1_v0_8_0_scalar_add(r1, r1, k); #ifdef VERIFY - rustsecp256k1_v0_7_0_scalar_split_lambda_verify(r1, r2, k); + rustsecp256k1_v0_8_0_scalar_split_lambda_verify(r1, r2, k); #endif } #ifdef VERIFY /* - * Proof for rustsecp256k1_v0_7_0_scalar_split_lambda's bounds. + * Proof for rustsecp256k1_v0_8_0_scalar_split_lambda's bounds. * * Let * - epsilon1 = 2^256 * |g1/2^384 - b2/d| @@ -260,8 +260,8 @@ static void rustsecp256k1_v0_7_0_scalar_split_lambda(rustsecp256k1_v0_7_0_scalar * * Q.E.D. 
 */
-static void rustsecp256k1_v0_7_0_scalar_split_lambda_verify(const rustsecp256k1_v0_7_0_scalar *r1, const rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *k) {
-    rustsecp256k1_v0_7_0_scalar s;
+static void rustsecp256k1_v0_8_0_scalar_split_lambda_verify(const rustsecp256k1_v0_8_0_scalar *r1, const rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *k) {
+    rustsecp256k1_v0_8_0_scalar s;
     unsigned char buf1[32];
     unsigned char buf2[32];
@@ -277,19 +277,19 @@ static void rustsecp256k1_v0_7_0_scalar_split_lambda_verify(const rustsecp256k1_
         0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb,
         0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed
     };
 
-    rustsecp256k1_v0_7_0_scalar_mul(&s, &rustsecp256k1_v0_7_0_const_lambda, r2);
-    rustsecp256k1_v0_7_0_scalar_add(&s, &s, r1);
-    VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_eq(&s, k));
+    rustsecp256k1_v0_8_0_scalar_mul(&s, &rustsecp256k1_v0_8_0_const_lambda, r2);
+    rustsecp256k1_v0_8_0_scalar_add(&s, &s, r1);
+    VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_eq(&s, k));
 
-    rustsecp256k1_v0_7_0_scalar_negate(&s, r1);
-    rustsecp256k1_v0_7_0_scalar_get_b32(buf1, r1);
-    rustsecp256k1_v0_7_0_scalar_get_b32(buf2, &s);
-    VERIFY_CHECK(rustsecp256k1_v0_7_0_memcmp_var(buf1, k1_bound, 32) < 0 || rustsecp256k1_v0_7_0_memcmp_var(buf2, k1_bound, 32) < 0);
+    rustsecp256k1_v0_8_0_scalar_negate(&s, r1);
+    rustsecp256k1_v0_8_0_scalar_get_b32(buf1, r1);
+    rustsecp256k1_v0_8_0_scalar_get_b32(buf2, &s);
+    VERIFY_CHECK(rustsecp256k1_v0_8_0_memcmp_var(buf1, k1_bound, 32) < 0 || rustsecp256k1_v0_8_0_memcmp_var(buf2, k1_bound, 32) < 0);
 
-    rustsecp256k1_v0_7_0_scalar_negate(&s, r2);
-    rustsecp256k1_v0_7_0_scalar_get_b32(buf1, r2);
-    rustsecp256k1_v0_7_0_scalar_get_b32(buf2, &s);
-    VERIFY_CHECK(rustsecp256k1_v0_7_0_memcmp_var(buf1, k2_bound, 32) < 0 || rustsecp256k1_v0_7_0_memcmp_var(buf2, k2_bound, 32) < 0);
+    rustsecp256k1_v0_8_0_scalar_negate(&s, r2);
+    rustsecp256k1_v0_8_0_scalar_get_b32(buf1, r2);
+    rustsecp256k1_v0_8_0_scalar_get_b32(buf2, &s);
+    VERIFY_CHECK(rustsecp256k1_v0_8_0_memcmp_var(buf1, k2_bound, 32) < 0 || rustsecp256k1_v0_8_0_memcmp_var(buf2, k2_bound, 32) < 0);
 }
 #endif /* VERIFY */
 #endif /* !defined(EXHAUSTIVE_TEST_ORDER) */
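The `split_lambda` code above decomposes a scalar k into short parts r1 and r2 with k = r1 + r2 * lambda (mod n); that identity is exactly what `split_lambda_verify` re-checks. A toy recombination check in the same spirit, using a small prime and an arbitrary lambda in place of the real group constants (illustrative values only):

```c
#include <stdint.h>
#include <stdio.h>

#define N 7919u      /* toy "group order" (prime), not secp256k1's n */
#define LAMBDA 1234u /* toy endomorphism scalar, illustrative only */

/* Recombine a decomposition and check k == r1 + r2*lambda (mod N). */
static int split_ok(uint32_t k, uint32_t r1, uint32_t r2) {
    return (r1 + (uint64_t)r2 * LAMBDA) % N == k % N;
}

int main(void) {
    uint32_t k = 5555u;
    uint32_t r2 = 3u; /* pick any short r2 */
    /* r1 = k - r2*lambda (mod N), so the identity holds by construction */
    uint32_t r1 = (k + N - (uint32_t)(((uint64_t)r2 * LAMBDA) % N)) % N;
    printf("split ok: %d\n", split_ok(k, r1, r2)); /* prints 1 */
    return 0;
}
```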
diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_low.h b/secp256k1-sys/depend/secp256k1/src/scalar_low.h
index 93f57f2cf..f5edffa7a 100644
--- a/secp256k1-sys/depend/secp256k1/src/scalar_low.h
+++ b/secp256k1-sys/depend/secp256k1/src/scalar_low.h
@@ -10,7 +10,7 @@
 #include <stdint.h>
 
 /** A scalar modulo the group order of the secp256k1 curve. */
-typedef uint32_t rustsecp256k1_v0_7_0_scalar;
+typedef uint32_t rustsecp256k1_v0_8_0_scalar;
 
 #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) (d0)
diff --git a/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h b/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h
index 09df13ada..338d6a403 100644
--- a/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h
+++ b/secp256k1-sys/depend/secp256k1/src/scalar_low_impl.h
@@ -11,43 +11,43 @@
 #include <string.h>
 
-SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_even(const rustsecp256k1_v0_7_0_scalar *a) {
+SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_even(const rustsecp256k1_v0_8_0_scalar *a) {
     return !(*a & 1);
 }
 
-SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_clear(rustsecp256k1_v0_7_0_scalar *r) { *r = 0; }
-SECP256K1_INLINE static void rustsecp256k1_v0_7_0_scalar_set_int(rustsecp256k1_v0_7_0_scalar *r, unsigned int v) { *r = v; }
+SECP256K1_INLINE static void rustsecp256k1_v0_8_0_scalar_clear(rustsecp256k1_v0_8_0_scalar *r) { *r = 0; }
+SECP256K1_INLINE static void rustsecp256k1_v0_8_0_scalar_set_int(rustsecp256k1_v0_8_0_scalar *r, unsigned int v) { *r = v; }
 
-SECP256K1_INLINE static unsigned int rustsecp256k1_v0_7_0_scalar_get_bits(const rustsecp256k1_v0_7_0_scalar *a, unsigned int offset, unsigned int count) {
+SECP256K1_INLINE static unsigned int rustsecp256k1_v0_8_0_scalar_get_bits(const rustsecp256k1_v0_8_0_scalar *a, unsigned int offset, unsigned int count) {
     if (offset < 32)
         return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
     else
         return 0;
 }
 
-SECP256K1_INLINE static unsigned int rustsecp256k1_v0_7_0_scalar_get_bits_var(const rustsecp256k1_v0_7_0_scalar *a, unsigned int offset, unsigned int count) {
-    return rustsecp256k1_v0_7_0_scalar_get_bits(a, offset, count);
+SECP256K1_INLINE static unsigned int rustsecp256k1_v0_8_0_scalar_get_bits_var(const rustsecp256k1_v0_8_0_scalar *a, unsigned int offset, unsigned int count) {
+    return rustsecp256k1_v0_8_0_scalar_get_bits(a, offset, count);
 }
 
-SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_check_overflow(const rustsecp256k1_v0_7_0_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
+SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_check_overflow(const rustsecp256k1_v0_8_0_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
 
-static int rustsecp256k1_v0_7_0_scalar_add(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) {
+static int rustsecp256k1_v0_8_0_scalar_add(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) {
     *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
     return *r < *b;
 }
 
-static void rustsecp256k1_v0_7_0_scalar_cadd_bit(rustsecp256k1_v0_7_0_scalar *r, unsigned int bit, int flag) {
+static void rustsecp256k1_v0_8_0_scalar_cadd_bit(rustsecp256k1_v0_8_0_scalar *r, unsigned int bit, int flag) {
     if (flag && bit < 32)
         *r += ((uint32_t)1 << bit);
 #ifdef VERIFY
     VERIFY_CHECK(bit < 32);
     /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t.
*/ VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER); - VERIFY_CHECK(rustsecp256k1_v0_7_0_scalar_check_overflow(r) == 0); + VERIFY_CHECK(rustsecp256k1_v0_8_0_scalar_check_overflow(r) == 0); #endif } -static void rustsecp256k1_v0_7_0_scalar_set_b32(rustsecp256k1_v0_7_0_scalar *r, const unsigned char *b32, int *overflow) { +static void rustsecp256k1_v0_8_0_scalar_set_b32(rustsecp256k1_v0_8_0_scalar *r, const unsigned char *b32, int *overflow) { int i; int over = 0; *r = 0; @@ -61,16 +61,16 @@ static void rustsecp256k1_v0_7_0_scalar_set_b32(rustsecp256k1_v0_7_0_scalar *r, if (overflow) *overflow = over; } -static void rustsecp256k1_v0_7_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_7_0_scalar* a) { +static void rustsecp256k1_v0_8_0_scalar_get_b32(unsigned char *bin, const rustsecp256k1_v0_8_0_scalar* a) { memset(bin, 0, 32); bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a; } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_zero(const rustsecp256k1_v0_7_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_zero(const rustsecp256k1_v0_8_0_scalar *a) { return *a == 0; } -static void rustsecp256k1_v0_7_0_scalar_negate(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a) { +static void rustsecp256k1_v0_8_0_scalar_negate(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a) { if (*a == 0) { *r = 0; } else { @@ -78,24 +78,24 @@ static void rustsecp256k1_v0_7_0_scalar_negate(rustsecp256k1_v0_7_0_scalar *r, c } } -SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_is_one(const rustsecp256k1_v0_7_0_scalar *a) { +SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_is_one(const rustsecp256k1_v0_8_0_scalar *a) { return *a == 1; } -static int rustsecp256k1_v0_7_0_scalar_is_high(const rustsecp256k1_v0_7_0_scalar *a) { +static int rustsecp256k1_v0_8_0_scalar_is_high(const rustsecp256k1_v0_8_0_scalar *a) { return *a > EXHAUSTIVE_TEST_ORDER / 2; } -static int rustsecp256k1_v0_7_0_scalar_cond_negate(rustsecp256k1_v0_7_0_scalar *r, int flag) { - if (flag) rustsecp256k1_v0_7_0_scalar_negate(r, r); +static int rustsecp256k1_v0_8_0_scalar_cond_negate(rustsecp256k1_v0_8_0_scalar *r, int flag) { + if (flag) rustsecp256k1_v0_8_0_scalar_negate(r, r); return flag ? 
-1 : 1;
 }
 
-static void rustsecp256k1_v0_7_0_scalar_mul(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) {
+static void rustsecp256k1_v0_8_0_scalar_mul(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) {
     *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
 }
 
-static int rustsecp256k1_v0_7_0_scalar_shr_int(rustsecp256k1_v0_7_0_scalar *r, int n) {
+static int rustsecp256k1_v0_8_0_scalar_shr_int(rustsecp256k1_v0_8_0_scalar *r, int n) {
     int ret;
     VERIFY_CHECK(n > 0);
     VERIFY_CHECK(n < 16);
@@ -104,16 +104,16 @@ static int rustsecp256k1_v0_7_0_scalar_shr_int(rustsecp256k1_v0_7_0_scalar *r, i
     return ret;
 }
 
-static void rustsecp256k1_v0_7_0_scalar_split_128(rustsecp256k1_v0_7_0_scalar *r1, rustsecp256k1_v0_7_0_scalar *r2, const rustsecp256k1_v0_7_0_scalar *a) {
+static void rustsecp256k1_v0_8_0_scalar_split_128(rustsecp256k1_v0_8_0_scalar *r1, rustsecp256k1_v0_8_0_scalar *r2, const rustsecp256k1_v0_8_0_scalar *a) {
     *r1 = *a;
     *r2 = 0;
 }
 
-SECP256K1_INLINE static int rustsecp256k1_v0_7_0_scalar_eq(const rustsecp256k1_v0_7_0_scalar *a, const rustsecp256k1_v0_7_0_scalar *b) {
+SECP256K1_INLINE static int rustsecp256k1_v0_8_0_scalar_eq(const rustsecp256k1_v0_8_0_scalar *a, const rustsecp256k1_v0_8_0_scalar *b) {
     return *a == *b;
 }
 
-static SECP256K1_INLINE void rustsecp256k1_v0_7_0_scalar_cmov(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *a, int flag) {
+static SECP256K1_INLINE void rustsecp256k1_v0_8_0_scalar_cmov(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *a, int flag) {
     uint32_t mask0, mask1;
     VG_CHECK_VERIFY(r, sizeof(*r));
     mask0 = flag + ~((uint32_t)0);
@@ -121,7 +121,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_scalar_cmov(rustsecp256k1_v0_7
     *r = (*r & mask0) | (*a & mask1);
 }
 
-static void rustsecp256k1_v0_7_0_scalar_inverse(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *x) {
+static void rustsecp256k1_v0_8_0_scalar_inverse(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *x) {
     int i;
     *r = 0;
     for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
@@ -132,8 +132,8 @@ static void rustsecp256k1_v0_7_0_scalar_inverse(rustsecp256k1_v0_7_0_scalar *r,
     VERIFY_CHECK(*r != 0);
 }
 
-static void rustsecp256k1_v0_7_0_scalar_inverse_var(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_scalar *x) {
-    rustsecp256k1_v0_7_0_scalar_inverse(r, x);
+static void rustsecp256k1_v0_8_0_scalar_inverse_var(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_scalar *x) {
+    rustsecp256k1_v0_8_0_scalar_inverse(r, x);
 }
 
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
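In the `scalar_low` variants above, a scalar is just a `uint32_t` reduced modulo `EXHAUSTIVE_TEST_ORDER`, small enough that the exhaustive tests can enumerate every value and, as in `scalar_inverse`, find inverses by brute force. A compact standalone sketch of the same idea, assuming a toy order of 13:

```c
#include <stdint.h>
#include <stdio.h>

#define ORDER 13u /* stand-in for EXHAUSTIVE_TEST_ORDER */

typedef uint32_t toy_scalar;

static toy_scalar toy_mul(toy_scalar a, toy_scalar b) { return (a * b) % ORDER; }

/* Brute-force inverse, mirroring scalar_inverse in scalar_low_impl.h:
 * try every candidate until a*x == 1 (mod ORDER). */
static toy_scalar toy_inverse(toy_scalar x) {
    toy_scalar i;
    for (i = 1; i < ORDER; i++) {
        if (toy_mul(i, x) == 1u) return i;
    }
    return 0; /* x == 0 has no inverse */
}

int main(void) {
    toy_scalar x;
    for (x = 1; x < ORDER; x++) {
        printf("%u * %u = 1 (mod %u)\n", (unsigned)x, (unsigned)toy_inverse(x), (unsigned)ORDER);
    }
    return 0;
}
```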
diff --git a/secp256k1-sys/depend/secp256k1/src/scratch.h b/secp256k1-sys/depend/secp256k1/src/scratch.h
index 2291210b5..770f847cb 100644
--- a/secp256k1-sys/depend/secp256k1/src/scratch.h
+++ b/secp256k1-sys/depend/secp256k1/src/scratch.h
@@ -9,7 +9,7 @@
 
 /* The typedef is used internally; the struct name is used in the public API
  * (where it is exposed as a different typedef) */
-typedef struct rustsecp256k1_v0_7_0_scratch_space_struct {
+typedef struct rustsecp256k1_v0_8_0_scratch_space_struct {
     /** guard against interpreting this object as other types */
     unsigned char magic[8];
     /** actual allocated data */
@@ -19,24 +19,24 @@ typedef struct rustsecp256k1_v0_7_0_scratch_space_struct {
     size_t alloc_size;
     /** maximum size available to allocate */
     size_t max_size;
-} rustsecp256k1_v0_7_0_scratch;
+} rustsecp256k1_v0_8_0_scratch;
 
-static rustsecp256k1_v0_7_0_scratch* rustsecp256k1_v0_7_0_scratch_create(const rustsecp256k1_v0_7_0_callback* error_callback, size_t max_size);
+static rustsecp256k1_v0_8_0_scratch* rustsecp256k1_v0_8_0_scratch_create(const rustsecp256k1_v0_8_0_callback* error_callback, size_t max_size);
 
-static void rustsecp256k1_v0_7_0_scratch_destroy(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch* scratch);
+static void rustsecp256k1_v0_8_0_scratch_destroy(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch);
 
 /** Returns an opaque object used to "checkpoint" a scratch space. Used
- * with `rustsecp256k1_v0_7_0_scratch_apply_checkpoint` to undo allocations. */
-static size_t rustsecp256k1_v0_7_0_scratch_checkpoint(const rustsecp256k1_v0_7_0_callback* error_callback, const rustsecp256k1_v0_7_0_scratch* scratch);
+ * with `rustsecp256k1_v0_8_0_scratch_apply_checkpoint` to undo allocations. */
+static size_t rustsecp256k1_v0_8_0_scratch_checkpoint(const rustsecp256k1_v0_8_0_callback* error_callback, const rustsecp256k1_v0_8_0_scratch* scratch);
 
-/** Applies a check point received from `rustsecp256k1_v0_7_0_scratch_checkpoint`,
+/** Applies a check point received from `rustsecp256k1_v0_8_0_scratch_checkpoint`,
  * undoing all allocations since that point. */
-static void rustsecp256k1_v0_7_0_scratch_apply_checkpoint(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch* scratch, size_t checkpoint);
+static void rustsecp256k1_v0_8_0_scratch_apply_checkpoint(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch, size_t checkpoint);
 
 /** Returns the maximum allocation the scratch space will allow */
-static size_t rustsecp256k1_v0_7_0_scratch_max_allocation(const rustsecp256k1_v0_7_0_callback* error_callback, const rustsecp256k1_v0_7_0_scratch* scratch, size_t n_objects);
+static size_t rustsecp256k1_v0_8_0_scratch_max_allocation(const rustsecp256k1_v0_8_0_callback* error_callback, const rustsecp256k1_v0_8_0_scratch* scratch, size_t n_objects);
 
 /** Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space */
-static void *rustsecp256k1_v0_7_0_scratch_alloc(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch* scratch, size_t n);
+static void *rustsecp256k1_v0_8_0_scratch_alloc(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch, size_t n);
 
 #endif
diff --git a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h
index d1883e03c..31d2ddf8f 100644
--- a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h
+++ b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h
@@ -10,29 +10,79 @@
 #include "util.h"
 #include "scratch.h"
 
-static size_t rustsecp256k1_v0_7_0_scratch_checkpoint(const rustsecp256k1_v0_7_0_callback* error_callback, const rustsecp256k1_v0_7_0_scratch* scratch) {
-    if (rustsecp256k1_v0_7_0_memcmp_var(scratch->magic, "scratch", 8) != 0) {
-        rustsecp256k1_v0_7_0_callback_call(error_callback, "invalid scratch space");
+static rustsecp256k1_v0_8_0_scratch* rustsecp256k1_v0_8_0_scratch_create(const rustsecp256k1_v0_8_0_callback* error_callback, size_t size) {
+    const size_t base_alloc = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_8_0_scratch));
+    void *alloc = checked_malloc(error_callback, base_alloc + size);
+    rustsecp256k1_v0_8_0_scratch* ret = (rustsecp256k1_v0_8_0_scratch *)alloc;
+    if (ret != NULL) {
+        memset(ret, 0, sizeof(*ret));
+        memcpy(ret->magic, 
"scratch", 8); + ret->data = (void *) ((char *) alloc + base_alloc); + ret->max_size = size; + } + return ret; +} + +static void rustsecp256k1_v0_8_0_scratch_destroy(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch) { + if (scratch != NULL) { + VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */ + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); + return; + } + memset(scratch->magic, 0, sizeof(scratch->magic)); + free(scratch); + } +} + +static rustsecp256k1_v0_8_0_scratch* rustsecp256k1_v0_8_0_scratch_create(const rustsecp256k1_v0_8_0_callback* error_callback, size_t size) { + const size_t base_alloc = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_8_0_scratch)); + void *alloc = checked_malloc(error_callback, base_alloc + size); + rustsecp256k1_v0_8_0_scratch* ret = (rustsecp256k1_v0_8_0_scratch *)alloc; + if (ret != NULL) { + memset(ret, 0, sizeof(*ret)); + memcpy(ret->magic, "scratch", 8); + ret->data = (void *) ((char *) alloc + base_alloc); + ret->max_size = size; + } + return ret; +} + +static void rustsecp256k1_v0_8_0_scratch_destroy(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch) { + if (scratch != NULL) { + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); + return; + } + VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */ + memset(scratch->magic, 0, sizeof(scratch->magic)); + free(scratch); + } +} + +static size_t rustsecp256k1_v0_8_0_scratch_checkpoint(const rustsecp256k1_v0_8_0_callback* error_callback, const rustsecp256k1_v0_8_0_scratch* scratch) { + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); return 0; } return scratch->alloc_size; } -static void rustsecp256k1_v0_7_0_scratch_apply_checkpoint(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch* scratch, size_t checkpoint) { - if (rustsecp256k1_v0_7_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_7_0_callback_call(error_callback, "invalid scratch space"); +static void rustsecp256k1_v0_8_0_scratch_apply_checkpoint(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch, size_t checkpoint) { + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); return; } if (checkpoint > scratch->alloc_size) { - rustsecp256k1_v0_7_0_callback_call(error_callback, "invalid checkpoint"); + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid checkpoint"); return; } scratch->alloc_size = checkpoint; } -static size_t rustsecp256k1_v0_7_0_scratch_max_allocation(const rustsecp256k1_v0_7_0_callback* error_callback, const rustsecp256k1_v0_7_0_scratch* scratch, size_t objects) { - if (rustsecp256k1_v0_7_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_7_0_callback_call(error_callback, "invalid scratch space"); +static size_t rustsecp256k1_v0_8_0_scratch_max_allocation(const rustsecp256k1_v0_8_0_callback* error_callback, const rustsecp256k1_v0_8_0_scratch* scratch, size_t objects) { + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + 
rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); return 0; } /* Ensure that multiplication will not wrap around */ @@ -45,7 +95,7 @@ static size_t rustsecp256k1_v0_7_0_scratch_max_allocation(const rustsecp256k1_v0 return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1); } -static void *rustsecp256k1_v0_7_0_scratch_alloc(const rustsecp256k1_v0_7_0_callback* error_callback, rustsecp256k1_v0_7_0_scratch* scratch, size_t size) { +static void *rustsecp256k1_v0_8_0_scratch_alloc(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch, size_t size) { void *ret; size_t rounded_size; @@ -56,8 +106,8 @@ static void *rustsecp256k1_v0_7_0_scratch_alloc(const rustsecp256k1_v0_7_0_callb } size = rounded_size; - if (rustsecp256k1_v0_7_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { - rustsecp256k1_v0_7_0_callback_call(error_callback, "invalid scratch space"); + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); return NULL; } diff --git a/secp256k1-sys/depend/secp256k1/src/scratch_impl.h.orig b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h.orig new file mode 100644 index 000000000..488e61935 --- /dev/null +++ b/secp256k1-sys/depend/secp256k1/src/scratch_impl.h.orig @@ -0,0 +1,99 @@ +/*********************************************************************** + * Copyright (c) 2017 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + ***********************************************************************/ + +#ifndef SECP256K1_SCRATCH_IMPL_H +#define SECP256K1_SCRATCH_IMPL_H + +#include "util.h" +#include "scratch.h" + +static rustsecp256k1_v0_8_0_scratch* rustsecp256k1_v0_8_0_scratch_create(const rustsecp256k1_v0_8_0_callback* error_callback, size_t size) { + const size_t base_alloc = ROUND_TO_ALIGN(sizeof(rustsecp256k1_v0_8_0_scratch)); + void *alloc = checked_malloc(error_callback, base_alloc + size); + rustsecp256k1_v0_8_0_scratch* ret = (rustsecp256k1_v0_8_0_scratch *)alloc; + if (ret != NULL) { + memset(ret, 0, sizeof(*ret)); + memcpy(ret->magic, "scratch", 8); + ret->data = (void *) ((char *) alloc + base_alloc); + ret->max_size = size; + } + return ret; +} + +static void rustsecp256k1_v0_8_0_scratch_destroy(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch) { + if (scratch != NULL) { + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); + return; + } + VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */ + memset(scratch->magic, 0, sizeof(scratch->magic)); + free(scratch); + } +} + +static size_t rustsecp256k1_v0_8_0_scratch_checkpoint(const rustsecp256k1_v0_8_0_callback* error_callback, const rustsecp256k1_v0_8_0_scratch* scratch) { + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); + return 0; + } + return scratch->alloc_size; +} + +static void rustsecp256k1_v0_8_0_scratch_apply_checkpoint(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch, size_t checkpoint) { + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid 
scratch space"); + return; + } + if (checkpoint > scratch->alloc_size) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid checkpoint"); + return; + } + scratch->alloc_size = checkpoint; +} + +static size_t rustsecp256k1_v0_8_0_scratch_max_allocation(const rustsecp256k1_v0_8_0_callback* error_callback, const rustsecp256k1_v0_8_0_scratch* scratch, size_t objects) { + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); + return 0; + } + /* Ensure that multiplication will not wrap around */ + if (ALIGNMENT > 1 && objects > SIZE_MAX/(ALIGNMENT - 1)) { + return 0; + } + if (scratch->max_size - scratch->alloc_size <= objects * (ALIGNMENT - 1)) { + return 0; + } + return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1); +} + +static void *rustsecp256k1_v0_8_0_scratch_alloc(const rustsecp256k1_v0_8_0_callback* error_callback, rustsecp256k1_v0_8_0_scratch* scratch, size_t size) { + void *ret; + size_t rounded_size; + + rounded_size = ROUND_TO_ALIGN(size); + /* Check that rounding did not wrap around */ + if (rounded_size < size) { + return NULL; + } + size = rounded_size; + + if (rustsecp256k1_v0_8_0_memcmp_var(scratch->magic, "scratch", 8) != 0) { + rustsecp256k1_v0_8_0_callback_call(error_callback, "invalid scratch space"); + return NULL; + } + + if (size > scratch->max_size - scratch->alloc_size) { + return NULL; + } + ret = (void *) ((char *) scratch->data + scratch->alloc_size); + memset(ret, 0, size); + scratch->alloc_size += size; + + return ret; +} + +#endif diff --git a/secp256k1-sys/depend/secp256k1/src/secp256k1.c b/secp256k1-sys/depend/secp256k1/src/secp256k1.c index 7d9dc0a24..a7f7d57f3 100644 --- a/secp256k1-sys/depend/secp256k1/src/secp256k1.c +++ b/secp256k1-sys/depend/secp256k1/src/secp256k1.c @@ -4,6 +4,17 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ +/* This is a C project. It should not be compiled with a C++ compiler, + * and we error out if we detect one. + * + * We still want to be able to test the project with a C++ compiler + * because it is still good to know if this will lead to real trouble, so + * there is a possibility to override the check. But be warned that + * compiling with a C++ compiler is not supported. */ +#if defined(__cplusplus) && !defined(SECP256K1_CPLUSPLUS_TEST_OVERRIDE) +#error Trying to compile a C project with a C++ compiler. 
diff --git a/secp256k1-sys/depend/secp256k1/src/secp256k1.c b/secp256k1-sys/depend/secp256k1/src/secp256k1.c
index 7d9dc0a24..a7f7d57f3 100644
--- a/secp256k1-sys/depend/secp256k1/src/secp256k1.c
+++ b/secp256k1-sys/depend/secp256k1/src/secp256k1.c
@@ -4,6 +4,17 @@
  * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
  ***********************************************************************/
 
+/* This is a C project. It should not be compiled with a C++ compiler,
+ * and we error out if we detect one.
+ *
+ * We still want to be able to test the project with a C++ compiler
+ * because it is still good to know if this will lead to real trouble, so
+ * there is a possibility to override the check. But be warned that
+ * compiling with a C++ compiler is not supported. */
+#if defined(__cplusplus) && !defined(SECP256K1_CPLUSPLUS_TEST_OVERRIDE)
+#error Trying to compile a C project with a C++ compiler.
+#endif
+
 #define SECP256K1_BUILD
 
 #include "../include/secp256k1.h"
@@ -11,6 +22,7 @@
 #include "assumptions.h"
 #include "util.h"
+
 #include "field_impl.h"
 #include "scalar_impl.h"
 #include "group_impl.h"
@@ -20,6 +32,7 @@
 #include "ecdsa_impl.h"
 #include "eckey_impl.h"
 #include "hash_impl.h"
+#include "int128_impl.h"
 #include "scratch_impl.h"
 #include "selftest.h"
 
@@ -33,39 +46,48 @@
 #define ARG_CHECK(cond) do { \
     if (EXPECT(!(cond), 0)) { \
-        rustsecp256k1_v0_7_0_callback_call(&ctx->illegal_callback, #cond); \
+        rustsecp256k1_v0_8_0_callback_call(&ctx->illegal_callback, #cond); \
         return 0; \
     } \
 } while(0)
 
 #define ARG_CHECK_NO_RETURN(cond) do { \
     if (EXPECT(!(cond), 0)) { \
-        rustsecp256k1_v0_7_0_callback_call(&ctx->illegal_callback, #cond); \
+        rustsecp256k1_v0_8_0_callback_call(&ctx->illegal_callback, #cond); \
     } \
 } while(0)
 
-struct rustsecp256k1_v0_7_0_context_struct {
-    rustsecp256k1_v0_7_0_ecmult_gen_context ecmult_gen_ctx;
-    rustsecp256k1_v0_7_0_callback illegal_callback;
-    rustsecp256k1_v0_7_0_callback error_callback;
+/* Note that whenever you change the context struct, you must also change the
+ * context_eq function. */
+struct rustsecp256k1_v0_8_0_context_struct {
+    rustsecp256k1_v0_8_0_ecmult_gen_context ecmult_gen_ctx;
+    rustsecp256k1_v0_8_0_callback illegal_callback;
+    rustsecp256k1_v0_8_0_callback error_callback;
     int declassify;
 };
 
-static const rustsecp256k1_v0_7_0_context rustsecp256k1_v0_7_0_context_no_precomp_ = {
+static const rustsecp256k1_v0_8_0_context rustsecp256k1_v0_8_0_context_static_ = {
     { 0 },
-    { rustsecp256k1_v0_7_0_default_illegal_callback_fn, 0 },
-    { rustsecp256k1_v0_7_0_default_error_callback_fn, 0 },
+    { rustsecp256k1_v0_8_0_default_illegal_callback_fn, 0 },
+    { rustsecp256k1_v0_8_0_default_error_callback_fn, 0 },
     0
 };
 
-const rustsecp256k1_v0_7_0_context *rustsecp256k1_v0_7_0_context_no_precomp = &rustsecp256k1_v0_7_0_context_no_precomp_;
+const rustsecp256k1_v0_8_0_context *rustsecp256k1_v0_8_0_context_static = &rustsecp256k1_v0_8_0_context_static_;
+const rustsecp256k1_v0_8_0_context *rustsecp256k1_v0_8_0_context_no_precomp = &rustsecp256k1_v0_8_0_context_static_;
 
-size_t rustsecp256k1_v0_7_0_context_preallocated_size(unsigned int flags) {
-    size_t ret = sizeof(rustsecp256k1_v0_7_0_context);
+void rustsecp256k1_v0_8_0_selftest(void) {
+    if (!rustsecp256k1_v0_8_0_selftest_passes()) {
+        rustsecp256k1_v0_8_0_callback_call(&default_error_callback, "self test failed");
+    }
+}
+
+size_t rustsecp256k1_v0_8_0_context_preallocated_size(unsigned int flags) {
+    size_t ret = sizeof(rustsecp256k1_v0_8_0_context);
     /* A return value of 0 is reserved as an indicator for errors when we call this function internally.
*/ VERIFY_CHECK(ret != 0); if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { - rustsecp256k1_v0_7_0_callback_call(&default_illegal_callback, + rustsecp256k1_v0_8_0_callback_call(&default_illegal_callback, "Invalid flags"); return 0; } @@ -73,67 +95,65 @@ size_t rustsecp256k1_v0_7_0_context_preallocated_size(unsigned int flags) { return ret; } -size_t rustsecp256k1_v0_7_0_context_preallocated_clone_size(const rustsecp256k1_v0_7_0_context* ctx) { - size_t ret = sizeof(rustsecp256k1_v0_7_0_context); +size_t rustsecp256k1_v0_8_0_context_preallocated_clone_size(const rustsecp256k1_v0_8_0_context* ctx) { + size_t ret = sizeof(rustsecp256k1_v0_8_0_context); VERIFY_CHECK(ctx != NULL); return ret; } -rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_preallocated_create(void* prealloc, unsigned int flags) { +rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_preallocated_create(void* prealloc, unsigned int flags) { size_t prealloc_size; - rustsecp256k1_v0_7_0_context* ret; + rustsecp256k1_v0_8_0_context* ret; - if (!rustsecp256k1_v0_7_0_selftest()) { - rustsecp256k1_v0_7_0_callback_call(&default_error_callback, "self test failed"); - } + rustsecp256k1_v0_8_0_selftest(); - prealloc_size = rustsecp256k1_v0_7_0_context_preallocated_size(flags); + prealloc_size = rustsecp256k1_v0_8_0_context_preallocated_size(flags); if (prealloc_size == 0) { return NULL; } VERIFY_CHECK(prealloc != NULL); - ret = (rustsecp256k1_v0_7_0_context*)prealloc; + ret = (rustsecp256k1_v0_8_0_context*)prealloc; ret->illegal_callback = default_illegal_callback; ret->error_callback = default_error_callback; - /* Flags have been checked by rustsecp256k1_v0_7_0_context_preallocated_size. */ + /* Flags have been checked by rustsecp256k1_v0_8_0_context_preallocated_size. 
*/ VERIFY_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_CONTEXT); - rustsecp256k1_v0_7_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx); + rustsecp256k1_v0_8_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx); ret->declassify = !!(flags & SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY); return ret; } -rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_preallocated_clone(const rustsecp256k1_v0_7_0_context* ctx, void* prealloc) { - rustsecp256k1_v0_7_0_context* ret; +rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_preallocated_clone(const rustsecp256k1_v0_8_0_context* ctx, void* prealloc) { + rustsecp256k1_v0_8_0_context* ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(prealloc != NULL); - ret = (rustsecp256k1_v0_7_0_context*)prealloc; + ret = (rustsecp256k1_v0_8_0_context*)prealloc; *ret = *ctx; return ret; } -void rustsecp256k1_v0_7_0_context_preallocated_destroy(rustsecp256k1_v0_7_0_context* ctx) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_7_0_context_no_precomp); +void rustsecp256k1_v0_8_0_context_preallocated_destroy(rustsecp256k1_v0_8_0_context* ctx) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_8_0_context_static); if (ctx != NULL) { - rustsecp256k1_v0_7_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); + rustsecp256k1_v0_8_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); } } -void rustsecp256k1_v0_7_0_context_set_illegal_callback(rustsecp256k1_v0_7_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_7_0_context_no_precomp); +void rustsecp256k1_v0_8_0_context_set_illegal_callback(rustsecp256k1_v0_8_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_8_0_context_static); if (fun == NULL) { - fun = rustsecp256k1_v0_7_0_default_illegal_callback_fn; + fun = rustsecp256k1_v0_8_0_default_illegal_callback_fn; } ctx->illegal_callback.fn = fun; ctx->illegal_callback.data = data; } -void rustsecp256k1_v0_7_0_context_set_error_callback(rustsecp256k1_v0_7_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_7_0_context_no_precomp); +void rustsecp256k1_v0_8_0_context_set_error_callback(rustsecp256k1_v0_8_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_8_0_context_static); if (fun == NULL) { - fun = rustsecp256k1_v0_7_0_default_error_callback_fn; + fun = rustsecp256k1_v0_8_0_default_error_callback_fn; } ctx->error_callback.fn = fun; ctx->error_callback.data = data; @@ -143,7 +163,7 @@ void rustsecp256k1_v0_7_0_context_set_error_callback(rustsecp256k1_v0_7_0_contex * of the software. This is setup for use with valgrind but could be substituted with * the appropriate instrumentation for other analysis tools. 
*/ -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_declassify(const rustsecp256k1_v0_7_0_context* ctx, const void *p, size_t len) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_declassify(const rustsecp256k1_v0_8_0_context* ctx, const void *p, size_t len) { #if defined(VALGRIND) if (EXPECT(ctx->declassify,0)) VALGRIND_MAKE_MEM_DEFINED(p, len); #else @@ -153,59 +173,59 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_declassify(const rustsecp256k1 #endif } -static int rustsecp256k1_v0_7_0_pubkey_load(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ge* ge, const rustsecp256k1_v0_7_0_pubkey* pubkey) { - if (sizeof(rustsecp256k1_v0_7_0_ge_storage) == 64) { - /* When the rustsecp256k1_v0_7_0_ge_storage type is exactly 64 byte, use its - * representation inside rustsecp256k1_v0_7_0_pubkey, as conversion is very fast. - * Note that rustsecp256k1_v0_7_0_pubkey_save must use the same representation. */ - rustsecp256k1_v0_7_0_ge_storage s; +static int rustsecp256k1_v0_8_0_pubkey_load(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ge* ge, const rustsecp256k1_v0_8_0_pubkey* pubkey) { + if (sizeof(rustsecp256k1_v0_8_0_ge_storage) == 64) { + /* When the rustsecp256k1_v0_8_0_ge_storage type is exactly 64 byte, use its + * representation inside rustsecp256k1_v0_8_0_pubkey, as conversion is very fast. + * Note that rustsecp256k1_v0_8_0_pubkey_save must use the same representation. */ + rustsecp256k1_v0_8_0_ge_storage s; memcpy(&s, &pubkey->data[0], sizeof(s)); - rustsecp256k1_v0_7_0_ge_from_storage(ge, &s); + rustsecp256k1_v0_8_0_ge_from_storage(ge, &s); } else { /* Otherwise, fall back to 32-byte big endian for X and Y. */ - rustsecp256k1_v0_7_0_fe x, y; - rustsecp256k1_v0_7_0_fe_set_b32(&x, pubkey->data); - rustsecp256k1_v0_7_0_fe_set_b32(&y, pubkey->data + 32); - rustsecp256k1_v0_7_0_ge_set_xy(ge, &x, &y); + rustsecp256k1_v0_8_0_fe x, y; + rustsecp256k1_v0_8_0_fe_set_b32(&x, pubkey->data); + rustsecp256k1_v0_8_0_fe_set_b32(&y, pubkey->data + 32); + rustsecp256k1_v0_8_0_ge_set_xy(ge, &x, &y); } - ARG_CHECK(!rustsecp256k1_v0_7_0_fe_is_zero(&ge->x)); + ARG_CHECK(!rustsecp256k1_v0_8_0_fe_is_zero(&ge->x)); return 1; } -static void rustsecp256k1_v0_7_0_pubkey_save(rustsecp256k1_v0_7_0_pubkey* pubkey, rustsecp256k1_v0_7_0_ge* ge) { - if (sizeof(rustsecp256k1_v0_7_0_ge_storage) == 64) { - rustsecp256k1_v0_7_0_ge_storage s; - rustsecp256k1_v0_7_0_ge_to_storage(&s, ge); +static void rustsecp256k1_v0_8_0_pubkey_save(rustsecp256k1_v0_8_0_pubkey* pubkey, rustsecp256k1_v0_8_0_ge* ge) { + if (sizeof(rustsecp256k1_v0_8_0_ge_storage) == 64) { + rustsecp256k1_v0_8_0_ge_storage s; + rustsecp256k1_v0_8_0_ge_to_storage(&s, ge); memcpy(&pubkey->data[0], &s, sizeof(s)); } else { - VERIFY_CHECK(!rustsecp256k1_v0_7_0_ge_is_infinity(ge)); - rustsecp256k1_v0_7_0_fe_normalize_var(&ge->x); - rustsecp256k1_v0_7_0_fe_normalize_var(&ge->y); - rustsecp256k1_v0_7_0_fe_get_b32(pubkey->data, &ge->x); - rustsecp256k1_v0_7_0_fe_get_b32(pubkey->data + 32, &ge->y); + VERIFY_CHECK(!rustsecp256k1_v0_8_0_ge_is_infinity(ge)); + rustsecp256k1_v0_8_0_fe_normalize_var(&ge->x); + rustsecp256k1_v0_8_0_fe_normalize_var(&ge->y); + rustsecp256k1_v0_8_0_fe_get_b32(pubkey->data, &ge->x); + rustsecp256k1_v0_8_0_fe_get_b32(pubkey->data + 32, &ge->y); } } -int rustsecp256k1_v0_7_0_ec_pubkey_parse(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_7_0_ge Q; +int rustsecp256k1_v0_8_0_ec_pubkey_parse(const 
rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_8_0_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input != NULL); - if (!rustsecp256k1_v0_7_0_eckey_pubkey_parse(&Q, input, inputlen)) { + if (!rustsecp256k1_v0_8_0_eckey_pubkey_parse(&Q, input, inputlen)) { return 0; } - if (!rustsecp256k1_v0_7_0_ge_is_in_correct_subgroup(&Q)) { + if (!rustsecp256k1_v0_8_0_ge_is_in_correct_subgroup(&Q)) { return 0; } - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &Q); - rustsecp256k1_v0_7_0_ge_clear(&Q); + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &Q); + rustsecp256k1_v0_8_0_ge_clear(&Q); return 1; } -int rustsecp256k1_v0_7_0_ec_pubkey_serialize(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_7_0_pubkey* pubkey, unsigned int flags) { - rustsecp256k1_v0_7_0_ge Q; +int rustsecp256k1_v0_8_0_ec_pubkey_serialize(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_8_0_pubkey* pubkey, unsigned int flags) { + rustsecp256k1_v0_8_0_ge Q; size_t len; int ret = 0; @@ -218,8 +238,8 @@ int rustsecp256k1_v0_7_0_ec_pubkey_serialize(const rustsecp256k1_v0_7_0_context* memset(output, 0, len); ARG_CHECK(pubkey != NULL); ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION); - if (rustsecp256k1_v0_7_0_pubkey_load(ctx, &Q, pubkey)) { - ret = rustsecp256k1_v0_7_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); + if (rustsecp256k1_v0_8_0_pubkey_load(ctx, &Q, pubkey)) { + ret = rustsecp256k1_v0_8_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); if (ret) { *outputlen = len; } @@ -227,9 +247,9 @@ int rustsecp256k1_v0_7_0_ec_pubkey_serialize(const rustsecp256k1_v0_7_0_context* return ret; } -int rustsecp256k1_v0_7_0_ec_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ctx, const rustsecp256k1_v0_7_0_pubkey* pubkey0, const rustsecp256k1_v0_7_0_pubkey* pubkey1) { +int rustsecp256k1_v0_8_0_ec_pubkey_cmp(const rustsecp256k1_v0_8_0_context* ctx, const rustsecp256k1_v0_8_0_pubkey* pubkey0, const rustsecp256k1_v0_8_0_pubkey* pubkey1) { unsigned char out[2][33]; - const rustsecp256k1_v0_7_0_pubkey* pk[2]; + const rustsecp256k1_v0_8_0_pubkey* pk[2]; int i; VERIFY_CHECK(ctx != NULL); @@ -242,7 +262,7 @@ int rustsecp256k1_v0_7_0_ec_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ctx, * results in consistent comparisons even if NULL or invalid pubkeys are * involved and prevents edge cases such as sorting algorithms that use * this function and do not terminate as a result. 
*/ - if (!rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { + if (!rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { /* Note that ec_pubkey_serialize should already set the output to * zero in that case, but it's not guaranteed by the API, we can't * test it and writing a VERIFY_CHECK is more complex than @@ -250,42 +270,42 @@ int rustsecp256k1_v0_7_0_ec_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ctx, memset(out[i], 0, sizeof(out[i])); } } - return rustsecp256k1_v0_7_0_memcmp_var(out[0], out[1], sizeof(out[0])); + return rustsecp256k1_v0_8_0_memcmp_var(out[0], out[1], sizeof(out[0])); } -static void rustsecp256k1_v0_7_0_ecdsa_signature_load(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_scalar* r, rustsecp256k1_v0_7_0_scalar* s, const rustsecp256k1_v0_7_0_ecdsa_signature* sig) { +static void rustsecp256k1_v0_8_0_ecdsa_signature_load(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_scalar* r, rustsecp256k1_v0_8_0_scalar* s, const rustsecp256k1_v0_8_0_ecdsa_signature* sig) { (void)ctx; - if (sizeof(rustsecp256k1_v0_7_0_scalar) == 32) { - /* When the rustsecp256k1_v0_7_0_scalar type is exactly 32 byte, use its - * representation inside rustsecp256k1_v0_7_0_ecdsa_signature, as conversion is very fast. - * Note that rustsecp256k1_v0_7_0_ecdsa_signature_save must use the same representation. */ + if (sizeof(rustsecp256k1_v0_8_0_scalar) == 32) { + /* When the rustsecp256k1_v0_8_0_scalar type is exactly 32 byte, use its + * representation inside rustsecp256k1_v0_8_0_ecdsa_signature, as conversion is very fast. + * Note that rustsecp256k1_v0_8_0_ecdsa_signature_save must use the same representation. */ memcpy(r, &sig->data[0], 32); memcpy(s, &sig->data[32], 32); } else { - rustsecp256k1_v0_7_0_scalar_set_b32(r, &sig->data[0], NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(s, &sig->data[32], NULL); + rustsecp256k1_v0_8_0_scalar_set_b32(r, &sig->data[0], NULL); + rustsecp256k1_v0_8_0_scalar_set_b32(s, &sig->data[32], NULL); } } -static void rustsecp256k1_v0_7_0_ecdsa_signature_save(rustsecp256k1_v0_7_0_ecdsa_signature* sig, const rustsecp256k1_v0_7_0_scalar* r, const rustsecp256k1_v0_7_0_scalar* s) { - if (sizeof(rustsecp256k1_v0_7_0_scalar) == 32) { +static void rustsecp256k1_v0_8_0_ecdsa_signature_save(rustsecp256k1_v0_8_0_ecdsa_signature* sig, const rustsecp256k1_v0_8_0_scalar* r, const rustsecp256k1_v0_8_0_scalar* s) { + if (sizeof(rustsecp256k1_v0_8_0_scalar) == 32) { memcpy(&sig->data[0], r, 32); memcpy(&sig->data[32], s, 32); } else { - rustsecp256k1_v0_7_0_scalar_get_b32(&sig->data[0], r); - rustsecp256k1_v0_7_0_scalar_get_b32(&sig->data[32], s); + rustsecp256k1_v0_8_0_scalar_get_b32(&sig->data[0], r); + rustsecp256k1_v0_8_0_scalar_get_b32(&sig->data[32], s); } } -int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_8_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(input != NULL); - if (rustsecp256k1_v0_7_0_ecdsa_sig_parse(&r, &s, input, inputlen)) { - rustsecp256k1_v0_7_0_ecdsa_signature_save(sig, &r, &s); + if (rustsecp256k1_v0_8_0_ecdsa_sig_parse(&r, &s, input, inputlen)) { 
+ rustsecp256k1_v0_8_0_ecdsa_signature_save(sig, &r, &s); return 1; } else { memset(sig, 0, sizeof(*sig)); @@ -293,8 +313,8 @@ int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_7_0_co } } -int rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature* sig, const unsigned char *input64) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input64) { + rustsecp256k1_v0_8_0_scalar r, s; int ret = 1; int overflow = 0; @@ -302,76 +322,76 @@ int rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_7_ ARG_CHECK(sig != NULL); ARG_CHECK(input64 != NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&r, &input64[0], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&r, &input64[0], &overflow); ret &= !overflow; - rustsecp256k1_v0_7_0_scalar_set_b32(&s, &input64[32], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&s, &input64[32], &overflow); ret &= !overflow; if (ret) { - rustsecp256k1_v0_7_0_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_8_0_ecdsa_signature_save(sig, &r, &s); } else { memset(sig, 0, sizeof(*sig)); } return ret; } -int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_7_0_ecdsa_signature* sig) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_8_0_ecdsa_signature* sig) { + rustsecp256k1_v0_8_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output != NULL); ARG_CHECK(outputlen != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, sig); - return rustsecp256k1_v0_7_0_ecdsa_sig_serialize(output, outputlen, &r, &s); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, sig); + return rustsecp256k1_v0_8_0_ecdsa_sig_serialize(output, outputlen, &r, &s); } -int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_7_0_ecdsa_signature* sig) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_8_0_ecdsa_signature* sig) { + rustsecp256k1_v0_8_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, sig); - rustsecp256k1_v0_7_0_scalar_get_b32(&output64[0], &r); - rustsecp256k1_v0_7_0_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, sig); + rustsecp256k1_v0_8_0_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_8_0_scalar_get_b32(&output64[32], &s); return 1; } -int rustsecp256k1_v0_7_0_ecdsa_signature_normalize(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature *sigout, const rustsecp256k1_v0_7_0_ecdsa_signature *sigin) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_normalize(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature *sigout, const rustsecp256k1_v0_8_0_ecdsa_signature *sigin) { + rustsecp256k1_v0_8_0_scalar r, s; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sigin != NULL); - 
rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, sigin); - ret = rustsecp256k1_v0_7_0_scalar_is_high(&s); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, sigin); + ret = rustsecp256k1_v0_8_0_scalar_is_high(&s); if (sigout != NULL) { if (ret) { - rustsecp256k1_v0_7_0_scalar_negate(&s, &s); + rustsecp256k1_v0_8_0_scalar_negate(&s, &s); } - rustsecp256k1_v0_7_0_ecdsa_signature_save(sigout, &r, &s); + rustsecp256k1_v0_8_0_ecdsa_signature_save(sigout, &r, &s); } return ret; } -int rustsecp256k1_v0_7_0_ecdsa_verify(const rustsecp256k1_v0_7_0_context* ctx, const rustsecp256k1_v0_7_0_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_7_0_pubkey *pubkey) { - rustsecp256k1_v0_7_0_ge q; - rustsecp256k1_v0_7_0_scalar r, s; - rustsecp256k1_v0_7_0_scalar m; +int rustsecp256k1_v0_8_0_ecdsa_verify(const rustsecp256k1_v0_8_0_context* ctx, const rustsecp256k1_v0_8_0_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_8_0_pubkey *pubkey) { + rustsecp256k1_v0_8_0_ge q; + rustsecp256k1_v0_8_0_scalar r, s; + rustsecp256k1_v0_8_0_scalar m; VERIFY_CHECK(ctx != NULL); ARG_CHECK(msghash32 != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(pubkey != NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&m, msghash32, NULL); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, sig); - return (!rustsecp256k1_v0_7_0_scalar_is_high(&s) && - rustsecp256k1_v0_7_0_pubkey_load(ctx, &q, pubkey) && - rustsecp256k1_v0_7_0_ecdsa_sig_verify(&r, &s, &q, &m)); + rustsecp256k1_v0_8_0_scalar_set_b32(&m, msghash32, NULL); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, sig); + return (!rustsecp256k1_v0_8_0_scalar_is_high(&s) && + rustsecp256k1_v0_8_0_pubkey_load(ctx, &q, pubkey) && + rustsecp256k1_v0_8_0_ecdsa_sig_verify(&r, &s, &q, &m)); } static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) { @@ -382,10 +402,14 @@ static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *off static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { unsigned char keydata[112]; unsigned int offset = 0; - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 rng; unsigned int i; + rustsecp256k1_v0_8_0_scalar msg; + unsigned char msgmod32[32]; + rustsecp256k1_v0_8_0_scalar_set_b32(&msg, msg32, NULL); + rustsecp256k1_v0_8_0_scalar_get_b32(msgmod32, &msg); /* We feed a byte array to the PRNG as input, consisting of: - * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d. + * - the private key (32 bytes) and reduced message (32 bytes), see RFC 6979 3.2d. * - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data. * - optionally 16 extra bytes with the algorithm name. * Because the arguments have distinct fixed lengths it is not possible for @@ -393,58 +417,58 @@ static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *m * nonces. 
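
Review note: as the code above shows, `ecdsa_verify` fails closed on high-S signatures (`!scalar_is_high(&s)`), while `signature_normalize` negates a high S and reports whether it did. Callers that must accept non-normalized (e.g. legacy DER) signatures are expected to normalize first; a sketch of that pattern:

```
#include "secp256k1.h" /* vendored header with the renamed symbols */

/* Accept a signature regardless of S normalization by verifying a
 * lower-S copy. `lower` is always written when sigout != NULL. */
int verify_allowing_high_s(const rustsecp256k1_v0_8_0_context *ctx,
                           const rustsecp256k1_v0_8_0_ecdsa_signature *sig,
                           const unsigned char msghash32[32],
                           const rustsecp256k1_v0_8_0_pubkey *pubkey) {
    rustsecp256k1_v0_8_0_ecdsa_signature lower;
    rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, &lower, sig);
    return rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &lower, msghash32, pubkey);
}
```
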
*/ buffer_append(keydata, &offset, key32, 32); - buffer_append(keydata, &offset, msg32, 32); + buffer_append(keydata, &offset, msgmod32, 32); if (data != NULL) { buffer_append(keydata, &offset, data, 32); } if (algo16 != NULL) { buffer_append(keydata, &offset, algo16, 16); } - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); memset(keydata, 0, sizeof(keydata)); for (i = 0; i <= counter; i++) { - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); } - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(&rng); return 1; } -const rustsecp256k1_v0_7_0_nonce_function rustsecp256k1_v0_7_0_nonce_function_rfc6979 = nonce_function_rfc6979; -const rustsecp256k1_v0_7_0_nonce_function rustsecp256k1_v0_7_0_nonce_function_default = nonce_function_rfc6979; +const rustsecp256k1_v0_8_0_nonce_function rustsecp256k1_v0_8_0_nonce_function_rfc6979 = nonce_function_rfc6979; +const rustsecp256k1_v0_8_0_nonce_function rustsecp256k1_v0_8_0_nonce_function_default = nonce_function_rfc6979; -static int rustsecp256k1_v0_7_0_ecdsa_sign_inner(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_scalar* r, rustsecp256k1_v0_7_0_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_7_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_7_0_scalar sec, non, msg; +static int rustsecp256k1_v0_8_0_ecdsa_sign_inner(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_scalar* r, rustsecp256k1_v0_8_0_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_8_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_8_0_scalar sec, non, msg; int ret = 0; int is_sec_valid; unsigned char nonce32[32]; unsigned int count = 0; /* Default initialization here is important so we won't pass uninit values to the cmov in the end */ - *r = rustsecp256k1_v0_7_0_scalar_zero; - *s = rustsecp256k1_v0_7_0_scalar_zero; + *r = rustsecp256k1_v0_8_0_scalar_zero; + *s = rustsecp256k1_v0_8_0_scalar_zero; if (recid) { *recid = 0; } if (noncefp == NULL) { - noncefp = rustsecp256k1_v0_7_0_nonce_function_default; + noncefp = rustsecp256k1_v0_8_0_nonce_function_default; } /* Fail if the secret key is invalid. */ - is_sec_valid = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_7_0_scalar_cmov(&sec, &rustsecp256k1_v0_7_0_scalar_one, !is_sec_valid); - rustsecp256k1_v0_7_0_scalar_set_b32(&msg, msg32, NULL); + is_sec_valid = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_8_0_scalar_cmov(&sec, &rustsecp256k1_v0_8_0_scalar_one, !is_sec_valid); + rustsecp256k1_v0_8_0_scalar_set_b32(&msg, msg32, NULL); while (1) { int is_nonce_valid; ret = !!noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } - is_nonce_valid = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&non, nonce32); + is_nonce_valid = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&non, nonce32); /* The nonce is still secret here, but it being invalid is is less likely than 1:2^255. 
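
Review note: one behavioural change in this hunk worth flagging is that the message is now reduced mod the group order (`msgmod32`) before being fed to the RFC 6979 PRNG. The optional 32-byte `data` block appended above is reachable from the public API as the `noncedata` argument; a sketch (all buffers are caller-supplied and illustrative):

```
#include "secp256k1.h" /* vendored header with the renamed symbols */

/* Sign with 32 bytes of caller-chosen additional data; the nonce
 * function above appends it to the HMAC key material per RFC 6979 3.6. */
int sign_with_extra_entropy(const rustsecp256k1_v0_8_0_context *ctx,
                            rustsecp256k1_v0_8_0_ecdsa_signature *sig,
                            const unsigned char msghash32[32],
                            const unsigned char seckey[32],
                            const unsigned char extra32[32]) {
    return rustsecp256k1_v0_8_0_ecdsa_sign(ctx, sig, msghash32, seckey,
                                           rustsecp256k1_v0_8_0_nonce_function_rfc6979,
                                           extra32);
}
```

Passing a NULL nonce function falls back to `nonce_function_default`, which is the same RFC 6979 implementation, as the assignments above show.
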
*/ - rustsecp256k1_v0_7_0_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); + rustsecp256k1_v0_8_0_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); if (is_nonce_valid) { - ret = rustsecp256k1_v0_7_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); + ret = rustsecp256k1_v0_8_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); /* The final signature is no longer a secret, nor is the fact that we were successful or not. */ - rustsecp256k1_v0_7_0_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_8_0_declassify(ctx, &ret, sizeof(ret)); if (ret) { break; } @@ -456,202 +480,202 @@ static int rustsecp256k1_v0_7_0_ecdsa_sign_inner(const rustsecp256k1_v0_7_0_cont * used as a branching variable. */ ret &= is_sec_valid; memset(nonce32, 0, 32); - rustsecp256k1_v0_7_0_scalar_clear(&msg); - rustsecp256k1_v0_7_0_scalar_clear(&non); - rustsecp256k1_v0_7_0_scalar_clear(&sec); - rustsecp256k1_v0_7_0_scalar_cmov(r, &rustsecp256k1_v0_7_0_scalar_zero, !ret); - rustsecp256k1_v0_7_0_scalar_cmov(s, &rustsecp256k1_v0_7_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_clear(&msg); + rustsecp256k1_v0_8_0_scalar_clear(&non); + rustsecp256k1_v0_8_0_scalar_clear(&sec); + rustsecp256k1_v0_8_0_scalar_cmov(r, &rustsecp256k1_v0_8_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_cmov(s, &rustsecp256k1_v0_8_0_scalar_zero, !ret); if (recid) { const int zero = 0; - rustsecp256k1_v0_7_0_int_cmov(recid, &zero, !ret); + rustsecp256k1_v0_8_0_int_cmov(recid, &zero, !ret); } return ret; } -int rustsecp256k1_v0_7_0_ecdsa_sign(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_7_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_sign(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_8_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_8_0_scalar r, s; int ret; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_7_0_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); - rustsecp256k1_v0_7_0_ecdsa_signature_save(signature, &r, &s); + ret = rustsecp256k1_v0_8_0_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); + rustsecp256k1_v0_8_0_ecdsa_signature_save(signature, &r, &s); return ret; } -int rustsecp256k1_v0_7_0_ec_seckey_verify(const rustsecp256k1_v0_7_0_context* ctx, const unsigned char *seckey) { - rustsecp256k1_v0_7_0_scalar sec; +int rustsecp256k1_v0_8_0_ec_seckey_verify(const rustsecp256k1_v0_8_0_context* ctx, const unsigned char *seckey) { + rustsecp256k1_v0_8_0_scalar sec; int ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_7_0_scalar_clear(&sec); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_8_0_scalar_clear(&sec); return ret; } -static int rustsecp256k1_v0_7_0_ec_pubkey_create_helper(const rustsecp256k1_v0_7_0_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_7_0_scalar *seckey_scalar, rustsecp256k1_v0_7_0_ge 
*p, const unsigned char *seckey) { - rustsecp256k1_v0_7_0_gej pj; +static int rustsecp256k1_v0_8_0_ec_pubkey_create_helper(const rustsecp256k1_v0_8_0_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_8_0_scalar *seckey_scalar, rustsecp256k1_v0_8_0_ge *p, const unsigned char *seckey) { + rustsecp256k1_v0_8_0_gej pj; int ret; - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(seckey_scalar, seckey); - rustsecp256k1_v0_7_0_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_7_0_scalar_one, !ret); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(seckey_scalar, seckey); + rustsecp256k1_v0_8_0_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_8_0_scalar_one, !ret); - rustsecp256k1_v0_7_0_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); - rustsecp256k1_v0_7_0_ge_set_gej(p, &pj); + rustsecp256k1_v0_8_0_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); + rustsecp256k1_v0_8_0_ge_set_gej(p, &pj); return ret; } -int rustsecp256k1_v0_7_0_ec_pubkey_create(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey, const unsigned char *seckey) { - rustsecp256k1_v0_7_0_ge p; - rustsecp256k1_v0_7_0_scalar seckey_scalar; +int rustsecp256k1_v0_8_0_ec_pubkey_create(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *seckey) { + rustsecp256k1_v0_8_0_ge p; + rustsecp256k1_v0_8_0_scalar seckey_scalar; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); - ARG_CHECK(rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_7_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &p); - rustsecp256k1_v0_7_0_memczero(pubkey, sizeof(*pubkey), !ret); + ret = rustsecp256k1_v0_8_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_8_0_memczero(pubkey, sizeof(*pubkey), !ret); - rustsecp256k1_v0_7_0_scalar_clear(&seckey_scalar); + rustsecp256k1_v0_8_0_scalar_clear(&seckey_scalar); return ret; } -int rustsecp256k1_v0_7_0_ec_seckey_negate(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey) { - rustsecp256k1_v0_7_0_scalar sec; +int rustsecp256k1_v0_8_0_ec_seckey_negate(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey) { + rustsecp256k1_v0_8_0_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_7_0_scalar_cmov(&sec, &rustsecp256k1_v0_7_0_scalar_zero, !ret); - rustsecp256k1_v0_7_0_scalar_negate(&sec, &sec); - rustsecp256k1_v0_7_0_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_8_0_scalar_cmov(&sec, &rustsecp256k1_v0_8_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_negate(&sec, &sec); + rustsecp256k1_v0_8_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_7_0_scalar_clear(&sec); + rustsecp256k1_v0_8_0_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_7_0_ec_privkey_negate(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey) { - return rustsecp256k1_v0_7_0_ec_seckey_negate(ctx, seckey); +int rustsecp256k1_v0_8_0_ec_privkey_negate(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey) { + return rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, seckey); } -int 
rustsecp256k1_v0_7_0_ec_pubkey_negate(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey) { +int rustsecp256k1_v0_8_0_ec_pubkey_negate(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey) { int ret = 0; - rustsecp256k1_v0_7_0_ge p; + rustsecp256k1_v0_8_0_ge p; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); - ret = rustsecp256k1_v0_7_0_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_8_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - rustsecp256k1_v0_7_0_ge_neg(&p, &p); - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_8_0_ge_neg(&p, &p); + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &p); } return ret; } -static int rustsecp256k1_v0_7_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_7_0_scalar *sec, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_scalar term; +static int rustsecp256k1_v0_8_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_8_0_scalar *sec, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_scalar term; int overflow = 0; int ret = 0; - rustsecp256k1_v0_7_0_scalar_set_b32(&term, tweak32, &overflow); - ret = (!overflow) & rustsecp256k1_v0_7_0_eckey_privkey_tweak_add(sec, &term); - rustsecp256k1_v0_7_0_scalar_clear(&term); + rustsecp256k1_v0_8_0_scalar_set_b32(&term, tweak32, &overflow); + ret = (!overflow) & rustsecp256k1_v0_8_0_eckey_privkey_tweak_add(sec, &term); + rustsecp256k1_v0_8_0_scalar_clear(&term); return ret; } -int rustsecp256k1_v0_7_0_ec_seckey_tweak_add(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_scalar sec; +int rustsecp256k1_v0_8_0_ec_seckey_tweak_add(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - ret &= rustsecp256k1_v0_7_0_ec_seckey_tweak_add_helper(&sec, tweak32); - rustsecp256k1_v0_7_0_scalar_cmov(&sec, &rustsecp256k1_v0_7_0_scalar_zero, !ret); - rustsecp256k1_v0_7_0_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + ret &= rustsecp256k1_v0_8_0_ec_seckey_tweak_add_helper(&sec, tweak32); + rustsecp256k1_v0_8_0_scalar_cmov(&sec, &rustsecp256k1_v0_8_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_7_0_scalar_clear(&sec); + rustsecp256k1_v0_8_0_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_7_0_ec_privkey_tweak_add(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - return rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, seckey, tweak32); +int rustsecp256k1_v0_8_0_ec_privkey_tweak_add(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, seckey, tweak32); } -static int rustsecp256k1_v0_7_0_ec_pubkey_tweak_add_helper(rustsecp256k1_v0_7_0_ge *p, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_scalar term; +static int rustsecp256k1_v0_8_0_ec_pubkey_tweak_add_helper(rustsecp256k1_v0_8_0_ge *p, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_scalar term; int overflow = 0; - rustsecp256k1_v0_7_0_scalar_set_b32(&term, tweak32, &overflow); - return !overflow && rustsecp256k1_v0_7_0_eckey_pubkey_tweak_add(p, &term); + rustsecp256k1_v0_8_0_scalar_set_b32(&term, tweak32, &overflow); + return 
!overflow && rustsecp256k1_v0_8_0_eckey_pubkey_tweak_add(p, &term); } -int rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_ge p; +int rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_ge p; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_7_0_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_8_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); - ret = ret && rustsecp256k1_v0_7_0_ec_pubkey_tweak_add_helper(&p, tweak32); + ret = ret && rustsecp256k1_v0_8_0_ec_pubkey_tweak_add_helper(&p, tweak32); if (ret) { - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &p); } return ret; } -int rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_scalar factor; - rustsecp256k1_v0_7_0_scalar sec; +int rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_scalar factor; + rustsecp256k1_v0_8_0_scalar sec; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&factor, tweak32, &overflow); - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - ret &= (!overflow) & rustsecp256k1_v0_7_0_eckey_privkey_tweak_mul(&sec, &factor); - rustsecp256k1_v0_7_0_scalar_cmov(&sec, &rustsecp256k1_v0_7_0_scalar_zero, !ret); - rustsecp256k1_v0_7_0_scalar_get_b32(seckey, &sec); + rustsecp256k1_v0_8_0_scalar_set_b32(&factor, tweak32, &overflow); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + ret &= (!overflow) & rustsecp256k1_v0_8_0_eckey_privkey_tweak_mul(&sec, &factor); + rustsecp256k1_v0_8_0_scalar_cmov(&sec, &rustsecp256k1_v0_8_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_7_0_scalar_clear(&sec); - rustsecp256k1_v0_7_0_scalar_clear(&factor); + rustsecp256k1_v0_8_0_scalar_clear(&sec); + rustsecp256k1_v0_8_0_scalar_clear(&factor); return ret; } -int rustsecp256k1_v0_7_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - return rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, seckey, tweak32); +int rustsecp256k1_v0_8_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, seckey, tweak32); } -int rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_ge p; - rustsecp256k1_v0_7_0_scalar factor; +int rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_ge p; + rustsecp256k1_v0_8_0_scalar factor; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&factor, tweak32, &overflow); - ret = !overflow && rustsecp256k1_v0_7_0_pubkey_load(ctx, &p, pubkey); + 
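
Review note: the seckey/pubkey tweak helpers renamed in this hunk commute with key generation, i.e. (sk + t)·G = sk·G + t·G, which is what BIP32-style derivation relies on. A consistency sketch using only functions from this file (note `seckey` is modified in place, as the API does):

```
#include "secp256k1.h" /* vendored header with the renamed symbols */

/* Check that tweaking the secret key and tweaking the public key agree:
 * pubkey(sk + t) == pubkey(sk) + t*G. Returns 1 on agreement, 0 if any
 * step fails (invalid key, overflowing tweak, or result at infinity). */
int tweak_add_agrees(const rustsecp256k1_v0_8_0_context *ctx,
                     unsigned char seckey[32],
                     const unsigned char tweak32[32]) {
    rustsecp256k1_v0_8_0_pubkey tweaked_pk, rederived_pk;

    if (!rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &tweaked_pk, seckey)) return 0;
    if (!rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &tweaked_pk, tweak32)) return 0;
    if (!rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, seckey, tweak32)) return 0;
    if (!rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &rederived_pk, seckey)) return 0;
    return rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &tweaked_pk, &rederived_pk) == 0;
}
```
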
rustsecp256k1_v0_8_0_scalar_set_b32(&factor, tweak32, &overflow); + ret = !overflow && rustsecp256k1_v0_8_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - if (rustsecp256k1_v0_7_0_eckey_pubkey_tweak_mul(&p, &factor)) { - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &p); + if (rustsecp256k1_v0_8_0_eckey_pubkey_tweak_mul(&p, &factor)) { + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &p); } else { ret = 0; } @@ -660,18 +684,18 @@ int rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_7_0_context* return ret; } -int rustsecp256k1_v0_7_0_context_randomize(rustsecp256k1_v0_7_0_context* ctx, const unsigned char *seed32) { +int rustsecp256k1_v0_8_0_context_randomize(rustsecp256k1_v0_8_0_context* ctx, const unsigned char *seed32) { VERIFY_CHECK(ctx != NULL); - if (rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { - rustsecp256k1_v0_7_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + if (rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + rustsecp256k1_v0_8_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); } return 1; } -int rustsecp256k1_v0_7_0_ec_pubkey_combine(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubnonce, const rustsecp256k1_v0_7_0_pubkey * const *pubnonces, size_t n) { +int rustsecp256k1_v0_8_0_ec_pubkey_combine(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubnonce, const rustsecp256k1_v0_8_0_pubkey * const *pubnonces, size_t n) { size_t i; - rustsecp256k1_v0_7_0_gej Qj; - rustsecp256k1_v0_7_0_ge Q; + rustsecp256k1_v0_8_0_gej Qj; + rustsecp256k1_v0_8_0_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubnonce != NULL); @@ -679,31 +703,31 @@ int rustsecp256k1_v0_7_0_ec_pubkey_combine(const rustsecp256k1_v0_7_0_context* c ARG_CHECK(n >= 1); ARG_CHECK(pubnonces != NULL); - rustsecp256k1_v0_7_0_gej_set_infinity(&Qj); + rustsecp256k1_v0_8_0_gej_set_infinity(&Qj); for (i = 0; i < n; i++) { ARG_CHECK(pubnonces[i] != NULL); - rustsecp256k1_v0_7_0_pubkey_load(ctx, &Q, pubnonces[i]); - rustsecp256k1_v0_7_0_gej_add_ge(&Qj, &Qj, &Q); + rustsecp256k1_v0_8_0_pubkey_load(ctx, &Q, pubnonces[i]); + rustsecp256k1_v0_8_0_gej_add_ge(&Qj, &Qj, &Q); } - if (rustsecp256k1_v0_7_0_gej_is_infinity(&Qj)) { + if (rustsecp256k1_v0_8_0_gej_is_infinity(&Qj)) { return 0; } - rustsecp256k1_v0_7_0_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_7_0_pubkey_save(pubnonce, &Q); + rustsecp256k1_v0_8_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_8_0_pubkey_save(pubnonce, &Q); return 1; } -int rustsecp256k1_v0_7_0_tagged_sha256(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { - rustsecp256k1_v0_7_0_sha256 sha; +int rustsecp256k1_v0_8_0_tagged_sha256(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { + rustsecp256k1_v0_8_0_sha256 sha; VERIFY_CHECK(ctx != NULL); ARG_CHECK(hash32 != NULL); ARG_CHECK(tag != NULL); ARG_CHECK(msg != NULL); - rustsecp256k1_v0_7_0_sha256_initialize_tagged(&sha, tag, taglen); - rustsecp256k1_v0_7_0_sha256_write(&sha, msg, msglen); - rustsecp256k1_v0_7_0_sha256_finalize(&sha, hash32); + rustsecp256k1_v0_8_0_sha256_initialize_tagged(&sha, tag, taglen); + rustsecp256k1_v0_8_0_sha256_write(&sha, msg, msglen); + rustsecp256k1_v0_8_0_sha256_finalize(&sha, hash32); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/secp256k1.c.orig b/secp256k1-sys/depend/secp256k1/src/secp256k1.c.orig index 
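
Review note: `tagged_sha256` at the end of the secp256k1.c hunk is the BIP-340 construction, SHA256(SHA256(tag) || SHA256(tag) || msg), via `sha256_initialize_tagged`. A usage sketch (tag and message are illustrative; any context works, since only the hash state is touched):

```
#include <stdio.h>
#include "secp256k1.h" /* vendored header with the renamed symbols */

/* Print tagged_sha256("my-protocol/commitment", "hello") as hex. */
void demo_tagged_sha256(const rustsecp256k1_v0_8_0_context *ctx) {
    static const unsigned char tag[] = "my-protocol/commitment";
    static const unsigned char msg[] = "hello";
    unsigned char hash32[32];
    size_t i;

    if (rustsecp256k1_v0_8_0_tagged_sha256(ctx, hash32, tag, sizeof(tag) - 1,
                                           msg, sizeof(msg) - 1)) {
        for (i = 0; i < 32; i++) printf("%02x", hash32[i]);
        printf("\n");
    }
}
```
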
bcf77ae08..a08597330 100644 --- a/secp256k1-sys/depend/secp256k1/src/secp256k1.c.orig +++ b/secp256k1-sys/depend/secp256k1/src/secp256k1.c.orig @@ -4,6 +4,17 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ +/* This is a C project. It should not be compiled with a C++ compiler, + * and we error out if we detect one. + * + * We still want to be able to test the project with a C++ compiler + * because it is still good to know if this will lead to real trouble, so + * there is a possibility to override the check. But be warned that + * compiling with a C++ compiler is not supported. */ +#if defined(__cplusplus) && !defined(SECP256K1_CPLUSPLUS_TEST_OVERRIDE) +#error Trying to compile a C project with a C++ compiler. +#endif + #define SECP256K1_BUILD #include "../include/secp256k1.h" @@ -11,6 +22,7 @@ #include "assumptions.h" #include "util.h" + #include "field_impl.h" #include "scalar_impl.h" #include "group_impl.h" @@ -20,6 +32,7 @@ #include "ecdsa_impl.h" #include "eckey_impl.h" #include "hash_impl.h" +#include "int128_impl.h" #include "scratch_impl.h" #include "selftest.h" @@ -33,39 +46,48 @@ #define ARG_CHECK(cond) do { \ if (EXPECT(!(cond), 0)) { \ - rustsecp256k1_v0_7_0_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_8_0_callback_call(&ctx->illegal_callback, #cond); \ return 0; \ } \ } while(0) #define ARG_CHECK_NO_RETURN(cond) do { \ if (EXPECT(!(cond), 0)) { \ - rustsecp256k1_v0_7_0_callback_call(&ctx->illegal_callback, #cond); \ + rustsecp256k1_v0_8_0_callback_call(&ctx->illegal_callback, #cond); \ } \ } while(0) -struct rustsecp256k1_v0_7_0_context_struct { - rustsecp256k1_v0_7_0_ecmult_gen_context ecmult_gen_ctx; - rustsecp256k1_v0_7_0_callback illegal_callback; - rustsecp256k1_v0_7_0_callback error_callback; +/* Note that whenever you change the context struct, you must also change the + * context_eq function. */ +struct rustsecp256k1_v0_8_0_context_struct { + rustsecp256k1_v0_8_0_ecmult_gen_context ecmult_gen_ctx; + rustsecp256k1_v0_8_0_callback illegal_callback; + rustsecp256k1_v0_8_0_callback error_callback; int declassify; }; -static const rustsecp256k1_v0_7_0_context rustsecp256k1_v0_7_0_context_no_precomp_ = { +static const rustsecp256k1_v0_8_0_context rustsecp256k1_v0_8_0_context_static_ = { { 0 }, - { rustsecp256k1_v0_7_0_default_illegal_callback_fn, 0 }, - { rustsecp256k1_v0_7_0_default_error_callback_fn, 0 }, + { rustsecp256k1_v0_8_0_default_illegal_callback_fn, 0 }, + { rustsecp256k1_v0_8_0_default_error_callback_fn, 0 }, 0 }; -const rustsecp256k1_v0_7_0_context *rustsecp256k1_v0_7_0_context_no_precomp = &rustsecp256k1_v0_7_0_context_no_precomp_; +const rustsecp256k1_v0_8_0_context *rustsecp256k1_v0_8_0_context_static = &rustsecp256k1_v0_8_0_context_static_; +const rustsecp256k1_v0_8_0_context *rustsecp256k1_v0_8_0_context_no_precomp = &rustsecp256k1_v0_8_0_context_static_; -size_t rustsecp256k1_v0_7_0_context_preallocated_size(unsigned int flags) { - size_t ret = sizeof(rustsecp256k1_v0_7_0_context); +void rustsecp256k1_v0_8_0_selftest(void) { + if (!rustsecp256k1_v0_8_0_selftest_passes()) { + rustsecp256k1_v0_8_0_callback_call(&default_error_callback, "self test failed"); + } +} + +size_t rustsecp256k1_v0_8_0_context_preallocated_size(unsigned int flags) { + size_t ret = sizeof(rustsecp256k1_v0_8_0_context); /* A return value of 0 is reserved as an indicator for errors when we call this function internally. 
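
Review note: the .orig hunk above introduces `context_static` (keeping `context_no_precomp` as an alias to the same object). The static context's `ecmult_gen_ctx` is the zero-initialized `{ 0 }`, so it cannot sign — `ecdsa_sign` ARG_CHECKs `ecmult_gen_context_is_built` — but verification needs no ecmult_gen context. Sketch:

```
#include "secp256k1.h" /* vendored header with the renamed symbols */

/* Verification works with the static context; signing with it would
 * fail the ecmult_gen ARG_CHECK shown above. */
int verify_with_static_context(const rustsecp256k1_v0_8_0_ecdsa_signature *sig,
                               const unsigned char msghash32[32],
                               const rustsecp256k1_v0_8_0_pubkey *pubkey) {
    return rustsecp256k1_v0_8_0_ecdsa_verify(rustsecp256k1_v0_8_0_context_static,
                                             sig, msghash32, pubkey);
}
```
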
*/ VERIFY_CHECK(ret != 0); if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { - rustsecp256k1_v0_7_0_callback_call(&default_illegal_callback, + rustsecp256k1_v0_8_0_callback_call(&default_illegal_callback, "Invalid flags"); return 0; } @@ -73,41 +95,39 @@ size_t rustsecp256k1_v0_7_0_context_preallocated_size(unsigned int flags) { return ret; } -size_t rustsecp256k1_v0_7_0_context_preallocated_clone_size(const rustsecp256k1_v0_7_0_context* ctx) { - size_t ret = sizeof(rustsecp256k1_v0_7_0_context); +size_t rustsecp256k1_v0_8_0_context_preallocated_clone_size(const rustsecp256k1_v0_8_0_context* ctx) { + size_t ret = sizeof(rustsecp256k1_v0_8_0_context); VERIFY_CHECK(ctx != NULL); return ret; } -rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_preallocated_create(void* prealloc, unsigned int flags) { +rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_preallocated_create(void* prealloc, unsigned int flags) { size_t prealloc_size; - rustsecp256k1_v0_7_0_context* ret; + rustsecp256k1_v0_8_0_context* ret; - if (!rustsecp256k1_v0_7_0_selftest()) { - rustsecp256k1_v0_7_0_callback_call(&default_error_callback, "self test failed"); - } + rustsecp256k1_v0_8_0_selftest(); - prealloc_size = rustsecp256k1_v0_7_0_context_preallocated_size(flags); + prealloc_size = rustsecp256k1_v0_8_0_context_preallocated_size(flags); if (prealloc_size == 0) { return NULL; } VERIFY_CHECK(prealloc != NULL); - ret = (rustsecp256k1_v0_7_0_context*)prealloc; + ret = (rustsecp256k1_v0_8_0_context*)prealloc; ret->illegal_callback = default_illegal_callback; ret->error_callback = default_error_callback; - /* Flags have been checked by rustsecp256k1_v0_7_0_context_preallocated_size. */ + /* Flags have been checked by rustsecp256k1_v0_8_0_context_preallocated_size. 
*/ VERIFY_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_CONTEXT); - rustsecp256k1_v0_7_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx); + rustsecp256k1_v0_8_0_ecmult_gen_context_build(&ret->ecmult_gen_ctx); ret->declassify = !!(flags & SECP256K1_FLAGS_BIT_CONTEXT_DECLASSIFY); return ret; } -rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_create(unsigned int flags) { - size_t const prealloc_size = rustsecp256k1_v0_7_0_context_preallocated_size(flags); - rustsecp256k1_v0_7_0_context* ctx = (rustsecp256k1_v0_7_0_context*)checked_malloc(&default_error_callback, prealloc_size); - if (EXPECT(rustsecp256k1_v0_7_0_context_preallocated_create(ctx, flags) == NULL, 0)) { +rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_create(unsigned int flags) { + size_t const prealloc_size = rustsecp256k1_v0_8_0_context_preallocated_size(flags); + rustsecp256k1_v0_8_0_context* ctx = (rustsecp256k1_v0_8_0_context*)checked_malloc(&default_error_callback, prealloc_size); + if (EXPECT(rustsecp256k1_v0_8_0_context_preallocated_create(ctx, flags) == NULL, 0)) { free(ctx); return NULL; } @@ -115,74 +135,74 @@ rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_create(unsigned int f return ctx; } -rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_preallocated_clone(const rustsecp256k1_v0_7_0_context* ctx, void* prealloc) { - rustsecp256k1_v0_7_0_context* ret; +rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_preallocated_clone(const rustsecp256k1_v0_8_0_context* ctx, void* prealloc) { + rustsecp256k1_v0_8_0_context* ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(prealloc != NULL); - ret = (rustsecp256k1_v0_7_0_context*)prealloc; + ret = (rustsecp256k1_v0_8_0_context*)prealloc; *ret = *ctx; return ret; } -rustsecp256k1_v0_7_0_context* rustsecp256k1_v0_7_0_context_clone(const rustsecp256k1_v0_7_0_context* ctx) { - rustsecp256k1_v0_7_0_context* ret; +rustsecp256k1_v0_8_0_context* rustsecp256k1_v0_8_0_context_clone(const rustsecp256k1_v0_8_0_context* ctx) { + rustsecp256k1_v0_8_0_context* ret; size_t prealloc_size; VERIFY_CHECK(ctx != NULL); - prealloc_size = rustsecp256k1_v0_7_0_context_preallocated_clone_size(ctx); - ret = (rustsecp256k1_v0_7_0_context*)checked_malloc(&ctx->error_callback, prealloc_size); - ret = rustsecp256k1_v0_7_0_context_preallocated_clone(ctx, ret); + prealloc_size = rustsecp256k1_v0_8_0_context_preallocated_clone_size(ctx); + ret = (rustsecp256k1_v0_8_0_context*)checked_malloc(&ctx->error_callback, prealloc_size); + ret = rustsecp256k1_v0_8_0_context_preallocated_clone(ctx, ret); return ret; } -void rustsecp256k1_v0_7_0_context_preallocated_destroy(rustsecp256k1_v0_7_0_context* ctx) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_7_0_context_no_precomp); +void rustsecp256k1_v0_8_0_context_preallocated_destroy(rustsecp256k1_v0_8_0_context* ctx) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_8_0_context_static); if (ctx != NULL) { - rustsecp256k1_v0_7_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); + rustsecp256k1_v0_8_0_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); } } -void rustsecp256k1_v0_7_0_context_destroy(rustsecp256k1_v0_7_0_context* ctx) { +void rustsecp256k1_v0_8_0_context_destroy(rustsecp256k1_v0_8_0_context* ctx) { if (ctx != NULL) { - rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx); + rustsecp256k1_v0_8_0_context_preallocated_destroy(ctx); free(ctx); } } -void rustsecp256k1_v0_7_0_context_set_illegal_callback(rustsecp256k1_v0_7_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - 
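
Review note: the preallocated-context API renamed in this hunk lets callers own the context memory. A minimal lifecycle sketch; `SECP256K1_CONTEXT_NONE` is assumed from the upstream public header (only `SECP256K1_FLAGS_TYPE_CONTEXT` appears in the diff itself):

```
#include <stdlib.h>
#include "secp256k1.h" /* vendored header with the renamed symbols */

/* Create a context inside caller-owned memory. Tear-down mirrors
 * creation: preallocated_destroy(ctx), then free(buf). */
rustsecp256k1_v0_8_0_context *create_ctx_in_buffer(void **buf_out) {
    size_t sz = rustsecp256k1_v0_8_0_context_preallocated_size(SECP256K1_CONTEXT_NONE);
    void *buf;

    if (sz == 0) return NULL;  /* invalid flags */
    buf = malloc(sz);          /* malloc alignment suffices for the context */
    if (buf == NULL) return NULL;
    *buf_out = buf;
    return rustsecp256k1_v0_8_0_context_preallocated_create(buf, SECP256K1_CONTEXT_NONE);
}
```
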
ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_7_0_context_no_precomp); +void rustsecp256k1_v0_8_0_context_set_illegal_callback(rustsecp256k1_v0_8_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_8_0_context_static); if (fun == NULL) { - fun = rustsecp256k1_v0_7_0_default_illegal_callback_fn; + fun = rustsecp256k1_v0_8_0_default_illegal_callback_fn; } ctx->illegal_callback.fn = fun; ctx->illegal_callback.data = data; } -void rustsecp256k1_v0_7_0_context_set_error_callback(rustsecp256k1_v0_7_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_7_0_context_no_precomp); +void rustsecp256k1_v0_8_0_context_set_error_callback(rustsecp256k1_v0_8_0_context* ctx, void (*fun)(const char* message, void* data), const void* data) { + ARG_CHECK_NO_RETURN(ctx != rustsecp256k1_v0_8_0_context_static); if (fun == NULL) { - fun = rustsecp256k1_v0_7_0_default_error_callback_fn; + fun = rustsecp256k1_v0_8_0_default_error_callback_fn; } ctx->error_callback.fn = fun; ctx->error_callback.data = data; } -rustsecp256k1_v0_7_0_scratch_space* rustsecp256k1_v0_7_0_scratch_space_create(const rustsecp256k1_v0_7_0_context* ctx, size_t max_size) { +rustsecp256k1_v0_8_0_scratch_space* rustsecp256k1_v0_8_0_scratch_space_create(const rustsecp256k1_v0_8_0_context* ctx, size_t max_size) { VERIFY_CHECK(ctx != NULL); - return rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, max_size); + return rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, max_size); } -void rustsecp256k1_v0_7_0_scratch_space_destroy(const rustsecp256k1_v0_7_0_context *ctx, rustsecp256k1_v0_7_0_scratch_space* scratch) { +void rustsecp256k1_v0_8_0_scratch_space_destroy(const rustsecp256k1_v0_8_0_context *ctx, rustsecp256k1_v0_8_0_scratch_space* scratch) { VERIFY_CHECK(ctx != NULL); - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); } /* Mark memory as no-longer-secret for the purpose of analysing constant-time behaviour * of the software. This is setup for use with valgrind but could be substituted with * the appropriate instrumentation for other analysis tools. */ -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_declassify(const rustsecp256k1_v0_7_0_context* ctx, const void *p, size_t len) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_declassify(const rustsecp256k1_v0_8_0_context* ctx, const void *p, size_t len) { #if defined(VALGRIND) if (EXPECT(ctx->declassify,0)) VALGRIND_MAKE_MEM_DEFINED(p, len); #else @@ -192,59 +212,59 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_declassify(const rustsecp256k1 #endif } -static int rustsecp256k1_v0_7_0_pubkey_load(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ge* ge, const rustsecp256k1_v0_7_0_pubkey* pubkey) { - if (sizeof(rustsecp256k1_v0_7_0_ge_storage) == 64) { - /* When the rustsecp256k1_v0_7_0_ge_storage type is exactly 64 byte, use its - * representation inside rustsecp256k1_v0_7_0_pubkey, as conversion is very fast. - * Note that rustsecp256k1_v0_7_0_pubkey_save must use the same representation. 
*/ - rustsecp256k1_v0_7_0_ge_storage s; +static int rustsecp256k1_v0_8_0_pubkey_load(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ge* ge, const rustsecp256k1_v0_8_0_pubkey* pubkey) { + if (sizeof(rustsecp256k1_v0_8_0_ge_storage) == 64) { + /* When the rustsecp256k1_v0_8_0_ge_storage type is exactly 64 byte, use its + * representation inside rustsecp256k1_v0_8_0_pubkey, as conversion is very fast. + * Note that rustsecp256k1_v0_8_0_pubkey_save must use the same representation. */ + rustsecp256k1_v0_8_0_ge_storage s; memcpy(&s, &pubkey->data[0], sizeof(s)); - rustsecp256k1_v0_7_0_ge_from_storage(ge, &s); + rustsecp256k1_v0_8_0_ge_from_storage(ge, &s); } else { /* Otherwise, fall back to 32-byte big endian for X and Y. */ - rustsecp256k1_v0_7_0_fe x, y; - rustsecp256k1_v0_7_0_fe_set_b32(&x, pubkey->data); - rustsecp256k1_v0_7_0_fe_set_b32(&y, pubkey->data + 32); - rustsecp256k1_v0_7_0_ge_set_xy(ge, &x, &y); + rustsecp256k1_v0_8_0_fe x, y; + rustsecp256k1_v0_8_0_fe_set_b32(&x, pubkey->data); + rustsecp256k1_v0_8_0_fe_set_b32(&y, pubkey->data + 32); + rustsecp256k1_v0_8_0_ge_set_xy(ge, &x, &y); } - ARG_CHECK(!rustsecp256k1_v0_7_0_fe_is_zero(&ge->x)); + ARG_CHECK(!rustsecp256k1_v0_8_0_fe_is_zero(&ge->x)); return 1; } -static void rustsecp256k1_v0_7_0_pubkey_save(rustsecp256k1_v0_7_0_pubkey* pubkey, rustsecp256k1_v0_7_0_ge* ge) { - if (sizeof(rustsecp256k1_v0_7_0_ge_storage) == 64) { - rustsecp256k1_v0_7_0_ge_storage s; - rustsecp256k1_v0_7_0_ge_to_storage(&s, ge); +static void rustsecp256k1_v0_8_0_pubkey_save(rustsecp256k1_v0_8_0_pubkey* pubkey, rustsecp256k1_v0_8_0_ge* ge) { + if (sizeof(rustsecp256k1_v0_8_0_ge_storage) == 64) { + rustsecp256k1_v0_8_0_ge_storage s; + rustsecp256k1_v0_8_0_ge_to_storage(&s, ge); memcpy(&pubkey->data[0], &s, sizeof(s)); } else { - VERIFY_CHECK(!rustsecp256k1_v0_7_0_ge_is_infinity(ge)); - rustsecp256k1_v0_7_0_fe_normalize_var(&ge->x); - rustsecp256k1_v0_7_0_fe_normalize_var(&ge->y); - rustsecp256k1_v0_7_0_fe_get_b32(pubkey->data, &ge->x); - rustsecp256k1_v0_7_0_fe_get_b32(pubkey->data + 32, &ge->y); + VERIFY_CHECK(!rustsecp256k1_v0_8_0_ge_is_infinity(ge)); + rustsecp256k1_v0_8_0_fe_normalize_var(&ge->x); + rustsecp256k1_v0_8_0_fe_normalize_var(&ge->y); + rustsecp256k1_v0_8_0_fe_get_b32(pubkey->data, &ge->x); + rustsecp256k1_v0_8_0_fe_get_b32(pubkey->data + 32, &ge->y); } } -int rustsecp256k1_v0_7_0_ec_pubkey_parse(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_7_0_ge Q; +int rustsecp256k1_v0_8_0_ec_pubkey_parse(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey* pubkey, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_8_0_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); ARG_CHECK(input != NULL); - if (!rustsecp256k1_v0_7_0_eckey_pubkey_parse(&Q, input, inputlen)) { + if (!rustsecp256k1_v0_8_0_eckey_pubkey_parse(&Q, input, inputlen)) { return 0; } - if (!rustsecp256k1_v0_7_0_ge_is_in_correct_subgroup(&Q)) { + if (!rustsecp256k1_v0_8_0_ge_is_in_correct_subgroup(&Q)) { return 0; } - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &Q); - rustsecp256k1_v0_7_0_ge_clear(&Q); + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &Q); + rustsecp256k1_v0_8_0_ge_clear(&Q); return 1; } -int rustsecp256k1_v0_7_0_ec_pubkey_serialize(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_7_0_pubkey* pubkey, unsigned int flags) { - rustsecp256k1_v0_7_0_ge 
Q; +int rustsecp256k1_v0_8_0_ec_pubkey_serialize(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_8_0_pubkey* pubkey, unsigned int flags) { + rustsecp256k1_v0_8_0_ge Q; size_t len; int ret = 0; @@ -257,8 +277,8 @@ int rustsecp256k1_v0_7_0_ec_pubkey_serialize(const rustsecp256k1_v0_7_0_context* memset(output, 0, len); ARG_CHECK(pubkey != NULL); ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION); - if (rustsecp256k1_v0_7_0_pubkey_load(ctx, &Q, pubkey)) { - ret = rustsecp256k1_v0_7_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); + if (rustsecp256k1_v0_8_0_pubkey_load(ctx, &Q, pubkey)) { + ret = rustsecp256k1_v0_8_0_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); if (ret) { *outputlen = len; } @@ -266,9 +286,9 @@ int rustsecp256k1_v0_7_0_ec_pubkey_serialize(const rustsecp256k1_v0_7_0_context* return ret; } -int rustsecp256k1_v0_7_0_ec_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ctx, const rustsecp256k1_v0_7_0_pubkey* pubkey0, const rustsecp256k1_v0_7_0_pubkey* pubkey1) { +int rustsecp256k1_v0_8_0_ec_pubkey_cmp(const rustsecp256k1_v0_8_0_context* ctx, const rustsecp256k1_v0_8_0_pubkey* pubkey0, const rustsecp256k1_v0_8_0_pubkey* pubkey1) { unsigned char out[2][33]; - const rustsecp256k1_v0_7_0_pubkey* pk[2]; + const rustsecp256k1_v0_8_0_pubkey* pk[2]; int i; VERIFY_CHECK(ctx != NULL); @@ -281,7 +301,7 @@ int rustsecp256k1_v0_7_0_ec_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ctx, * results in consistent comparisons even if NULL or invalid pubkeys are * involved and prevents edge cases such as sorting algorithms that use * this function and do not terminate as a result. */ - if (!rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { + if (!rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { /* Note that ec_pubkey_serialize should already set the output to * zero in that case, but it's not guaranteed by the API, we can't * test it and writing a VERIFY_CHECK is more complex than @@ -289,42 +309,42 @@ int rustsecp256k1_v0_7_0_ec_pubkey_cmp(const rustsecp256k1_v0_7_0_context* ctx, memset(out[i], 0, sizeof(out[i])); } } - return rustsecp256k1_v0_7_0_memcmp_var(out[0], out[1], sizeof(out[0])); + return rustsecp256k1_v0_8_0_memcmp_var(out[0], out[1], sizeof(out[0])); } -static void rustsecp256k1_v0_7_0_ecdsa_signature_load(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_scalar* r, rustsecp256k1_v0_7_0_scalar* s, const rustsecp256k1_v0_7_0_ecdsa_signature* sig) { +static void rustsecp256k1_v0_8_0_ecdsa_signature_load(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_scalar* r, rustsecp256k1_v0_8_0_scalar* s, const rustsecp256k1_v0_8_0_ecdsa_signature* sig) { (void)ctx; - if (sizeof(rustsecp256k1_v0_7_0_scalar) == 32) { - /* When the rustsecp256k1_v0_7_0_scalar type is exactly 32 byte, use its - * representation inside rustsecp256k1_v0_7_0_ecdsa_signature, as conversion is very fast. - * Note that rustsecp256k1_v0_7_0_ecdsa_signature_save must use the same representation. */ + if (sizeof(rustsecp256k1_v0_8_0_scalar) == 32) { + /* When the rustsecp256k1_v0_8_0_scalar type is exactly 32 byte, use its + * representation inside rustsecp256k1_v0_8_0_ecdsa_signature, as conversion is very fast. + * Note that rustsecp256k1_v0_8_0_ecdsa_signature_save must use the same representation. 
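
Review note: `ec_pubkey_parse` above rejects off-curve points and (for exhaustive-test builds) wrong-subgroup points before saving, so a parse/serialize round trip doubles as a cheap validity check for untrusted 33-byte keys. Sketch:

```
#include "secp256k1.h" /* vendored header with the renamed symbols */

/* Validate an untrusted compressed key by parsing and re-serializing
 * it in compressed form; returns 1 iff the key is valid. */
int recompress_pubkey(const rustsecp256k1_v0_8_0_context *ctx,
                      unsigned char out33[33],
                      const unsigned char in33[33]) {
    rustsecp256k1_v0_8_0_pubkey pk;
    size_t outlen = 33;

    if (!rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pk, in33, 33)) return 0;
    return rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, out33, &outlen, &pk,
                                                    SECP256K1_EC_COMPRESSED);
}
```
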
*/ memcpy(r, &sig->data[0], 32); memcpy(s, &sig->data[32], 32); } else { - rustsecp256k1_v0_7_0_scalar_set_b32(r, &sig->data[0], NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(s, &sig->data[32], NULL); + rustsecp256k1_v0_8_0_scalar_set_b32(r, &sig->data[0], NULL); + rustsecp256k1_v0_8_0_scalar_set_b32(s, &sig->data[32], NULL); } } -static void rustsecp256k1_v0_7_0_ecdsa_signature_save(rustsecp256k1_v0_7_0_ecdsa_signature* sig, const rustsecp256k1_v0_7_0_scalar* r, const rustsecp256k1_v0_7_0_scalar* s) { - if (sizeof(rustsecp256k1_v0_7_0_scalar) == 32) { +static void rustsecp256k1_v0_8_0_ecdsa_signature_save(rustsecp256k1_v0_8_0_ecdsa_signature* sig, const rustsecp256k1_v0_8_0_scalar* r, const rustsecp256k1_v0_8_0_scalar* s) { + if (sizeof(rustsecp256k1_v0_8_0_scalar) == 32) { memcpy(&sig->data[0], r, 32); memcpy(&sig->data[32], s, 32); } else { - rustsecp256k1_v0_7_0_scalar_get_b32(&sig->data[0], r); - rustsecp256k1_v0_7_0_scalar_get_b32(&sig->data[32], s); + rustsecp256k1_v0_8_0_scalar_get_b32(&sig->data[0], r); + rustsecp256k1_v0_8_0_scalar_get_b32(&sig->data[32], s); } } -int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { + rustsecp256k1_v0_8_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(input != NULL); - if (rustsecp256k1_v0_7_0_ecdsa_sig_parse(&r, &s, input, inputlen)) { - rustsecp256k1_v0_7_0_ecdsa_signature_save(sig, &r, &s); + if (rustsecp256k1_v0_8_0_ecdsa_sig_parse(&r, &s, input, inputlen)) { + rustsecp256k1_v0_8_0_ecdsa_signature_save(sig, &r, &s); return 1; } else { memset(sig, 0, sizeof(*sig)); @@ -332,8 +352,8 @@ int rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(const rustsecp256k1_v0_7_0_co } } -int rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature* sig, const unsigned char *input64) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature* sig, const unsigned char *input64) { + rustsecp256k1_v0_8_0_scalar r, s; int ret = 1; int overflow = 0; @@ -341,76 +361,76 @@ int rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(const rustsecp256k1_v0_7_ ARG_CHECK(sig != NULL); ARG_CHECK(input64 != NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&r, &input64[0], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&r, &input64[0], &overflow); ret &= !overflow; - rustsecp256k1_v0_7_0_scalar_set_b32(&s, &input64[32], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&s, &input64[32], &overflow); ret &= !overflow; if (ret) { - rustsecp256k1_v0_7_0_ecdsa_signature_save(sig, &r, &s); + rustsecp256k1_v0_8_0_ecdsa_signature_save(sig, &r, &s); } else { memset(sig, 0, sizeof(*sig)); } return ret; } -int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_7_0_ecdsa_signature* sig) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output, size_t *outputlen, const rustsecp256k1_v0_8_0_ecdsa_signature* sig) { + rustsecp256k1_v0_8_0_scalar r, 
s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output != NULL); ARG_CHECK(outputlen != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, sig); - return rustsecp256k1_v0_7_0_ecdsa_sig_serialize(output, outputlen, &r, &s); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, sig); + return rustsecp256k1_v0_8_0_ecdsa_sig_serialize(output, outputlen, &r, &s); } -int rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_7_0_ecdsa_signature* sig) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *output64, const rustsecp256k1_v0_8_0_ecdsa_signature* sig) { + rustsecp256k1_v0_8_0_scalar r, s; VERIFY_CHECK(ctx != NULL); ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, sig); - rustsecp256k1_v0_7_0_scalar_get_b32(&output64[0], &r); - rustsecp256k1_v0_7_0_scalar_get_b32(&output64[32], &s); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, sig); + rustsecp256k1_v0_8_0_scalar_get_b32(&output64[0], &r); + rustsecp256k1_v0_8_0_scalar_get_b32(&output64[32], &s); return 1; } -int rustsecp256k1_v0_7_0_ecdsa_signature_normalize(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature *sigout, const rustsecp256k1_v0_7_0_ecdsa_signature *sigin) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_signature_normalize(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature *sigout, const rustsecp256k1_v0_8_0_ecdsa_signature *sigin) { + rustsecp256k1_v0_8_0_scalar r, s; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(sigin != NULL); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, sigin); - ret = rustsecp256k1_v0_7_0_scalar_is_high(&s); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, sigin); + ret = rustsecp256k1_v0_8_0_scalar_is_high(&s); if (sigout != NULL) { if (ret) { - rustsecp256k1_v0_7_0_scalar_negate(&s, &s); + rustsecp256k1_v0_8_0_scalar_negate(&s, &s); } - rustsecp256k1_v0_7_0_ecdsa_signature_save(sigout, &r, &s); + rustsecp256k1_v0_8_0_ecdsa_signature_save(sigout, &r, &s); } return ret; } -int rustsecp256k1_v0_7_0_ecdsa_verify(const rustsecp256k1_v0_7_0_context* ctx, const rustsecp256k1_v0_7_0_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_7_0_pubkey *pubkey) { - rustsecp256k1_v0_7_0_ge q; - rustsecp256k1_v0_7_0_scalar r, s; - rustsecp256k1_v0_7_0_scalar m; +int rustsecp256k1_v0_8_0_ecdsa_verify(const rustsecp256k1_v0_8_0_context* ctx, const rustsecp256k1_v0_8_0_ecdsa_signature *sig, const unsigned char *msghash32, const rustsecp256k1_v0_8_0_pubkey *pubkey) { + rustsecp256k1_v0_8_0_ge q; + rustsecp256k1_v0_8_0_scalar r, s; + rustsecp256k1_v0_8_0_scalar m; VERIFY_CHECK(ctx != NULL); ARG_CHECK(msghash32 != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(pubkey != NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&m, msghash32, NULL); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, sig); - return (!rustsecp256k1_v0_7_0_scalar_is_high(&s) && - rustsecp256k1_v0_7_0_pubkey_load(ctx, &q, pubkey) && - rustsecp256k1_v0_7_0_ecdsa_sig_verify(&r, &s, &q, &m)); + rustsecp256k1_v0_8_0_scalar_set_b32(&m, msghash32, NULL); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, sig); + return (!rustsecp256k1_v0_8_0_scalar_is_high(&s) && + rustsecp256k1_v0_8_0_pubkey_load(ctx, &q, pubkey) && + 
rustsecp256k1_v0_8_0_ecdsa_sig_verify(&r, &s, &q, &m)); } static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) { @@ -421,10 +441,14 @@ static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *off static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { unsigned char keydata[112]; unsigned int offset = 0; - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 rng; unsigned int i; + rustsecp256k1_v0_8_0_scalar msg; + unsigned char msgmod32[32]; + rustsecp256k1_v0_8_0_scalar_set_b32(&msg, msg32, NULL); + rustsecp256k1_v0_8_0_scalar_get_b32(msgmod32, &msg); /* We feed a byte array to the PRNG as input, consisting of: - * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d. + * - the private key (32 bytes) and reduced message (32 bytes), see RFC 6979 3.2d. * - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data. * - optionally 16 extra bytes with the algorithm name. * Because the arguments have distinct fixed lengths it is not possible for @@ -432,58 +456,58 @@ static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *m * nonces. */ buffer_append(keydata, &offset, key32, 32); - buffer_append(keydata, &offset, msg32, 32); + buffer_append(keydata, &offset, msgmod32, 32); if (data != NULL) { buffer_append(keydata, &offset, data, 32); } if (algo16 != NULL) { buffer_append(keydata, &offset, algo16, 16); } - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(&rng, keydata, offset); memset(keydata, 0, sizeof(keydata)); for (i = 0; i <= counter; i++) { - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); } - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(&rng); return 1; } -const rustsecp256k1_v0_7_0_nonce_function rustsecp256k1_v0_7_0_nonce_function_rfc6979 = nonce_function_rfc6979; -const rustsecp256k1_v0_7_0_nonce_function rustsecp256k1_v0_7_0_nonce_function_default = nonce_function_rfc6979; +const rustsecp256k1_v0_8_0_nonce_function rustsecp256k1_v0_8_0_nonce_function_rfc6979 = nonce_function_rfc6979; +const rustsecp256k1_v0_8_0_nonce_function rustsecp256k1_v0_8_0_nonce_function_default = nonce_function_rfc6979; -static int rustsecp256k1_v0_7_0_ecdsa_sign_inner(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_scalar* r, rustsecp256k1_v0_7_0_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_7_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_7_0_scalar sec, non, msg; +static int rustsecp256k1_v0_8_0_ecdsa_sign_inner(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_scalar* r, rustsecp256k1_v0_8_0_scalar* s, int* recid, const unsigned char *msg32, const unsigned char *seckey, rustsecp256k1_v0_8_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_8_0_scalar sec, non, msg; int ret = 0; int is_sec_valid; unsigned char nonce32[32]; unsigned int count = 0; /* Default initialization here is important so we won't pass uninit values to the cmov in the end */ - *r = rustsecp256k1_v0_7_0_scalar_zero; - *s = rustsecp256k1_v0_7_0_scalar_zero; + *r = 
rustsecp256k1_v0_8_0_scalar_zero; + *s = rustsecp256k1_v0_8_0_scalar_zero; if (recid) { *recid = 0; } if (noncefp == NULL) { - noncefp = rustsecp256k1_v0_7_0_nonce_function_default; + noncefp = rustsecp256k1_v0_8_0_nonce_function_default; } /* Fail if the secret key is invalid. */ - is_sec_valid = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_7_0_scalar_cmov(&sec, &rustsecp256k1_v0_7_0_scalar_one, !is_sec_valid); - rustsecp256k1_v0_7_0_scalar_set_b32(&msg, msg32, NULL); + is_sec_valid = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_8_0_scalar_cmov(&sec, &rustsecp256k1_v0_8_0_scalar_one, !is_sec_valid); + rustsecp256k1_v0_8_0_scalar_set_b32(&msg, msg32, NULL); while (1) { int is_nonce_valid; ret = !!noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } - is_nonce_valid = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&non, nonce32); + is_nonce_valid = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&non, nonce32); /* The nonce is still secret here, but it being invalid is is less likely than 1:2^255. */ - rustsecp256k1_v0_7_0_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); + rustsecp256k1_v0_8_0_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid)); if (is_nonce_valid) { - ret = rustsecp256k1_v0_7_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); + ret = rustsecp256k1_v0_8_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid); /* The final signature is no longer a secret, nor is the fact that we were successful or not. */ - rustsecp256k1_v0_7_0_declassify(ctx, &ret, sizeof(ret)); + rustsecp256k1_v0_8_0_declassify(ctx, &ret, sizeof(ret)); if (ret) { break; } @@ -495,202 +519,202 @@ static int rustsecp256k1_v0_7_0_ecdsa_sign_inner(const rustsecp256k1_v0_7_0_cont * used as a branching variable. 
*/ ret &= is_sec_valid; memset(nonce32, 0, 32); - rustsecp256k1_v0_7_0_scalar_clear(&msg); - rustsecp256k1_v0_7_0_scalar_clear(&non); - rustsecp256k1_v0_7_0_scalar_clear(&sec); - rustsecp256k1_v0_7_0_scalar_cmov(r, &rustsecp256k1_v0_7_0_scalar_zero, !ret); - rustsecp256k1_v0_7_0_scalar_cmov(s, &rustsecp256k1_v0_7_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_clear(&msg); + rustsecp256k1_v0_8_0_scalar_clear(&non); + rustsecp256k1_v0_8_0_scalar_clear(&sec); + rustsecp256k1_v0_8_0_scalar_cmov(r, &rustsecp256k1_v0_8_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_cmov(s, &rustsecp256k1_v0_8_0_scalar_zero, !ret); if (recid) { const int zero = 0; - rustsecp256k1_v0_7_0_int_cmov(recid, &zero, !ret); + rustsecp256k1_v0_8_0_int_cmov(recid, &zero, !ret); } return ret; } -int rustsecp256k1_v0_7_0_ecdsa_sign(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_7_0_nonce_function noncefp, const void* noncedata) { - rustsecp256k1_v0_7_0_scalar r, s; +int rustsecp256k1_v0_8_0_ecdsa_sign(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_ecdsa_signature *signature, const unsigned char *msghash32, const unsigned char *seckey, rustsecp256k1_v0_8_0_nonce_function noncefp, const void* noncedata) { + rustsecp256k1_v0_8_0_scalar r, s; int ret; VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(msghash32 != NULL); ARG_CHECK(signature != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_7_0_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); - rustsecp256k1_v0_7_0_ecdsa_signature_save(signature, &r, &s); + ret = rustsecp256k1_v0_8_0_ecdsa_sign_inner(ctx, &r, &s, NULL, msghash32, seckey, noncefp, noncedata); + rustsecp256k1_v0_8_0_ecdsa_signature_save(signature, &r, &s); return ret; } -int rustsecp256k1_v0_7_0_ec_seckey_verify(const rustsecp256k1_v0_7_0_context* ctx, const unsigned char *seckey) { - rustsecp256k1_v0_7_0_scalar sec; +int rustsecp256k1_v0_8_0_ec_seckey_verify(const rustsecp256k1_v0_8_0_context* ctx, const unsigned char *seckey) { + rustsecp256k1_v0_8_0_scalar sec; int ret; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_7_0_scalar_clear(&sec); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_8_0_scalar_clear(&sec); return ret; } -static int rustsecp256k1_v0_7_0_ec_pubkey_create_helper(const rustsecp256k1_v0_7_0_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_7_0_scalar *seckey_scalar, rustsecp256k1_v0_7_0_ge *p, const unsigned char *seckey) { - rustsecp256k1_v0_7_0_gej pj; +static int rustsecp256k1_v0_8_0_ec_pubkey_create_helper(const rustsecp256k1_v0_8_0_ecmult_gen_context *ecmult_gen_ctx, rustsecp256k1_v0_8_0_scalar *seckey_scalar, rustsecp256k1_v0_8_0_ge *p, const unsigned char *seckey) { + rustsecp256k1_v0_8_0_gej pj; int ret; - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(seckey_scalar, seckey); - rustsecp256k1_v0_7_0_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_7_0_scalar_one, !ret); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(seckey_scalar, seckey); + rustsecp256k1_v0_8_0_scalar_cmov(seckey_scalar, &rustsecp256k1_v0_8_0_scalar_one, !ret); - rustsecp256k1_v0_7_0_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); - 
rustsecp256k1_v0_7_0_ge_set_gej(p, &pj); + rustsecp256k1_v0_8_0_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar); + rustsecp256k1_v0_8_0_ge_set_gej(p, &pj); return ret; } -int rustsecp256k1_v0_7_0_ec_pubkey_create(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey, const unsigned char *seckey) { - rustsecp256k1_v0_7_0_ge p; - rustsecp256k1_v0_7_0_scalar seckey_scalar; +int rustsecp256k1_v0_8_0_ec_pubkey_create(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *seckey) { + rustsecp256k1_v0_8_0_ge p; + rustsecp256k1_v0_8_0_scalar seckey_scalar; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); - ARG_CHECK(rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + ARG_CHECK(rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_7_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &p); - rustsecp256k1_v0_7_0_memczero(pubkey, sizeof(*pubkey), !ret); + ret = rustsecp256k1_v0_8_0_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey); + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_8_0_memczero(pubkey, sizeof(*pubkey), !ret); - rustsecp256k1_v0_7_0_scalar_clear(&seckey_scalar); + rustsecp256k1_v0_8_0_scalar_clear(&seckey_scalar); return ret; } -int rustsecp256k1_v0_7_0_ec_seckey_negate(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey) { - rustsecp256k1_v0_7_0_scalar sec; +int rustsecp256k1_v0_8_0_ec_seckey_negate(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey) { + rustsecp256k1_v0_8_0_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - rustsecp256k1_v0_7_0_scalar_cmov(&sec, &rustsecp256k1_v0_7_0_scalar_zero, !ret); - rustsecp256k1_v0_7_0_scalar_negate(&sec, &sec); - rustsecp256k1_v0_7_0_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + rustsecp256k1_v0_8_0_scalar_cmov(&sec, &rustsecp256k1_v0_8_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_negate(&sec, &sec); + rustsecp256k1_v0_8_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_7_0_scalar_clear(&sec); + rustsecp256k1_v0_8_0_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_7_0_ec_privkey_negate(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey) { - return rustsecp256k1_v0_7_0_ec_seckey_negate(ctx, seckey); +int rustsecp256k1_v0_8_0_ec_privkey_negate(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey) { + return rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, seckey); } -int rustsecp256k1_v0_7_0_ec_pubkey_negate(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey) { +int rustsecp256k1_v0_8_0_ec_pubkey_negate(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey) { int ret = 0; - rustsecp256k1_v0_7_0_ge p; + rustsecp256k1_v0_8_0_ge p; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); - ret = rustsecp256k1_v0_7_0_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_8_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - rustsecp256k1_v0_7_0_ge_neg(&p, &p); - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_8_0_ge_neg(&p, &p); + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &p); } return ret; } -static int 
rustsecp256k1_v0_7_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_7_0_scalar *sec, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_scalar term; +static int rustsecp256k1_v0_8_0_ec_seckey_tweak_add_helper(rustsecp256k1_v0_8_0_scalar *sec, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_scalar term; int overflow = 0; int ret = 0; - rustsecp256k1_v0_7_0_scalar_set_b32(&term, tweak32, &overflow); - ret = (!overflow) & rustsecp256k1_v0_7_0_eckey_privkey_tweak_add(sec, &term); - rustsecp256k1_v0_7_0_scalar_clear(&term); + rustsecp256k1_v0_8_0_scalar_set_b32(&term, tweak32, &overflow); + ret = (!overflow) & rustsecp256k1_v0_8_0_eckey_privkey_tweak_add(sec, &term); + rustsecp256k1_v0_8_0_scalar_clear(&term); return ret; } -int rustsecp256k1_v0_7_0_ec_seckey_tweak_add(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_scalar sec; +int rustsecp256k1_v0_8_0_ec_seckey_tweak_add(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_scalar sec; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - ret &= rustsecp256k1_v0_7_0_ec_seckey_tweak_add_helper(&sec, tweak32); - rustsecp256k1_v0_7_0_scalar_cmov(&sec, &rustsecp256k1_v0_7_0_scalar_zero, !ret); - rustsecp256k1_v0_7_0_scalar_get_b32(seckey, &sec); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + ret &= rustsecp256k1_v0_8_0_ec_seckey_tweak_add_helper(&sec, tweak32); + rustsecp256k1_v0_8_0_scalar_cmov(&sec, &rustsecp256k1_v0_8_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_7_0_scalar_clear(&sec); + rustsecp256k1_v0_8_0_scalar_clear(&sec); return ret; } -int rustsecp256k1_v0_7_0_ec_privkey_tweak_add(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - return rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, seckey, tweak32); +int rustsecp256k1_v0_8_0_ec_privkey_tweak_add(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, seckey, tweak32); } -static int rustsecp256k1_v0_7_0_ec_pubkey_tweak_add_helper(rustsecp256k1_v0_7_0_ge *p, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_scalar term; +static int rustsecp256k1_v0_8_0_ec_pubkey_tweak_add_helper(rustsecp256k1_v0_8_0_ge *p, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_scalar term; int overflow = 0; - rustsecp256k1_v0_7_0_scalar_set_b32(&term, tweak32, &overflow); - return !overflow && rustsecp256k1_v0_7_0_eckey_pubkey_tweak_add(p, &term); + rustsecp256k1_v0_8_0_scalar_set_b32(&term, tweak32, &overflow); + return !overflow && rustsecp256k1_v0_8_0_eckey_pubkey_tweak_add(p, &term); } -int rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_ge p; +int rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_ge p; int ret = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - ret = rustsecp256k1_v0_7_0_pubkey_load(ctx, &p, pubkey); + ret = rustsecp256k1_v0_8_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); - ret = ret && 
rustsecp256k1_v0_7_0_ec_pubkey_tweak_add_helper(&p, tweak32); + ret = ret && rustsecp256k1_v0_8_0_ec_pubkey_tweak_add_helper(&p, tweak32); if (ret) { - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &p); + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &p); } return ret; } -int rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_scalar factor; - rustsecp256k1_v0_7_0_scalar sec; +int rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_scalar factor; + rustsecp256k1_v0_8_0_scalar sec; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&factor, tweak32, &overflow); - ret = rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&sec, seckey); - ret &= (!overflow) & rustsecp256k1_v0_7_0_eckey_privkey_tweak_mul(&sec, &factor); - rustsecp256k1_v0_7_0_scalar_cmov(&sec, &rustsecp256k1_v0_7_0_scalar_zero, !ret); - rustsecp256k1_v0_7_0_scalar_get_b32(seckey, &sec); + rustsecp256k1_v0_8_0_scalar_set_b32(&factor, tweak32, &overflow); + ret = rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&sec, seckey); + ret &= (!overflow) & rustsecp256k1_v0_8_0_eckey_privkey_tweak_mul(&sec, &factor); + rustsecp256k1_v0_8_0_scalar_cmov(&sec, &rustsecp256k1_v0_8_0_scalar_zero, !ret); + rustsecp256k1_v0_8_0_scalar_get_b32(seckey, &sec); - rustsecp256k1_v0_7_0_scalar_clear(&sec); - rustsecp256k1_v0_7_0_scalar_clear(&factor); + rustsecp256k1_v0_8_0_scalar_clear(&sec); + rustsecp256k1_v0_8_0_scalar_clear(&factor); return ret; } -int rustsecp256k1_v0_7_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { - return rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, seckey, tweak32); +int rustsecp256k1_v0_8_0_ec_privkey_tweak_mul(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *seckey, const unsigned char *tweak32) { + return rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, seckey, tweak32); } -int rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubkey, const unsigned char *tweak32) { - rustsecp256k1_v0_7_0_ge p; - rustsecp256k1_v0_7_0_scalar factor; +int rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubkey, const unsigned char *tweak32) { + rustsecp256k1_v0_8_0_ge p; + rustsecp256k1_v0_8_0_scalar factor; int ret = 0; int overflow = 0; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); ARG_CHECK(tweak32 != NULL); - rustsecp256k1_v0_7_0_scalar_set_b32(&factor, tweak32, &overflow); - ret = !overflow && rustsecp256k1_v0_7_0_pubkey_load(ctx, &p, pubkey); + rustsecp256k1_v0_8_0_scalar_set_b32(&factor, tweak32, &overflow); + ret = !overflow && rustsecp256k1_v0_8_0_pubkey_load(ctx, &p, pubkey); memset(pubkey, 0, sizeof(*pubkey)); if (ret) { - if (rustsecp256k1_v0_7_0_eckey_pubkey_tweak_mul(&p, &factor)) { - rustsecp256k1_v0_7_0_pubkey_save(pubkey, &p); + if (rustsecp256k1_v0_8_0_eckey_pubkey_tweak_mul(&p, &factor)) { + rustsecp256k1_v0_8_0_pubkey_save(pubkey, &p); } else { ret = 0; } @@ -699,18 +723,18 @@ int rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(const rustsecp256k1_v0_7_0_context* return ret; } -int rustsecp256k1_v0_7_0_context_randomize(rustsecp256k1_v0_7_0_context* ctx, const unsigned char *seed32) { +int 
rustsecp256k1_v0_8_0_context_randomize(rustsecp256k1_v0_8_0_context* ctx, const unsigned char *seed32) { VERIFY_CHECK(ctx != NULL); - if (rustsecp256k1_v0_7_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { - rustsecp256k1_v0_7_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + if (rustsecp256k1_v0_8_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + rustsecp256k1_v0_8_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); } return 1; } -int rustsecp256k1_v0_7_0_ec_pubkey_combine(const rustsecp256k1_v0_7_0_context* ctx, rustsecp256k1_v0_7_0_pubkey *pubnonce, const rustsecp256k1_v0_7_0_pubkey * const *pubnonces, size_t n) { +int rustsecp256k1_v0_8_0_ec_pubkey_combine(const rustsecp256k1_v0_8_0_context* ctx, rustsecp256k1_v0_8_0_pubkey *pubnonce, const rustsecp256k1_v0_8_0_pubkey * const *pubnonces, size_t n) { size_t i; - rustsecp256k1_v0_7_0_gej Qj; - rustsecp256k1_v0_7_0_ge Q; + rustsecp256k1_v0_8_0_gej Qj; + rustsecp256k1_v0_8_0_ge Q; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubnonce != NULL); @@ -718,31 +742,31 @@ int rustsecp256k1_v0_7_0_ec_pubkey_combine(const rustsecp256k1_v0_7_0_context* c ARG_CHECK(n >= 1); ARG_CHECK(pubnonces != NULL); - rustsecp256k1_v0_7_0_gej_set_infinity(&Qj); + rustsecp256k1_v0_8_0_gej_set_infinity(&Qj); for (i = 0; i < n; i++) { ARG_CHECK(pubnonces[i] != NULL); - rustsecp256k1_v0_7_0_pubkey_load(ctx, &Q, pubnonces[i]); - rustsecp256k1_v0_7_0_gej_add_ge(&Qj, &Qj, &Q); + rustsecp256k1_v0_8_0_pubkey_load(ctx, &Q, pubnonces[i]); + rustsecp256k1_v0_8_0_gej_add_ge(&Qj, &Qj, &Q); } - if (rustsecp256k1_v0_7_0_gej_is_infinity(&Qj)) { + if (rustsecp256k1_v0_8_0_gej_is_infinity(&Qj)) { return 0; } - rustsecp256k1_v0_7_0_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_7_0_pubkey_save(pubnonce, &Q); + rustsecp256k1_v0_8_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_8_0_pubkey_save(pubnonce, &Q); return 1; } -int rustsecp256k1_v0_7_0_tagged_sha256(const rustsecp256k1_v0_7_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { - rustsecp256k1_v0_7_0_sha256 sha; +int rustsecp256k1_v0_8_0_tagged_sha256(const rustsecp256k1_v0_8_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { + rustsecp256k1_v0_8_0_sha256 sha; VERIFY_CHECK(ctx != NULL); ARG_CHECK(hash32 != NULL); ARG_CHECK(tag != NULL); ARG_CHECK(msg != NULL); - rustsecp256k1_v0_7_0_sha256_initialize_tagged(&sha, tag, taglen); - rustsecp256k1_v0_7_0_sha256_write(&sha, msg, msglen); - rustsecp256k1_v0_7_0_sha256_finalize(&sha, hash32); + rustsecp256k1_v0_8_0_sha256_initialize_tagged(&sha, tag, taglen); + rustsecp256k1_v0_8_0_sha256_write(&sha, msg, msglen); + rustsecp256k1_v0_8_0_sha256_finalize(&sha, hash32); return 1; } diff --git a/secp256k1-sys/depend/secp256k1/src/selftest.h b/secp256k1-sys/depend/secp256k1/src/selftest.h index c9c468770..e017ecf0d 100644 --- a/secp256k1-sys/depend/secp256k1/src/selftest.h +++ b/secp256k1-sys/depend/secp256k1/src/selftest.h @@ -11,22 +11,22 @@ #include <string.h> -static int rustsecp256k1_v0_7_0_selftest_sha256(void) { +static int rustsecp256k1_v0_8_0_selftest_sha256(void) { static const char *input63 = "For this sample, this 63-byte string will be used as input data"; static const unsigned char output32[32] = { 0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42, }; unsigned char out[32]; - rustsecp256k1_v0_7_0_sha256 hasher; -
rustsecp256k1_v0_7_0_sha256_initialize(&hasher); - rustsecp256k1_v0_7_0_sha256_write(&hasher, (const unsigned char*)input63, 63); - rustsecp256k1_v0_7_0_sha256_finalize(&hasher, out); - return rustsecp256k1_v0_7_0_memcmp_var(out, output32, 32) == 0; + rustsecp256k1_v0_8_0_sha256 hasher; + rustsecp256k1_v0_8_0_sha256_initialize(&hasher); + rustsecp256k1_v0_8_0_sha256_write(&hasher, (const unsigned char*)input63, 63); + rustsecp256k1_v0_8_0_sha256_finalize(&hasher, out); + return rustsecp256k1_v0_8_0_memcmp_var(out, output32, 32) == 0; } -static int rustsecp256k1_v0_7_0_selftest(void) { - return rustsecp256k1_v0_7_0_selftest_sha256(); +static int rustsecp256k1_v0_8_0_selftest_passes(void) { + return rustsecp256k1_v0_8_0_selftest_sha256(); } #endif /* SECP256K1_SELFTEST_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/testrand.h b/secp256k1-sys/depend/secp256k1/src/testrand.h index 5ba3d117f..45200ed2e 100644 --- a/secp256k1-sys/depend/secp256k1/src/testrand.h +++ b/secp256k1-sys/depend/secp256k1/src/testrand.h @@ -14,37 +14,37 @@ /* A non-cryptographic RNG used only for test infrastructure. */ /** Seed the pseudorandom number generator for testing. */ -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_testrand_seed(const unsigned char *seed16); +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_testrand_seed(const unsigned char *seed16); /** Generate a pseudorandom number in the range [0..2**32-1]. */ -SECP256K1_INLINE static uint32_t rustsecp256k1_v0_7_0_testrand32(void); +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_8_0_testrand32(void); /** Generate a pseudorandom number in the range [0..2**64-1]. */ -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_7_0_testrand64(void); +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_8_0_testrand64(void); /** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or * more. */ -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_7_0_testrand_bits(int bits); +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_8_0_testrand_bits(int bits); /** Generate a pseudorandom number in the range [0..range-1]. */ -static uint32_t rustsecp256k1_v0_7_0_testrand_int(uint32_t range); +static uint32_t rustsecp256k1_v0_8_0_testrand_int(uint32_t range); /** Generate a pseudorandom 32-byte array. */ -static void rustsecp256k1_v0_7_0_testrand256(unsigned char *b32); +static void rustsecp256k1_v0_8_0_testrand256(unsigned char *b32); /** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */ -static void rustsecp256k1_v0_7_0_testrand256_test(unsigned char *b32); +static void rustsecp256k1_v0_8_0_testrand256_test(unsigned char *b32); /** Generate pseudorandom bytes with long sequences of zero and one bits. */ -static void rustsecp256k1_v0_7_0_testrand_bytes_test(unsigned char *bytes, size_t len); +static void rustsecp256k1_v0_8_0_testrand_bytes_test(unsigned char *bytes, size_t len); /** Flip a single random bit in a byte array */ -static void rustsecp256k1_v0_7_0_testrand_flip(unsigned char *b, size_t len); +static void rustsecp256k1_v0_8_0_testrand_flip(unsigned char *b, size_t len); /** Initialize the test RNG using (hex encoded) array up to 16 bytes, or randomly if hexseed is NULL. */ -static void rustsecp256k1_v0_7_0_testrand_init(const char* hexseed); +static void rustsecp256k1_v0_8_0_testrand_init(const char* hexseed); /** Print final test information. 
*/ -static void rustsecp256k1_v0_7_0_testrand_finish(void); +static void rustsecp256k1_v0_8_0_testrand_finish(void); #endif /* SECP256K1_TESTRAND_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/testrand_impl.h b/secp256k1-sys/depend/secp256k1/src/testrand_impl.h index 0837bfc9b..dd1564e77 100644 --- a/secp256k1-sys/depend/secp256k1/src/testrand_impl.h +++ b/secp256k1-sys/depend/secp256k1/src/testrand_impl.h @@ -14,65 +14,65 @@ #include "testrand.h" #include "hash.h" -static uint64_t rustsecp256k1_v0_7_0_test_state[4]; -static uint64_t rustsecp256k1_v0_7_0_test_rng_integer; -static int rustsecp256k1_v0_7_0_test_rng_integer_bits_left = 0; +static uint64_t rustsecp256k1_v0_8_0_test_state[4]; +static uint64_t rustsecp256k1_v0_8_0_test_rng_integer; +static int rustsecp256k1_v0_8_0_test_rng_integer_bits_left = 0; -SECP256K1_INLINE static void rustsecp256k1_v0_7_0_testrand_seed(const unsigned char *seed16) { +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_testrand_seed(const unsigned char *seed16) { static const unsigned char PREFIX[19] = "secp256k1 test init"; unsigned char out32[32]; - rustsecp256k1_v0_7_0_sha256 hash; + rustsecp256k1_v0_8_0_sha256 hash; int i; /* Use SHA256(PREFIX || seed16) as initial state. */ - rustsecp256k1_v0_7_0_sha256_initialize(&hash); - rustsecp256k1_v0_7_0_sha256_write(&hash, PREFIX, sizeof(PREFIX)); - rustsecp256k1_v0_7_0_sha256_write(&hash, seed16, 16); - rustsecp256k1_v0_7_0_sha256_finalize(&hash, out32); + rustsecp256k1_v0_8_0_sha256_initialize(&hash); + rustsecp256k1_v0_8_0_sha256_write(&hash, PREFIX, sizeof(PREFIX)); + rustsecp256k1_v0_8_0_sha256_write(&hash, seed16, 16); + rustsecp256k1_v0_8_0_sha256_finalize(&hash, out32); for (i = 0; i < 4; ++i) { uint64_t s = 0; int j; for (j = 0; j < 8; ++j) s = (s << 8) | out32[8*i + j]; - rustsecp256k1_v0_7_0_test_state[i] = s; + rustsecp256k1_v0_8_0_test_state[i] = s; } - rustsecp256k1_v0_7_0_test_rng_integer_bits_left = 0; + rustsecp256k1_v0_8_0_test_rng_integer_bits_left = 0; } SECP256K1_INLINE static uint64_t rotl(const uint64_t x, int k) { return (x << k) | (x >> (64 - k)); } -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_7_0_testrand64(void) { +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_8_0_testrand64(void) { /* Test-only Xoshiro256++ RNG. 
See https://prng.di.unimi.it/ */ - const uint64_t result = rotl(rustsecp256k1_v0_7_0_test_state[0] + rustsecp256k1_v0_7_0_test_state[3], 23) + rustsecp256k1_v0_7_0_test_state[0]; - const uint64_t t = rustsecp256k1_v0_7_0_test_state[1] << 17; - rustsecp256k1_v0_7_0_test_state[2] ^= rustsecp256k1_v0_7_0_test_state[0]; - rustsecp256k1_v0_7_0_test_state[3] ^= rustsecp256k1_v0_7_0_test_state[1]; - rustsecp256k1_v0_7_0_test_state[1] ^= rustsecp256k1_v0_7_0_test_state[2]; - rustsecp256k1_v0_7_0_test_state[0] ^= rustsecp256k1_v0_7_0_test_state[3]; - rustsecp256k1_v0_7_0_test_state[2] ^= t; - rustsecp256k1_v0_7_0_test_state[3] = rotl(rustsecp256k1_v0_7_0_test_state[3], 45); + const uint64_t result = rotl(rustsecp256k1_v0_8_0_test_state[0] + rustsecp256k1_v0_8_0_test_state[3], 23) + rustsecp256k1_v0_8_0_test_state[0]; + const uint64_t t = rustsecp256k1_v0_8_0_test_state[1] << 17; + rustsecp256k1_v0_8_0_test_state[2] ^= rustsecp256k1_v0_8_0_test_state[0]; + rustsecp256k1_v0_8_0_test_state[3] ^= rustsecp256k1_v0_8_0_test_state[1]; + rustsecp256k1_v0_8_0_test_state[1] ^= rustsecp256k1_v0_8_0_test_state[2]; + rustsecp256k1_v0_8_0_test_state[0] ^= rustsecp256k1_v0_8_0_test_state[3]; + rustsecp256k1_v0_8_0_test_state[2] ^= t; + rustsecp256k1_v0_8_0_test_state[3] = rotl(rustsecp256k1_v0_8_0_test_state[3], 45); return result; } -SECP256K1_INLINE static uint64_t rustsecp256k1_v0_7_0_testrand_bits(int bits) { +SECP256K1_INLINE static uint64_t rustsecp256k1_v0_8_0_testrand_bits(int bits) { uint64_t ret; - if (rustsecp256k1_v0_7_0_test_rng_integer_bits_left < bits) { - rustsecp256k1_v0_7_0_test_rng_integer = rustsecp256k1_v0_7_0_testrand64(); - rustsecp256k1_v0_7_0_test_rng_integer_bits_left = 64; + if (rustsecp256k1_v0_8_0_test_rng_integer_bits_left < bits) { + rustsecp256k1_v0_8_0_test_rng_integer = rustsecp256k1_v0_8_0_testrand64(); + rustsecp256k1_v0_8_0_test_rng_integer_bits_left = 64; } - ret = rustsecp256k1_v0_7_0_test_rng_integer; - rustsecp256k1_v0_7_0_test_rng_integer >>= bits; - rustsecp256k1_v0_7_0_test_rng_integer_bits_left -= bits; + ret = rustsecp256k1_v0_8_0_test_rng_integer; + rustsecp256k1_v0_8_0_test_rng_integer >>= bits; + rustsecp256k1_v0_8_0_test_rng_integer_bits_left -= bits; ret &= ((~((uint64_t)0)) >> (64 - bits)); return ret; } -SECP256K1_INLINE static uint32_t rustsecp256k1_v0_7_0_testrand32(void) { - return rustsecp256k1_v0_7_0_testrand_bits(32); +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_8_0_testrand32(void) { + return rustsecp256k1_v0_8_0_testrand_bits(32); } -static uint32_t rustsecp256k1_v0_7_0_testrand_int(uint32_t range) { +static uint32_t rustsecp256k1_v0_8_0_testrand_int(uint32_t range) { /* We want a uniform integer between 0 and range-1, inclusive. * B is the smallest number such that range <= 2**B. * two mechanisms implemented here: @@ -104,17 +104,17 @@ static uint32_t rustsecp256k1_v0_7_0_testrand_int(uint32_t range) { mult = 1; } while(1) { - uint32_t x = rustsecp256k1_v0_7_0_testrand_bits(bits); + uint32_t x = rustsecp256k1_v0_8_0_testrand_bits(bits); if (x < trange) { return (mult == 1) ? 
x : (x % range); } } } -static void rustsecp256k1_v0_7_0_testrand256(unsigned char *b32) { +static void rustsecp256k1_v0_8_0_testrand256(unsigned char *b32) { int i; for (i = 0; i < 4; ++i) { - uint64_t val = rustsecp256k1_v0_7_0_testrand64(); + uint64_t val = rustsecp256k1_v0_8_0_testrand64(); b32[0] = val; b32[1] = val >> 8; b32[2] = val >> 16; @@ -127,14 +127,14 @@ static void rustsecp256k1_v0_7_0_testrand256(unsigned char *b32) { } } -static void rustsecp256k1_v0_7_0_testrand_bytes_test(unsigned char *bytes, size_t len) { +static void rustsecp256k1_v0_8_0_testrand_bytes_test(unsigned char *bytes, size_t len) { size_t bits = 0; memset(bytes, 0, len); while (bits < len * 8) { int now; uint32_t val; - now = 1 + (rustsecp256k1_v0_7_0_testrand_bits(6) * rustsecp256k1_v0_7_0_testrand_bits(5) + 16) / 31; - val = rustsecp256k1_v0_7_0_testrand_bits(1); + now = 1 + (rustsecp256k1_v0_8_0_testrand_bits(6) * rustsecp256k1_v0_8_0_testrand_bits(5) + 16) / 31; + val = rustsecp256k1_v0_8_0_testrand_bits(1); while (now > 0 && bits < len * 8) { bytes[bits / 8] |= val << (bits % 8); now--; @@ -143,15 +143,15 @@ static void rustsecp256k1_v0_7_0_testrand_bytes_test(unsigned char *bytes, size_ } } -static void rustsecp256k1_v0_7_0_testrand256_test(unsigned char *b32) { - rustsecp256k1_v0_7_0_testrand_bytes_test(b32, 32); +static void rustsecp256k1_v0_8_0_testrand256_test(unsigned char *b32) { + rustsecp256k1_v0_8_0_testrand_bytes_test(b32, 32); } -static void rustsecp256k1_v0_7_0_testrand_flip(unsigned char *b, size_t len) { - b[rustsecp256k1_v0_7_0_testrand_int(len)] ^= (1 << rustsecp256k1_v0_7_0_testrand_bits(3)); +static void rustsecp256k1_v0_8_0_testrand_flip(unsigned char *b, size_t len) { + b[rustsecp256k1_v0_8_0_testrand_int(len)] ^= (1 << rustsecp256k1_v0_8_0_testrand_bits(3)); } -static void rustsecp256k1_v0_7_0_testrand_init(const char* hexseed) { +static void rustsecp256k1_v0_8_0_testrand_init(const char* hexseed) { unsigned char seed16[16] = {0}; if (hexseed && strlen(hexseed) != 0) { int pos = 0; @@ -185,12 +185,12 @@ static void rustsecp256k1_v0_7_0_testrand_init(const char* hexseed) { } printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]); - rustsecp256k1_v0_7_0_testrand_seed(seed16); + rustsecp256k1_v0_8_0_testrand_seed(seed16); } -static void rustsecp256k1_v0_7_0_testrand_finish(void) { +static void rustsecp256k1_v0_8_0_testrand_finish(void) { unsigned char run32[32]; - rustsecp256k1_v0_7_0_testrand256(run32); + rustsecp256k1_v0_8_0_testrand256(run32); printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]); } diff --git a/secp256k1-sys/depend/secp256k1/src/tests.c b/secp256k1-sys/depend/secp256k1/src/tests.c index 01073ce83..fd0143550 100644 --- a/secp256k1-sys/depend/secp256k1/src/tests.c +++ b/secp256k1-sys/depend/secp256k1/src/tests.c @@ -26,10 +26,13 @@ #include "modinv32_impl.h" #ifdef SECP256K1_WIDEMUL_INT128 #include "modinv64_impl.h" +#include "int128_impl.h" #endif +#define CONDITIONAL_TEST(cnt, nam) if (count < (cnt)) { printf("Skipping %s (iteration count too low)\n", nam); } else + static int count = 64; -static rustsecp256k1_v0_7_0_context *ctx = NULL; +static 
rustsecp256k1_v0_8_0_context *ctx = NULL; static void counting_illegal_callback_fn(const char* str, void* data) { /* Dummy callback function that just counts. */ @@ -47,85 +50,85 @@ static void uncounting_illegal_callback_fn(const char* str, void* data) { (*p)--; } -void random_field_element_test(rustsecp256k1_v0_7_0_fe *fe) { +void random_field_element_test(rustsecp256k1_v0_8_0_fe *fe) { do { unsigned char b32[32]; - rustsecp256k1_v0_7_0_testrand256_test(b32); - if (rustsecp256k1_v0_7_0_fe_set_b32(fe, b32)) { + rustsecp256k1_v0_8_0_testrand256_test(b32); + if (rustsecp256k1_v0_8_0_fe_set_b32(fe, b32)) { break; } } while(1); } -void random_field_element_magnitude(rustsecp256k1_v0_7_0_fe *fe) { - rustsecp256k1_v0_7_0_fe zero; - int n = rustsecp256k1_v0_7_0_testrand_int(9); - rustsecp256k1_v0_7_0_fe_normalize(fe); +void random_field_element_magnitude(rustsecp256k1_v0_8_0_fe *fe) { + rustsecp256k1_v0_8_0_fe zero; + int n = rustsecp256k1_v0_8_0_testrand_int(9); + rustsecp256k1_v0_8_0_fe_normalize(fe); if (n == 0) { return; } - rustsecp256k1_v0_7_0_fe_clear(&zero); - rustsecp256k1_v0_7_0_fe_negate(&zero, &zero, 0); - rustsecp256k1_v0_7_0_fe_mul_int(&zero, n - 1); - rustsecp256k1_v0_7_0_fe_add(fe, &zero); + rustsecp256k1_v0_8_0_fe_clear(&zero); + rustsecp256k1_v0_8_0_fe_negate(&zero, &zero, 0); + rustsecp256k1_v0_8_0_fe_mul_int(&zero, n - 1); + rustsecp256k1_v0_8_0_fe_add(fe, &zero); #ifdef VERIFY CHECK(fe->magnitude == n); #endif } -void random_group_element_test(rustsecp256k1_v0_7_0_ge *ge) { - rustsecp256k1_v0_7_0_fe fe; +void random_group_element_test(rustsecp256k1_v0_8_0_ge *ge) { + rustsecp256k1_v0_8_0_fe fe; do { random_field_element_test(&fe); - if (rustsecp256k1_v0_7_0_ge_set_xo_var(ge, &fe, rustsecp256k1_v0_7_0_testrand_bits(1))) { - rustsecp256k1_v0_7_0_fe_normalize(&ge->y); + if (rustsecp256k1_v0_8_0_ge_set_xo_var(ge, &fe, rustsecp256k1_v0_8_0_testrand_bits(1))) { + rustsecp256k1_v0_8_0_fe_normalize(&ge->y); break; } } while(1); ge->infinity = 0; } -void random_group_element_jacobian_test(rustsecp256k1_v0_7_0_gej *gej, const rustsecp256k1_v0_7_0_ge *ge) { - rustsecp256k1_v0_7_0_fe z2, z3; +void random_group_element_jacobian_test(rustsecp256k1_v0_8_0_gej *gej, const rustsecp256k1_v0_8_0_ge *ge) { + rustsecp256k1_v0_8_0_fe z2, z3; do { random_field_element_test(&gej->z); - if (!rustsecp256k1_v0_7_0_fe_is_zero(&gej->z)) { + if (!rustsecp256k1_v0_8_0_fe_is_zero(&gej->z)) { break; } } while(1); - rustsecp256k1_v0_7_0_fe_sqr(&z2, &gej->z); - rustsecp256k1_v0_7_0_fe_mul(&z3, &z2, &gej->z); - rustsecp256k1_v0_7_0_fe_mul(&gej->x, &ge->x, &z2); - rustsecp256k1_v0_7_0_fe_mul(&gej->y, &ge->y, &z3); + rustsecp256k1_v0_8_0_fe_sqr(&z2, &gej->z); + rustsecp256k1_v0_8_0_fe_mul(&z3, &z2, &gej->z); + rustsecp256k1_v0_8_0_fe_mul(&gej->x, &ge->x, &z2); + rustsecp256k1_v0_8_0_fe_mul(&gej->y, &ge->y, &z3); gej->infinity = ge->infinity; } -void random_gej_test(rustsecp256k1_v0_7_0_gej *gej) { - rustsecp256k1_v0_7_0_ge ge; +void random_gej_test(rustsecp256k1_v0_8_0_gej *gej) { + rustsecp256k1_v0_8_0_ge ge; random_group_element_test(&ge); random_group_element_jacobian_test(gej, &ge); } -void random_scalar_order_test(rustsecp256k1_v0_7_0_scalar *num) { +void random_scalar_order_test(rustsecp256k1_v0_8_0_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - rustsecp256k1_v0_7_0_testrand256_test(b32); - rustsecp256k1_v0_7_0_scalar_set_b32(num, b32, &overflow); - if (overflow || rustsecp256k1_v0_7_0_scalar_is_zero(num)) { + rustsecp256k1_v0_8_0_testrand256_test(b32); + 
rustsecp256k1_v0_8_0_scalar_set_b32(num, b32, &overflow); + if (overflow || rustsecp256k1_v0_8_0_scalar_is_zero(num)) { continue; } break; } while(1); } -void random_scalar_order(rustsecp256k1_v0_7_0_scalar *num) { +void random_scalar_order(rustsecp256k1_v0_8_0_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - rustsecp256k1_v0_7_0_testrand256(b32); - rustsecp256k1_v0_7_0_scalar_set_b32(num, b32, &overflow); - if (overflow || rustsecp256k1_v0_7_0_scalar_is_zero(num)) { + rustsecp256k1_v0_8_0_testrand256(b32); + rustsecp256k1_v0_8_0_scalar_set_b32(num, b32, &overflow); + if (overflow || rustsecp256k1_v0_8_0_scalar_is_zero(num)) { continue; } break; @@ -133,216 +136,197 @@ void random_scalar_order(rustsecp256k1_v0_7_0_scalar *num) { } void random_scalar_order_b32(unsigned char *b32) { - rustsecp256k1_v0_7_0_scalar num; + rustsecp256k1_v0_8_0_scalar num; random_scalar_order(&num); - rustsecp256k1_v0_7_0_scalar_get_b32(b32, &num); + rustsecp256k1_v0_8_0_scalar_get_b32(b32, &num); +} + +void run_selftest_tests(void) { + /* Test public API */ + rustsecp256k1_v0_8_0_selftest(); +} + +int ecmult_gen_context_eq(const rustsecp256k1_v0_8_0_ecmult_gen_context *a, const rustsecp256k1_v0_8_0_ecmult_gen_context *b) { + return a->built == b->built + && rustsecp256k1_v0_8_0_scalar_eq(&a->blind, &b->blind) + && rustsecp256k1_v0_8_0_gej_eq_var(&a->initial, &b->initial); +} + +int context_eq(const rustsecp256k1_v0_8_0_context *a, const rustsecp256k1_v0_8_0_context *b) { + return a->declassify == b->declassify + && ecmult_gen_context_eq(&a->ecmult_gen_ctx, &b->ecmult_gen_ctx) + && a->illegal_callback.fn == b->illegal_callback.fn + && a->illegal_callback.data == b->illegal_callback.data + && a->error_callback.fn == b->error_callback.fn + && a->error_callback.data == b->error_callback.data; +} + +void test_deprecated_flags(void) { + unsigned int flags[] = { SECP256K1_CONTEXT_SIGN, + SECP256K1_CONTEXT_VERIFY, + SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY }; + int i; + /* Check that a context created with any of the flags in the flags array is + * identical to the NONE context.
*/ + for (i = 0; i < (int)(sizeof(flags)/sizeof(flags[0])); i++) { + rustsecp256k1_v0_8_0_context *tmp_ctx; + CHECK(rustsecp256k1_v0_8_0_context_preallocated_size(SECP256K1_CONTEXT_NONE) == rustsecp256k1_v0_8_0_context_preallocated_size(flags[i])); + tmp_ctx = rustsecp256k1_v0_8_0_context_create(flags[i]); + CHECK(context_eq(ctx, tmp_ctx)); + rustsecp256k1_v0_8_0_context_destroy(tmp_ctx); + } } void run_context_tests(int use_prealloc) { - rustsecp256k1_v0_7_0_pubkey pubkey; - rustsecp256k1_v0_7_0_pubkey zero_pubkey; - rustsecp256k1_v0_7_0_ecdsa_signature sig; + rustsecp256k1_v0_8_0_pubkey pubkey; + rustsecp256k1_v0_8_0_pubkey zero_pubkey; + rustsecp256k1_v0_8_0_ecdsa_signature sig; unsigned char ctmp[32]; int32_t ecount; int32_t ecount2; - rustsecp256k1_v0_7_0_context *none; - rustsecp256k1_v0_7_0_context *sign; - rustsecp256k1_v0_7_0_context *vrfy; - rustsecp256k1_v0_7_0_context *both; - rustsecp256k1_v0_7_0_context *sttc; - void *none_prealloc = NULL; - void *sign_prealloc = NULL; - void *vrfy_prealloc = NULL; - void *both_prealloc = NULL; + rustsecp256k1_v0_8_0_context *sttc; + void *ctx_prealloc = NULL; void *sttc_prealloc = NULL; - rustsecp256k1_v0_7_0_gej pubj; - rustsecp256k1_v0_7_0_ge pub; - rustsecp256k1_v0_7_0_scalar msg, key, nonce; - rustsecp256k1_v0_7_0_scalar sigr, sigs; + rustsecp256k1_v0_8_0_gej pubj; + rustsecp256k1_v0_8_0_ge pub; + rustsecp256k1_v0_8_0_scalar msg, key, nonce; + rustsecp256k1_v0_8_0_scalar sigr, sigs; + + /* Check that deprecated rustsecp256k1_v0_8_0_context_no_precomp is an alias to rustsecp256k1_v0_8_0_context_static. */ + CHECK(rustsecp256k1_v0_8_0_context_no_precomp == rustsecp256k1_v0_8_0_context_static); if (use_prealloc) { - none_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); - sign_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); - vrfy_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); - both_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); - sttc_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_clone_size(rustsecp256k1_v0_7_0_context_no_precomp)); - CHECK(none_prealloc != NULL); - CHECK(sign_prealloc != NULL); - CHECK(vrfy_prealloc != NULL); - CHECK(both_prealloc != NULL); + ctx_prealloc = malloc(rustsecp256k1_v0_8_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + CHECK(ctx_prealloc != NULL); + ctx = rustsecp256k1_v0_8_0_context_preallocated_create(ctx_prealloc, SECP256K1_CONTEXT_NONE); + sttc_prealloc = malloc(rustsecp256k1_v0_8_0_context_preallocated_clone_size(rustsecp256k1_v0_8_0_context_static)); CHECK(sttc_prealloc != NULL); - none = rustsecp256k1_v0_7_0_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE); - sign = rustsecp256k1_v0_7_0_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN); - vrfy = rustsecp256k1_v0_7_0_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY); - both = rustsecp256k1_v0_7_0_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - sttc = rustsecp256k1_v0_7_0_context_preallocated_clone(rustsecp256k1_v0_7_0_context_no_precomp, sttc_prealloc); + sttc = rustsecp256k1_v0_8_0_context_preallocated_clone(rustsecp256k1_v0_8_0_context_static, sttc_prealloc); } else { - none = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_NONE); - sign = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN); - vrfy = 
rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_VERIFY); - both = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - sttc = rustsecp256k1_v0_7_0_context_clone(rustsecp256k1_v0_7_0_context_no_precomp); + sttc = rustsecp256k1_v0_8_0_context_clone(rustsecp256k1_v0_8_0_context_static); + ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); } + test_deprecated_flags(); + memset(&zero_pubkey, 0, sizeof(zero_pubkey)); ecount = 0; ecount2 = 10; - rustsecp256k1_v0_7_0_context_set_illegal_callback(sttc, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2); - /* set error callback (to a function that still aborts in case malloc() fails in rustsecp256k1_v0_7_0_context_clone() below) */ - rustsecp256k1_v0_7_0_context_set_error_callback(sign, rustsecp256k1_v0_7_0_default_illegal_callback_fn, NULL); - CHECK(sign->error_callback.fn != vrfy->error_callback.fn); - CHECK(sign->error_callback.fn == rustsecp256k1_v0_7_0_default_illegal_callback_fn); + rustsecp256k1_v0_8_0_context_set_illegal_callback(sttc, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount2); + /* set error callback (to a function that still aborts in case malloc() fails in rustsecp256k1_v0_8_0_context_clone() below) */ + rustsecp256k1_v0_8_0_context_set_error_callback(ctx, rustsecp256k1_v0_8_0_default_illegal_callback_fn, NULL); + CHECK(ctx->error_callback.fn != sttc->error_callback.fn); + CHECK(ctx->error_callback.fn == rustsecp256k1_v0_8_0_default_illegal_callback_fn); /* check if sizes for cloning are consistent */ - CHECK(rustsecp256k1_v0_7_0_context_preallocated_clone_size(none) == rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); - CHECK(rustsecp256k1_v0_7_0_context_preallocated_clone_size(sign) == rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); - CHECK(rustsecp256k1_v0_7_0_context_preallocated_clone_size(vrfy) == rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); - CHECK(rustsecp256k1_v0_7_0_context_preallocated_clone_size(both) == rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); - CHECK(rustsecp256k1_v0_7_0_context_preallocated_clone_size(sttc) >= sizeof(rustsecp256k1_v0_7_0_context)); + CHECK(rustsecp256k1_v0_8_0_context_preallocated_clone_size(ctx) == rustsecp256k1_v0_8_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + CHECK(rustsecp256k1_v0_8_0_context_preallocated_clone_size(sttc) >= sizeof(rustsecp256k1_v0_8_0_context)); /*** clone and destroy all of them to make sure cloning was complete ***/ { - rustsecp256k1_v0_7_0_context *ctx_tmp; + rustsecp256k1_v0_8_0_context *ctx_tmp; if (use_prealloc) { /* clone into a non-preallocated context and then again into a new preallocated one. 
*/ - ctx_tmp = none; none = rustsecp256k1_v0_7_0_context_clone(none); rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx_tmp); - free(none_prealloc); none_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL); - ctx_tmp = none; none = rustsecp256k1_v0_7_0_context_preallocated_clone(none, none_prealloc); rustsecp256k1_v0_7_0_context_destroy(ctx_tmp); - - ctx_tmp = sign; sign = rustsecp256k1_v0_7_0_context_clone(sign); rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx_tmp); - free(sign_prealloc); sign_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL); - ctx_tmp = sign; sign = rustsecp256k1_v0_7_0_context_preallocated_clone(sign, sign_prealloc); rustsecp256k1_v0_7_0_context_destroy(ctx_tmp); - - ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_7_0_context_clone(vrfy); rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx_tmp); - free(vrfy_prealloc); vrfy_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL); - ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_7_0_context_preallocated_clone(vrfy, vrfy_prealloc); rustsecp256k1_v0_7_0_context_destroy(ctx_tmp); - - ctx_tmp = both; both = rustsecp256k1_v0_7_0_context_clone(both); rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx_tmp); - free(both_prealloc); both_prealloc = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL); - ctx_tmp = both; both = rustsecp256k1_v0_7_0_context_preallocated_clone(both, both_prealloc); rustsecp256k1_v0_7_0_context_destroy(ctx_tmp); + ctx_tmp = ctx; ctx = rustsecp256k1_v0_8_0_context_clone(ctx); rustsecp256k1_v0_8_0_context_preallocated_destroy(ctx_tmp); + free(ctx_prealloc); ctx_prealloc = malloc(rustsecp256k1_v0_8_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(ctx_prealloc != NULL); + ctx_tmp = ctx; ctx = rustsecp256k1_v0_8_0_context_preallocated_clone(ctx, ctx_prealloc); rustsecp256k1_v0_8_0_context_destroy(ctx_tmp); } else { /* clone into a preallocated context and then again into a new non-preallocated one. 
*/ void *prealloc_tmp; - prealloc_tmp = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL); - ctx_tmp = none; none = rustsecp256k1_v0_7_0_context_preallocated_clone(none, prealloc_tmp); rustsecp256k1_v0_7_0_context_destroy(ctx_tmp); - ctx_tmp = none; none = rustsecp256k1_v0_7_0_context_clone(none); rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx_tmp); - free(prealloc_tmp); - - prealloc_tmp = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL); - ctx_tmp = sign; sign = rustsecp256k1_v0_7_0_context_preallocated_clone(sign, prealloc_tmp); rustsecp256k1_v0_7_0_context_destroy(ctx_tmp); - ctx_tmp = sign; sign = rustsecp256k1_v0_7_0_context_clone(sign); rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx_tmp); - free(prealloc_tmp); - - prealloc_tmp = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); - ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_7_0_context_preallocated_clone(vrfy, prealloc_tmp); rustsecp256k1_v0_7_0_context_destroy(ctx_tmp); - ctx_tmp = vrfy; vrfy = rustsecp256k1_v0_7_0_context_clone(vrfy); rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx_tmp); - free(prealloc_tmp); - - prealloc_tmp = malloc(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); - ctx_tmp = both; both = rustsecp256k1_v0_7_0_context_preallocated_clone(both, prealloc_tmp); rustsecp256k1_v0_7_0_context_destroy(ctx_tmp); - ctx_tmp = both; both = rustsecp256k1_v0_7_0_context_clone(both); rustsecp256k1_v0_7_0_context_preallocated_destroy(ctx_tmp); + prealloc_tmp = malloc(rustsecp256k1_v0_8_0_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL); + ctx_tmp = ctx; ctx = rustsecp256k1_v0_8_0_context_preallocated_clone(ctx, prealloc_tmp); rustsecp256k1_v0_8_0_context_destroy(ctx_tmp); + ctx_tmp = ctx; ctx = rustsecp256k1_v0_8_0_context_clone(ctx); rustsecp256k1_v0_8_0_context_preallocated_destroy(ctx_tmp); free(prealloc_tmp); } } /* Verify that the error callback makes it across the clone. */ - CHECK(sign->error_callback.fn != vrfy->error_callback.fn); - CHECK(sign->error_callback.fn == rustsecp256k1_v0_7_0_default_illegal_callback_fn); + CHECK(ctx->error_callback.fn != sttc->error_callback.fn); + CHECK(ctx->error_callback.fn == rustsecp256k1_v0_8_0_default_illegal_callback_fn); /* And that it resets back to default. */ - rustsecp256k1_v0_7_0_context_set_error_callback(sign, NULL, NULL); - CHECK(vrfy->error_callback.fn == sign->error_callback.fn); + rustsecp256k1_v0_8_0_context_set_error_callback(ctx, NULL, NULL); + CHECK(ctx->error_callback.fn == sttc->error_callback.fn); /*** attempt to use them ***/ random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_7_0_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key); - rustsecp256k1_v0_7_0_ge_set_gej(&pub, &pubj); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); + rustsecp256k1_v0_8_0_ge_set_gej(&pub, &pubj); /* Verify context-type checking illegal-argument errors. 
*/ memset(ctmp, 1, 32); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(sttc, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(sttc, &pubkey, ctmp) == 0); CHECK(ecount == 1); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(sign, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(sttc, &sig, ctmp, ctmp, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(sttc, &sig, ctmp, ctmp, NULL, NULL) == 0); CHECK(ecount == 2); VG_UNDEF(&sig, sizeof(sig)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, ctmp, ctmp, NULL, NULL) == 1); VG_CHECK(&sig, sizeof(sig)); CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, ctmp, &pubkey) == 1); CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(sttc, &sig, ctmp, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(sttc, &sig, ctmp, &pubkey) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp) == 1); CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(sttc, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(sttc, &pubkey, ctmp) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp) == 1); CHECK(ecount2 == 10); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_negate(sttc, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_negate(sttc, &pubkey) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_negate(sign, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_negate(ctx, &pubkey) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_negate(sign, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_negate(ctx, NULL) == 0); CHECK(ecount2 == 11); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_negate(sttc, &zero_pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_negate(sttc, &zero_pubkey) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(sttc, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(sttc, &pubkey, ctmp) == 1); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_context_randomize(sttc, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_context_randomize(sttc, ctmp) == 1); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_context_randomize(sttc, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_context_randomize(sttc, NULL) == 1); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_context_randomize(sign, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_context_randomize(ctx, ctmp) == 1); CHECK(ecount2 == 11); - CHECK(rustsecp256k1_v0_7_0_context_randomize(sign, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_context_randomize(ctx, NULL) == 1); CHECK(ecount2 == 11); - rustsecp256k1_v0_7_0_context_set_illegal_callback(sttc, NULL, NULL); - rustsecp256k1_v0_7_0_context_set_illegal_callback(sign, NULL, NULL); + rustsecp256k1_v0_8_0_context_set_illegal_callback(sttc, NULL, NULL); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, NULL, NULL); /* obtain a working nonce */ do { random_scalar_order_test(&nonce); - } while(!rustsecp256k1_v0_7_0_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, 
&sigs, &key, &msg, &nonce, NULL)); + } while(!rustsecp256k1_v0_8_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); /* try signing */ - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); /* try verifying */ - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); /* cleanup */ if (use_prealloc) { - rustsecp256k1_v0_7_0_context_preallocated_destroy(none); - rustsecp256k1_v0_7_0_context_preallocated_destroy(sign); - rustsecp256k1_v0_7_0_context_preallocated_destroy(vrfy); - rustsecp256k1_v0_7_0_context_preallocated_destroy(both); - rustsecp256k1_v0_7_0_context_preallocated_destroy(sttc); - free(none_prealloc); - free(sign_prealloc); - free(vrfy_prealloc); - free(both_prealloc); + rustsecp256k1_v0_8_0_context_preallocated_destroy(ctx); + rustsecp256k1_v0_8_0_context_preallocated_destroy(sttc); + free(ctx_prealloc); free(sttc_prealloc); } else { - rustsecp256k1_v0_7_0_context_destroy(none); - rustsecp256k1_v0_7_0_context_destroy(sign); - rustsecp256k1_v0_7_0_context_destroy(vrfy); - rustsecp256k1_v0_7_0_context_destroy(both); - rustsecp256k1_v0_7_0_context_destroy(sttc); + rustsecp256k1_v0_8_0_context_destroy(ctx); + rustsecp256k1_v0_8_0_context_destroy(sttc); } /* Defined as no-op. */ - rustsecp256k1_v0_7_0_context_destroy(NULL); - rustsecp256k1_v0_7_0_context_preallocated_destroy(NULL); - + rustsecp256k1_v0_8_0_context_destroy(NULL); + rustsecp256k1_v0_8_0_context_preallocated_destroy(NULL); } void run_scratch_tests(void) { @@ -351,83 +335,85 @@ void run_scratch_tests(void) { int32_t ecount = 0; size_t checkpoint; size_t checkpoint_2; - rustsecp256k1_v0_7_0_context *none = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_NONE); - rustsecp256k1_v0_7_0_scratch_space *scratch; - rustsecp256k1_v0_7_0_scratch_space local_scratch; + rustsecp256k1_v0_8_0_scratch_space *scratch; + rustsecp256k1_v0_8_0_scratch_space local_scratch; + + ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); /* Test public API */ - rustsecp256k1_v0_7_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); - rustsecp256k1_v0_7_0_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_error_callback(ctx, counting_illegal_callback_fn, &ecount); - scratch = rustsecp256k1_v0_7_0_scratch_space_create(none, 1000); + scratch = rustsecp256k1_v0_8_0_scratch_space_create(ctx, 1000); CHECK(scratch != NULL); CHECK(ecount == 0); /* Test internal API */ - CHECK(rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); - CHECK(rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); + CHECK(rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, 0) == 1000); + CHECK(rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); CHECK(scratch->alloc_size == 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* Allocating 500 bytes succeeds */ - checkpoint 
= rustsecp256k1_v0_7_0_scratch_checkpoint(&none->error_callback, scratch); - CHECK(rustsecp256k1_v0_7_0_scratch_alloc(&none->error_callback, scratch, 500) != NULL); - CHECK(rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); - CHECK(rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + checkpoint = rustsecp256k1_v0_8_0_scratch_checkpoint(&ctx->error_callback, scratch); + CHECK(rustsecp256k1_v0_8_0_scratch_alloc(&ctx->error_callback, scratch, 500) != NULL); + CHECK(rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); CHECK(scratch->alloc_size != 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* Allocating another 501 bytes fails */ - CHECK(rustsecp256k1_v0_7_0_scratch_alloc(&none->error_callback, scratch, 501) == NULL); - CHECK(rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); - CHECK(rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + CHECK(rustsecp256k1_v0_8_0_scratch_alloc(&ctx->error_callback, scratch, 501) == NULL); + CHECK(rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); CHECK(scratch->alloc_size != 0); CHECK(scratch->alloc_size % ALIGNMENT == 0); /* ...but it succeeds once we apply the checkpoint to undo it */ - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); CHECK(scratch->alloc_size == 0); - CHECK(rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); - CHECK(rustsecp256k1_v0_7_0_scratch_alloc(&none->error_callback, scratch, 500) != NULL); + CHECK(rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, 0) == 1000); + CHECK(rustsecp256k1_v0_8_0_scratch_alloc(&ctx->error_callback, scratch, 500) != NULL); CHECK(scratch->alloc_size != 0); /* try to apply a bad checkpoint */ - checkpoint_2 = rustsecp256k1_v0_7_0_scratch_checkpoint(&none->error_callback, scratch); - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); + checkpoint_2 = rustsecp256k1_v0_8_0_scratch_checkpoint(&ctx->error_callback, scratch); + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); CHECK(ecount == 0); - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */ + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */ CHECK(ecount == 1); - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */ + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(&ctx->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */ CHECK(ecount == 2); /* try to use badly initialized scratch space */ - rustsecp256k1_v0_7_0_scratch_space_destroy(none, scratch); + rustsecp256k1_v0_8_0_scratch_space_destroy(ctx, scratch); memset(&local_scratch, 0, sizeof(local_scratch)); scratch 
= &local_scratch; - CHECK(!rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, 0)); + CHECK(!rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, 0)); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_scratch_alloc(&none->error_callback, scratch, 500) == NULL); + CHECK(rustsecp256k1_v0_8_0_scratch_alloc(&ctx->error_callback, scratch, 500) == NULL); CHECK(ecount == 4); - rustsecp256k1_v0_7_0_scratch_space_destroy(none, scratch); + rustsecp256k1_v0_8_0_scratch_space_destroy(ctx, scratch); CHECK(ecount == 5); /* Test that large integers do not wrap around in a bad way */ - scratch = rustsecp256k1_v0_7_0_scratch_space_create(none, 1000); + scratch = rustsecp256k1_v0_8_0_scratch_space_create(ctx, 1000); /* Try max allocation with a large number of objects. Only makes sense if * ALIGNMENT is greater than 1 because otherwise the objects take no extra * space. */ - CHECK(ALIGNMENT <= 1 || !rustsecp256k1_v0_7_0_scratch_max_allocation(&none->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1)); + CHECK(ALIGNMENT <= 1 || !rustsecp256k1_v0_8_0_scratch_max_allocation(&ctx->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1)); /* Try allocating SIZE_MAX to test wrap around which only happens if * ALIGNMENT > 1, otherwise it returns NULL anyway because the scratch * space is too small. */ - CHECK(rustsecp256k1_v0_7_0_scratch_alloc(&none->error_callback, scratch, SIZE_MAX) == NULL); - rustsecp256k1_v0_7_0_scratch_space_destroy(none, scratch); + CHECK(rustsecp256k1_v0_8_0_scratch_alloc(&ctx->error_callback, scratch, SIZE_MAX) == NULL); + rustsecp256k1_v0_8_0_scratch_space_destroy(ctx, scratch); /* cleanup */ - rustsecp256k1_v0_7_0_scratch_space_destroy(none, NULL); /* no-op */ - rustsecp256k1_v0_7_0_context_destroy(none); + rustsecp256k1_v0_8_0_scratch_space_destroy(ctx, NULL); /* no-op */ + rustsecp256k1_v0_8_0_context_destroy(ctx); } + void run_ctz_tests(void) { static const uint32_t b32[] = {1, 0xffffffff, 0x5e56968f, 0xe0d63129}; static const uint64_t b64[] = {1, 0xffffffffffffffff, 0xbcd02462139b3fc3, 0x98b5f80c769693ef}; @@ -435,28 +421,32 @@ void run_ctz_tests(void) { unsigned i; for (i = 0; i < sizeof(b32) / sizeof(b32[0]); ++i) { for (shift = 0; shift < 32; ++shift) { - CHECK(rustsecp256k1_v0_7_0_ctz32_var_debruijn(b32[i] << shift) == shift); - CHECK(rustsecp256k1_v0_7_0_ctz32_var(b32[i] << shift) == shift); + CHECK(rustsecp256k1_v0_8_0_ctz32_var_debruijn(b32[i] << shift) == shift); + CHECK(rustsecp256k1_v0_8_0_ctz32_var(b32[i] << shift) == shift); } } for (i = 0; i < sizeof(b64) / sizeof(b64[0]); ++i) { for (shift = 0; shift < 64; ++shift) { - CHECK(rustsecp256k1_v0_7_0_ctz64_var_debruijn(b64[i] << shift) == shift); - CHECK(rustsecp256k1_v0_7_0_ctz64_var(b64[i] << shift) == shift); + CHECK(rustsecp256k1_v0_8_0_ctz64_var_debruijn(b64[i] << shift) == shift); + CHECK(rustsecp256k1_v0_8_0_ctz64_var(b64[i] << shift) == shift); } } } /***** HASH TESTS *****/ -void run_sha256_tests(void) { - static const char *inputs[8] = { +void run_sha256_known_output_tests(void) { + static const char *inputs[] = { "", "abc", "message digest", "secure hash algorithm", "SHA256 is considered to be safe", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", "For this sample, this 63-byte string will be used as input data", - "This is exactly 64 bytes long, not counting the terminating byte" + "This is exactly 64 bytes long, not counting the terminating byte", + "aaaaa", }; - static const unsigned char outputs[8][32] = { + static const unsigned int repeat[] 
= { + 1, 1, 1, 1, 1, 1, 1, 1, 1000000/5 + }; + static const unsigned char outputs[][32] = { {0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, {0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad}, {0xf7, 0x84, 0x6f, 0x55, 0xcf, 0x23, 0xe1, 0x4e, 0xeb, 0xea, 0xb5, 0xb4, 0xe1, 0x55, 0x0c, 0xad, 0x5b, 0x50, 0x9e, 0x33, 0x48, 0xfb, 0xc4, 0xef, 0xa3, 0xa1, 0x41, 0x3d, 0x39, 0x3c, 0xb6, 0x50}, @@ -464,27 +454,146 @@ void run_sha256_tests(void) { {0x68, 0x19, 0xd9, 0x15, 0xc7, 0x3f, 0x4d, 0x1e, 0x77, 0xe4, 0xe1, 0xb5, 0x2d, 0x1f, 0xa0, 0xf9, 0xcf, 0x9b, 0xea, 0xea, 0xd3, 0x93, 0x9f, 0x15, 0x87, 0x4b, 0xd9, 0x88, 0xe2, 0xa2, 0x36, 0x30}, {0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1}, {0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42}, - {0xab, 0x64, 0xef, 0xf7, 0xe8, 0x8e, 0x2e, 0x46, 0x16, 0x5e, 0x29, 0xf2, 0xbc, 0xe4, 0x18, 0x26, 0xbd, 0x4c, 0x7b, 0x35, 0x52, 0xf6, 0xb3, 0x82, 0xa9, 0xe7, 0xd3, 0xaf, 0x47, 0xc2, 0x45, 0xf8} + {0xab, 0x64, 0xef, 0xf7, 0xe8, 0x8e, 0x2e, 0x46, 0x16, 0x5e, 0x29, 0xf2, 0xbc, 0xe4, 0x18, 0x26, 0xbd, 0x4c, 0x7b, 0x35, 0x52, 0xf6, 0xb3, 0x82, 0xa9, 0xe7, 0xd3, 0xaf, 0x47, 0xc2, 0x45, 0xf8}, + {0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92, 0x81, 0xa1, 0xc7, 0xe2, 0x84, 0xd7, 0x3e, 0x67, 0xf1, 0x80, 0x9a, 0x48, 0xa4, 0x97, 0x20, 0x0e, 0x04, 0x6d, 0x39, 0xcc, 0xc7, 0x11, 0x2c, 0xd0}, }; - int i; - for (i = 0; i < 8; i++) { + unsigned int i, ninputs; + + /* Skip last input vector for low iteration counts */ + ninputs = sizeof(inputs)/sizeof(inputs[0]) - 1; + CONDITIONAL_TEST(16, "run_sha256_known_output_tests 1000000") ninputs++; + + for (i = 0; i < ninputs; i++) { unsigned char out[32]; - rustsecp256k1_v0_7_0_sha256 hasher; - rustsecp256k1_v0_7_0_sha256_initialize(&hasher); - rustsecp256k1_v0_7_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - rustsecp256k1_v0_7_0_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(out, outputs[i], 32) == 0); + rustsecp256k1_v0_8_0_sha256 hasher; + unsigned int j; + /* 1. Run: simply write the input bytestrings */ + j = repeat[i]; + rustsecp256k1_v0_8_0_sha256_initialize(&hasher); + while (j > 0) { + rustsecp256k1_v0_8_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); + j--; + } + rustsecp256k1_v0_8_0_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, outputs[i], 32) == 0); + /* 2. 
Run: split the input bytestrings randomly before writing */ if (strlen(inputs[i]) > 0) { - int split = rustsecp256k1_v0_7_0_testrand_int(strlen(inputs[i])); - rustsecp256k1_v0_7_0_sha256_initialize(&hasher); - rustsecp256k1_v0_7_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - rustsecp256k1_v0_7_0_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - rustsecp256k1_v0_7_0_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(out, outputs[i], 32) == 0); + int split = rustsecp256k1_v0_8_0_testrand_int(strlen(inputs[i])); + rustsecp256k1_v0_8_0_sha256_initialize(&hasher); + j = repeat[i]; + while (j > 0) { + rustsecp256k1_v0_8_0_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); + rustsecp256k1_v0_8_0_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); + j--; + } + rustsecp256k1_v0_8_0_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, outputs[i], 32) == 0); } } } +/** SHA256 counter tests + +The tests verify that the SHA256 counter doesn't wrap around at message length +2^i bytes for i = 20, ..., 33. This wide range aims at being independent of the +implementation of the counter and it catches multiple natural 32-bit overflows +(e.g., counting bits, counting bytes, counting blocks, ...). + +The test vectors have been generated using following Python script which relies +on https://github.com/cloudtools/sha256/ (v0.3 on Python v3.10.2). + +``` +from sha256 import sha256 +from copy import copy + +def midstate_c_definition(hasher): + ret = ' {{0x' + hasher.state[0].hex('_', 4).replace('_', ', 0x') + '},\n' + ret += ' {0x00}, ' + str(hex(hasher.state[1])) + '}' + return ret + +def output_c_literal(hasher): + return '{0x' + hasher.digest().hex('_').replace('_', ', 0x') + '}' + +MESSAGE = b'abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno' +assert(len(MESSAGE) == 64) +BYTE_BOUNDARIES = [(2**b)//len(MESSAGE) - 1 for b in range(20, 34)] + +midstates = [] +digests = [] +hasher = sha256() +for i in range(BYTE_BOUNDARIES[-1] + 1): + if i in BYTE_BOUNDARIES: + midstates.append(midstate_c_definition(hasher)) + hasher_copy = copy(hasher) + hasher_copy.update(MESSAGE) + digests.append(output_c_literal(hasher_copy)) + hasher.update(MESSAGE) + +for x in midstates: + print(x + ',') + +for x in digests: + print(x + ',') +``` +*/ +void run_sha256_counter_tests(void) { + static const char *input = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"; + static const rustsecp256k1_v0_8_0_sha256 midstates[] = { + {{0xa2b5c8bb, 0x26c88bb3, 0x2abdc3d2, 0x9def99a3, 0xdfd21a6e, 0x41fe585b, 0x7ef2c440, 0x2b79adda}, + {0x00}, 0xfffc0}, + {{0xa0d29445, 0x9287de66, 0x76aabd71, 0x41acd765, 0x0c7528b4, 0x84e14906, 0x942faec6, 0xcc5a7b26}, + {0x00}, 0x1fffc0}, + {{0x50449526, 0xb9f1d657, 0xa0fc13e9, 0x50860f10, 0xa550c431, 0x3fbc97c1, 0x7bbb2d89, 0xdb67bac1}, + {0x00}, 0x3fffc0}, + {{0x54a6efdc, 0x46762e7b, 0x88bfe73f, 0xbbd149c7, 0x41620c43, 0x1168da7b, 0x2c5960f9, 0xeccffda6}, + {0x00}, 0x7fffc0}, + {{0x2515a8f5, 0x5faa2977, 0x3a850486, 0xac858cad, 0x7b7276ee, 0x235c0385, 0xc53a157c, 0x7cb3e69c}, + {0x00}, 0xffffc0}, + {{0x34f39828, 0x409fedb7, 0x4bbdd0fb, 0x3b643634, 0x7806bf2e, 0xe0d1b713, 0xca3f2e1e, 0xe38722c2}, + {0x00}, 0x1ffffc0}, + {{0x389ef5c5, 0x38c54167, 0x8f5d56ab, 0x582a75cc, 0x8217caef, 0xf10947dd, 0x6a1998a8, 0x048f0b8c}, + {0x00}, 0x3ffffc0}, + {{0xd6c3f394, 0x0bee43b9, 0x6783f497, 0x29fa9e21, 0x6ce491c1, 0xa81fe45e, 
0x2fc3859a, 0x269012d0}, + {0x00}, 0x7ffffc0}, + {{0x6dd3c526, 0x44d88aa0, 0x806a1bae, 0xfbcc0d32, 0x9d6144f3, 0x9d2bd757, 0x9851a957, 0xb50430ad}, + {0x00}, 0xfffffc0}, + {{0x2add4021, 0xdfe8a9e6, 0xa56317c6, 0x7a15f5bb, 0x4a48aacd, 0x5d368414, 0x4f00e6f0, 0xd9355023}, + {0x00}, 0x1fffffc0}, + {{0xb66666b4, 0xdbeac32b, 0x0ea351ae, 0xcba9da46, 0x6278b874, 0x8c508e23, 0xe16ca776, 0x8465bac1}, + {0x00}, 0x3fffffc0}, + {{0xb6744789, 0x9cce87aa, 0xc4c478b7, 0xf38404d8, 0x2e38ba62, 0xa3f7019b, 0x50458fe7, 0x3047dbec}, + {0x00}, 0x7fffffc0}, + {{0x8b1297ba, 0xba261a80, 0x2ba1b0dd, 0xfbc67d6d, 0x61072c4e, 0x4b5a2a0f, 0x52872760, 0x2dfeb162}, + {0x00}, 0xffffffc0}, + {{0x24f33cf7, 0x41ad6583, 0x41c8ff5d, 0xca7ef35f, 0x50395756, 0x021b743e, 0xd7126cd7, 0xd037473a}, + {0x00}, 0x1ffffffc0}, + }; + static const unsigned char outputs[][32] = { + {0x0e, 0x83, 0xe2, 0xc9, 0x4f, 0xb2, 0xb8, 0x2b, 0x89, 0x06, 0x92, 0x78, 0x04, 0x03, 0x48, 0x5c, 0x48, 0x44, 0x67, 0x61, 0x77, 0xa4, 0xc7, 0x90, 0x9e, 0x92, 0x55, 0x10, 0x05, 0xfe, 0x39, 0x15}, + {0x1d, 0x1e, 0xd7, 0xb8, 0xa3, 0xa7, 0x8a, 0x79, 0xfd, 0xa0, 0x05, 0x08, 0x9c, 0xeb, 0xf0, 0xec, 0x67, 0x07, 0x9f, 0x8e, 0x3c, 0x0d, 0x8e, 0xf9, 0x75, 0x55, 0x13, 0xc1, 0xe8, 0x77, 0xf8, 0xbb}, + {0x66, 0x95, 0x6c, 0xc9, 0xe0, 0x39, 0x65, 0xb6, 0xb0, 0x05, 0xd1, 0xaf, 0xaf, 0xf3, 0x1d, 0xb9, 0xa4, 0xda, 0x6f, 0x20, 0xcd, 0x3a, 0xae, 0x64, 0xc2, 0xdb, 0xee, 0xf5, 0xb8, 0x8d, 0x57, 0x0e}, + {0x3c, 0xbb, 0x1c, 0x12, 0x5e, 0x17, 0xfd, 0x54, 0x90, 0x45, 0xa7, 0x7b, 0x61, 0x6c, 0x1d, 0xfe, 0xe6, 0xcc, 0x7f, 0xee, 0xcf, 0xef, 0x33, 0x35, 0x50, 0x62, 0x16, 0x70, 0x2f, 0x87, 0xc3, 0xc9}, + {0x53, 0x4d, 0xa8, 0xe7, 0x1e, 0x98, 0x73, 0x8d, 0xd9, 0xa3, 0x54, 0xa5, 0x0e, 0x59, 0x2c, 0x25, 0x43, 0x6f, 0xaa, 0xa2, 0xf5, 0x21, 0x06, 0x3e, 0xc9, 0x82, 0x06, 0x94, 0x98, 0x72, 0x9d, 0xa7}, + {0xef, 0x7e, 0xe9, 0x6b, 0xd3, 0xe5, 0xb7, 0x41, 0x4c, 0xc8, 0xd3, 0x07, 0x52, 0x9a, 0x5a, 0x8b, 0x4e, 0x1e, 0x75, 0xa4, 0x17, 0x78, 0xc8, 0x36, 0xcd, 0xf8, 0x2e, 0xd9, 0x57, 0xe3, 0xd7, 0x07}, + {0x87, 0x16, 0xfb, 0xf9, 0xa5, 0xf8, 0xc4, 0x56, 0x2b, 0x48, 0x52, 0x8e, 0x2d, 0x30, 0x85, 0xb6, 0x4c, 0x56, 0xb5, 0xd1, 0x16, 0x9c, 0xcf, 0x32, 0x95, 0xad, 0x03, 0xe8, 0x05, 0x58, 0x06, 0x76}, + {0x75, 0x03, 0x80, 0x28, 0xf2, 0xa7, 0x63, 0x22, 0x1a, 0x26, 0x9c, 0x68, 0xe0, 0x58, 0xfc, 0x73, 0xeb, 0x42, 0xf6, 0x86, 0x16, 0x24, 0x4b, 0xbc, 0x24, 0xf7, 0x02, 0xc8, 0x3d, 0x90, 0xe2, 0xb0}, + {0xdf, 0x49, 0x0f, 0x15, 0x7b, 0x7d, 0xbf, 0xe0, 0xd4, 0xcf, 0x47, 0xc0, 0x80, 0x93, 0x4a, 0x61, 0xaa, 0x03, 0x07, 0x66, 0xb3, 0x38, 0x5d, 0xc8, 0xc9, 0x07, 0x61, 0xfb, 0x97, 0x10, 0x2f, 0xd8}, + {0x77, 0x19, 0x40, 0x56, 0x41, 0xad, 0xbc, 0x59, 0xda, 0x1e, 0xc5, 0x37, 0x14, 0x63, 0x7b, 0xfb, 0x79, 0xe2, 0x7a, 0xb1, 0x55, 0x42, 0x99, 0x42, 0x56, 0xfe, 0x26, 0x9d, 0x0f, 0x7e, 0x80, 0xc6}, + {0x50, 0xe7, 0x2a, 0x0e, 0x26, 0x44, 0x2f, 0xe2, 0x55, 0x2d, 0xc3, 0x93, 0x8a, 0xc5, 0x86, 0x58, 0x22, 0x8c, 0x0c, 0xbf, 0xb1, 0xd2, 0xca, 0x87, 0x2a, 0xe4, 0x35, 0x26, 0x6f, 0xcd, 0x05, 0x5e}, + {0xe4, 0x80, 0x6f, 0xdb, 0x3d, 0x7d, 0xba, 0xde, 0x50, 0x3f, 0xea, 0x00, 0x3d, 0x46, 0x59, 0x64, 0xfd, 0x58, 0x1c, 0xa1, 0xb8, 0x7d, 0x5f, 0xac, 0x94, 0x37, 0x9e, 0xa0, 0xc0, 0x9c, 0x93, 0x8b}, + {0x2c, 0xf3, 0xa9, 0xf6, 0x15, 0x25, 0x80, 0x70, 0x76, 0x99, 0x7d, 0xf1, 0xc3, 0x2f, 0xa3, 0x31, 0xff, 0x92, 0x35, 0x2e, 0x8d, 0x04, 0x13, 0x33, 0xd8, 0x0d, 0xdb, 0x4a, 0xf6, 0x8c, 0x03, 0x34}, + {0xec, 0x12, 0x24, 0x9f, 0x35, 0xa4, 0x29, 0x8b, 0x9e, 0x4a, 0x95, 0xf8, 0x61, 0xaf, 0x61, 0xc5, 0x66, 0x55, 0x3e, 0x3f, 0x2a, 0x98, 0xea, 0x71, 0x16, 0x6b, 
0x1c, 0xd9, 0xe4, 0x09, 0xd2, 0x8e}, + }; + unsigned int i; + for (i = 0; i < sizeof(midstates)/sizeof(midstates[0]); i++) { + unsigned char out[32]; + rustsecp256k1_v0_8_0_sha256 hasher = midstates[i]; + rustsecp256k1_v0_8_0_sha256_write(&hasher, (const unsigned char*)input, strlen(input)); + rustsecp256k1_v0_8_0_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, outputs[i], 32) == 0); + } +} + void run_hmac_sha256_tests(void) { static const char *keys[6] = { "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", @@ -512,19 +621,19 @@ void run_hmac_sha256_tests(void) { }; int i; for (i = 0; i < 6; i++) { - rustsecp256k1_v0_7_0_hmac_sha256 hasher; + rustsecp256k1_v0_8_0_hmac_sha256 hasher; unsigned char out[32]; - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(out, outputs[i], 32) == 0); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { - int split = rustsecp256k1_v0_7_0_testrand_int(strlen(inputs[i])); - rustsecp256k1_v0_7_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - rustsecp256k1_v0_7_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - rustsecp256k1_v0_7_0_hmac_sha256_finalize(&hasher, out); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(out, outputs[i], 32) == 0); + int split = rustsecp256k1_v0_8_0_testrand_int(strlen(inputs[i])); + rustsecp256k1_v0_8_0_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); + rustsecp256k1_v0_8_0_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); + rustsecp256k1_v0_8_0_hmac_sha256_finalize(&hasher, out); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, outputs[i], 32) == 0); } } } @@ -544,35 +653,34 @@ void run_rfc6979_hmac_sha256_tests(void) { {0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94} }; - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256 rng; + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256 rng; unsigned char out[32]; int i; - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(&rng, key1, 64); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(&rng, key1, 64); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(out, out1[i], 32) == 0); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, out1[i], 32) == 0); } - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(&rng); - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(&rng, key1, 65); + 
rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(&rng, key1, 65); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(out, out1[i], 32) != 0); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, out1[i], 32) != 0); } - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(&rng); - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_initialize(&rng, key2, 64); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_initialize(&rng, key2, 64); for (i = 0; i < 3; i++) { - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(out, out2[i], 32) == 0); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_generate(&rng, out, 32); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(out, out2[i], 32) == 0); } - rustsecp256k1_v0_7_0_rfc6979_hmac_sha256_finalize(&rng); + rustsecp256k1_v0_8_0_rfc6979_hmac_sha256_finalize(&rng); } void run_tagged_sha256_tests(void) { int ecount = 0; - rustsecp256k1_v0_7_0_context *none = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_NONE); unsigned char tag[32] = { 0 }; unsigned char msg[32] = { 0 }; unsigned char hash32[32]; @@ -583,23 +691,22 @@ void run_tagged_sha256_tests(void) { 0xE2, 0x76, 0x55, 0x9A, 0x3B, 0xDE, 0x55, 0xB3 }; - rustsecp256k1_v0_7_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); /* API test */ - CHECK(rustsecp256k1_v0_7_0_tagged_sha256(none, hash32, tag, sizeof(tag), msg, sizeof(msg)) == 1); - CHECK(rustsecp256k1_v0_7_0_tagged_sha256(none, NULL, tag, sizeof(tag), msg, sizeof(msg)) == 0); + CHECK(rustsecp256k1_v0_8_0_tagged_sha256(ctx, hash32, tag, sizeof(tag), msg, sizeof(msg)) == 1); + CHECK(rustsecp256k1_v0_8_0_tagged_sha256(ctx, NULL, tag, sizeof(tag), msg, sizeof(msg)) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_tagged_sha256(none, hash32, NULL, 0, msg, sizeof(msg)) == 0); + CHECK(rustsecp256k1_v0_8_0_tagged_sha256(ctx, hash32, NULL, 0, msg, sizeof(msg)) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_tagged_sha256(none, hash32, tag, sizeof(tag), NULL, 0) == 0); + CHECK(rustsecp256k1_v0_8_0_tagged_sha256(ctx, hash32, tag, sizeof(tag), NULL, 0) == 0); CHECK(ecount == 3); /* Static test vector */ memcpy(tag, "tag", 3); memcpy(msg, "msg", 3); - CHECK(rustsecp256k1_v0_7_0_tagged_sha256(none, hash32, tag, 3, msg, 3) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(hash32, hash_expected, sizeof(hash32)) == 0); - rustsecp256k1_v0_7_0_context_destroy(none); + CHECK(rustsecp256k1_v0_8_0_tagged_sha256(ctx, hash32, tag, 3, msg, 3) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(hash32, hash_expected, sizeof(hash32)) == 0); } /***** RANDOM TESTS *****/ @@ -621,7 +728,7 @@ void test_rand_bits(int rand32, int bits) { /* Multiply the output of all rand calls with the odd number m, which should not change the uniformity of its distribution. */ for (i = 0; i < rounds[usebits]; i++) { - uint32_t r = (rand32 ? rustsecp256k1_v0_7_0_testrand32() : rustsecp256k1_v0_7_0_testrand_bits(bits)); + uint32_t r = (rand32 ? 
rustsecp256k1_v0_8_0_testrand32() : rustsecp256k1_v0_8_0_testrand_bits(bits)); CHECK((((uint64_t)r) >> bits) == 0); for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) { uint32_t rm = r * mults[m]; @@ -646,7 +753,7 @@ void test_rand_int(uint32_t range, uint32_t subrange) { uint64_t x = 0; CHECK((range % subrange) == 0); for (i = 0; i < rounds; i++) { - uint32_t r = rustsecp256k1_v0_7_0_testrand_int(range); + uint32_t r = rustsecp256k1_v0_8_0_testrand_int(range); CHECK(r < range); r = r % subrange; x |= (((uint64_t)1) << r); @@ -689,7 +796,8 @@ uint64_t modinv2p64(uint64_t x) { return w; } -/* compute out = (a*b) mod m; if b=NULL, treat b=1. + +/* compute out = (a*b) mod m; if b=NULL, treat b=1; if m=NULL, treat m=infinity. * * Out is a 512-bit number (represented as 32 uint16_t's in LE order). The other * arguments are 256-bit numbers (represented as 16 uint16_t's in LE order). */ @@ -731,51 +839,53 @@ void mulmod256(uint16_t* out, const uint16_t* a, const uint16_t* b, const uint16 } } - /* Compute the highest set bit in m. */ - for (i = 255; i >= 0; --i) { - if ((m[i >> 4] >> (i & 15)) & 1) { - m_bitlen = i; - break; + if (m) { + /* Compute the highest set bit in m. */ + for (i = 255; i >= 0; --i) { + if ((m[i >> 4] >> (i & 15)) & 1) { + m_bitlen = i; + break; + } } - } - /* Try do mul -= m<<i, for i going down to 0, whenever the result is not negative */ - for (i = 255 - m_bitlen; i >= 0; --i) { - uint16_t mul2[32]; - int64_t cs; - - /* Compute mul2 = mul - m<<i. */ - cs = 0; - for (j = 0; j < 32; ++j) { - uint16_t sub = 0; - int p; - for (p = 0; p < 16; ++p) { - int bitpos = j * 16 - i + p; - if (bitpos >= 0 && bitpos < 256) { - sub |= ((m[bitpos >> 4] >> (bitpos & 15)) & 1) << p; + /* Try do mul -= m<<i, for i going down to 0, whenever the result is not negative */ + for (i = 255 - m_bitlen; i >= 0; --i) { + uint16_t mul2[32]; + int64_t cs; + + /* Compute mul2 = mul - m<<i. */ + cs = 0; + for (j = 0; j < 32; ++j) { + uint16_t sub = 0; + int p; + for (p = 0; p < 16; ++p) { + int bitpos = j * 16 - i + p; + if (bitpos >= 0 && bitpos < 256) { + sub |= ((m[bitpos >> 4] >> (bitpos & 15)) & 1) << p; + } } + /* Add mul[j]-sub to accumulator, and shift bottom 16 bits out to mul2[j]. */ + cs += mul[j]; + cs -= sub; + mul2[j] = (cs & 0xFFFF); + cs >>= 16; + } + /* If remainder of subtraction is 0, set mul = mul2. */ + if (cs == 0) { + memcpy(mul, mul2, sizeof(mul)); } - /* Add mul[j]-sub to accumulator, and shift bottom 16 bits out to mul2[j]. */ - cs += mul[j]; - cs -= sub; - mul2[j] = (cs & 0xFFFF); - cs >>= 16; - } - /* If remainder of subtraction is 0, set mul = mul2. */ - if (cs == 0) { - memcpy(mul, mul2, sizeof(mul)); + /* Sanity check: test that all limbs higher than m's highest are zero */ + for (i = (m_bitlen >> 4) + 1; i < 32; ++i) { + CHECK(mul[i] == 0); } } - /* Sanity check: test that all limbs higher than m's highest are zero */ - for (i = (m_bitlen >> 4) + 1; i < 32; ++i) { - CHECK(mul[i] == 0); - } memcpy(out, mul, 32); } /* Convert a 256-bit number represented as 16 uint16_t's to signed30 notation. */ -void uint16_to_signed30(rustsecp256k1_v0_7_0_modinv32_signed30* out, const uint16_t* in) { +void uint16_to_signed30(rustsecp256k1_v0_8_0_modinv32_signed30* out, const uint16_t* in) { int i; memset(out->v, 0, sizeof(out->v)); for (i = 0; i < 256; ++i) { @@ -784,7 +894,7 @@ void uint16_to_signed30(rustsecp256k1_v0_7_0_modinv32_signed30* out, const uint1 } /* Convert a 256-bit number in signed30 notation to a representation as 16 uint16_t's. */ -void signed30_to_uint16(uint16_t* out, const rustsecp256k1_v0_7_0_modinv32_signed30* in) { +void signed30_to_uint16(uint16_t* out, const rustsecp256k1_v0_8_0_modinv32_signed30* in) { int i; memset(out, 0, 32); for (i = 0; i < 256; ++i) { @@ -793,10 +903,10 @@ void signed30_to_uint16(uint16_t* out, const rustsecp256k1_v0_7_0_modinv32_signe } /* Randomly mutate the sign of limbs in signed30 representation, without changing the value.
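E.g. v[pos] -= 0x40000000 paired with v[pos + 1] += 1 moves a borrow of 2^30 between adjacent limbs: the represented sum of v[i]*2^(30*i) changes by (-2^30 + 2^30)*2^(30*pos) = 0.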
*/ -void mutate_sign_signed30(rustsecp256k1_v0_7_0_modinv32_signed30* x) { +void mutate_sign_signed30(rustsecp256k1_v0_8_0_modinv32_signed30* x) { int i; for (i = 0; i < 16; ++i) { - int pos = rustsecp256k1_v0_7_0_testrand_bits(3); + int pos = rustsecp256k1_v0_8_0_testrand_bits(3); if (x->v[pos] > 0 && x->v[pos + 1] <= 0x3fffffff) { x->v[pos] -= 0x40000000; x->v[pos + 1] += 1; @@ -807,11 +917,11 @@ void mutate_sign_signed30(rustsecp256k1_v0_7_0_modinv32_signed30* x) { } } -/* Test rustsecp256k1_v0_7_0_modinv32{_var}, using inputs in 16-bit limb format, and returning inverse. */ +/* Test rustsecp256k1_v0_8_0_modinv32{_var}, using inputs in 16-bit limb format, and returning inverse. */ void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod) { uint16_t tmp[16]; - rustsecp256k1_v0_7_0_modinv32_signed30 x; - rustsecp256k1_v0_7_0_modinv32_modinfo m; + rustsecp256k1_v0_8_0_modinv32_signed30 x; + rustsecp256k1_v0_8_0_modinv32_modinfo m; int i, vartime, nonzero; uint16_to_signed30(&x, in); @@ -825,7 +935,7 @@ void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod for (vartime = 0; vartime < 2; ++vartime) { /* compute inverse */ - (vartime ? rustsecp256k1_v0_7_0_modinv32_var : rustsecp256k1_v0_7_0_modinv32)(&x, &m); + (vartime ? rustsecp256k1_v0_8_0_modinv32_var : rustsecp256k1_v0_8_0_modinv32)(&x, &m); /* produce output */ signed30_to_uint16(out, &x); @@ -836,7 +946,7 @@ void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod for (i = 1; i < 16; ++i) CHECK(tmp[i] == 0); /* invert again */ - (vartime ? rustsecp256k1_v0_7_0_modinv32_var : rustsecp256k1_v0_7_0_modinv32)(&x, &m); + (vartime ? rustsecp256k1_v0_8_0_modinv32_var : rustsecp256k1_v0_8_0_modinv32)(&x, &m); /* check if the result is equal to the input */ signed30_to_uint16(tmp, &x); @@ -846,7 +956,7 @@ void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod #ifdef SECP256K1_WIDEMUL_INT128 /* Convert a 256-bit number represented as 16 uint16_t's to signed62 notation. */ -void uint16_to_signed62(rustsecp256k1_v0_7_0_modinv64_signed62* out, const uint16_t* in) { +void uint16_to_signed62(rustsecp256k1_v0_8_0_modinv64_signed62* out, const uint16_t* in) { int i; memset(out->v, 0, sizeof(out->v)); for (i = 0; i < 256; ++i) { @@ -855,7 +965,7 @@ void uint16_to_signed62(rustsecp256k1_v0_7_0_modinv64_signed62* out, const uint1 } /* Convert a 256-bit number in signed62 notation to a representation as 16 uint16_t's. */ -void signed62_to_uint16(uint16_t* out, const rustsecp256k1_v0_7_0_modinv64_signed62* in) { +void signed62_to_uint16(uint16_t* out, const rustsecp256k1_v0_8_0_modinv64_signed62* in) { int i; memset(out, 0, 32); for (i = 0; i < 256; ++i) { @@ -864,11 +974,11 @@ void signed62_to_uint16(uint16_t* out, const rustsecp256k1_v0_7_0_modinv64_signe } /* Randomly mutate the sign of limbs in signed62 representation, without changing the value. 
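Same balancing trick as the signed30 case, with the borrow unit scaled up to M62 + 1 = 2^62 to match the wider limbs.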
*/ -void mutate_sign_signed62(rustsecp256k1_v0_7_0_modinv64_signed62* x) { +void mutate_sign_signed62(rustsecp256k1_v0_8_0_modinv64_signed62* x) { static const int64_t M62 = (int64_t)(UINT64_MAX >> 2); int i; for (i = 0; i < 8; ++i) { - int pos = rustsecp256k1_v0_7_0_testrand_bits(2); + int pos = rustsecp256k1_v0_8_0_testrand_bits(2); if (x->v[pos] > 0 && x->v[pos + 1] <= M62) { x->v[pos] -= (M62 + 1); x->v[pos + 1] += 1; @@ -879,12 +989,12 @@ void mutate_sign_signed62(rustsecp256k1_v0_7_0_modinv64_signed62* x) { } } -/* Test rustsecp256k1_v0_7_0_modinv64{_var}, using inputs in 16-bit limb format, and returning inverse. */ +/* Test rustsecp256k1_v0_8_0_modinv64{_var}, using inputs in 16-bit limb format, and returning inverse. */ void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod) { static const int64_t M62 = (int64_t)(UINT64_MAX >> 2); uint16_t tmp[16]; - rustsecp256k1_v0_7_0_modinv64_signed62 x; - rustsecp256k1_v0_7_0_modinv64_modinfo m; + rustsecp256k1_v0_8_0_modinv64_signed62 x; + rustsecp256k1_v0_8_0_modinv64_modinfo m; int i, vartime, nonzero; uint16_to_signed62(&x, in); @@ -898,7 +1008,7 @@ void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod for (vartime = 0; vartime < 2; ++vartime) { /* compute inverse */ - (vartime ? rustsecp256k1_v0_7_0_modinv64_var : rustsecp256k1_v0_7_0_modinv64)(&x, &m); + (vartime ? rustsecp256k1_v0_8_0_modinv64_var : rustsecp256k1_v0_8_0_modinv64)(&x, &m); /* produce output */ signed62_to_uint16(out, &x); @@ -909,7 +1019,7 @@ void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod for (i = 1; i < 16; ++i) CHECK(tmp[i] == 0); /* invert again */ - (vartime ? rustsecp256k1_v0_7_0_modinv64_var : rustsecp256k1_v0_7_0_modinv64)(&x, &m); + (vartime ? rustsecp256k1_v0_8_0_modinv64_var : rustsecp256k1_v0_8_0_modinv64)(&x, &m); /* check if the result is equal to the input */ signed62_to_uint16(tmp, &x); @@ -1561,8 +1671,8 @@ void run_modinv_tests(void) { /* generate random xd and md, so that md is odd, md>1, xd<md, and gcd(xd,md)=1 */ +/* Add two 256-bit numbers (represented as 16 uint16_t's in LE order) mod 2^256. */ +void add256(uint16_t* out, const uint16_t* a, const uint16_t* b) { + int i; + uint32_t carry = 0; + for (i = 0; i < 16; ++i) { + carry += a[i]; + carry += b[i]; + out[i] = carry; + carry >>= 16; + } +} + +/* Negate a 256-bit number (represented as 16 uint16_t's in LE order) mod 2^256. */ +void neg256(uint16_t* out, const uint16_t* a) { + int i; + uint32_t carry = 1; + for (i = 0; i < 16; ++i) { + carry += (uint16_t)~a[i]; + out[i] = carry; + carry >>= 16; + } +} + +/* Right-shift a 256-bit number (represented as 16 uint16_t's in LE order). */ +void rshift256(uint16_t* out, const uint16_t* a, int n, int sign_extend) { + uint16_t sign = sign_extend && (a[15] >> 15); + int i, j; + for (i = 15; i >= 0; --i) { + uint16_t v = 0; + for (j = 0; j < 16; ++j) { + int frompos = i*16 + j + n; + if (frompos >= 256) { + v |= sign << j; + } else { + v |= ((uint16_t)((a[frompos >> 4] >> (frompos & 15)) & 1)) << j; + } + } + out[i] = v; + } +} + +/* Load a 64-bit unsigned integer into an array of 16 uint16_t's in LE order representing a 256-bit value. */ +void load256u64(uint16_t* out, uint64_t v, int is_signed) { + int i; + uint64_t sign = is_signed && (v >> 63) ? UINT64_MAX : 0; + for (i = 0; i < 4; ++i) { + out[i] = v >> (16 * i); + } + for (i = 4; i < 16; ++i) { + out[i] = sign; + } +} + +/* Load a 128-bit unsigned integer into an array of 16 uint16_t's in LE order representing a 256-bit value. */ +void load256two64(uint16_t* out, uint64_t hi, uint64_t lo, int is_signed) { + int i; + uint64_t sign = is_signed && (hi >> 63) ?
UINT64_MAX : 0; + for (i = 0; i < 4; ++i) { + out[i] = lo >> (16 * i); + } + for (i = 4; i < 8; ++i) { + out[i] = hi >> (16 * (i - 4)); + } + for (i = 8; i < 16; ++i) { + out[i] = sign; + } +} + +/* Check whether the 256-bit value represented by array of 16-bit values is in range -2^127 < v < 2^127. */ +int int256is127(const uint16_t* v) { + int all_0 = ((v[7] & 0x8000) == 0), all_1 = ((v[7] & 0x8000) == 0x8000); + int i; + for (i = 8; i < 16; ++i) { + if (v[i] != 0) all_0 = 0; + if (v[i] != 0xffff) all_1 = 0; + } + return all_0 || all_1; +} + +void load256u128(uint16_t* out, const rustsecp256k1_v0_8_0_uint128* v) { + uint64_t lo = rustsecp256k1_v0_8_0_u128_to_u64(v), hi = rustsecp256k1_v0_8_0_u128_hi_u64(v); + load256two64(out, hi, lo, 0); +} + +void load256i128(uint16_t* out, const rustsecp256k1_v0_8_0_int128* v) { + uint64_t lo; + int64_t hi; + rustsecp256k1_v0_8_0_int128 c = *v; + lo = rustsecp256k1_v0_8_0_i128_to_i64(&c); + rustsecp256k1_v0_8_0_i128_rshift(&c, 64); + hi = rustsecp256k1_v0_8_0_i128_to_i64(&c); + load256two64(out, hi, lo, 1); +} + +void run_int128_test_case(void) { + unsigned char buf[32]; + uint64_t v[4]; + rustsecp256k1_v0_8_0_int128 swa, swz; + rustsecp256k1_v0_8_0_uint128 uwa, uwz; + uint64_t ub, uc; + int64_t sb, sc; + uint16_t rswa[16], rswz[32], rswr[32], ruwa[16], ruwz[32], ruwr[32]; + uint16_t rub[16], ruc[16], rsb[16], rsc[16]; + int i; + + /* Generate 32-byte random value. */ + rustsecp256k1_v0_8_0_testrand256_test(buf); + /* Convert into 4 64-bit integers. */ + for (i = 0; i < 4; ++i) { + uint64_t vi = 0; + int j; + for (j = 0; j < 8; ++j) vi = (vi << 8) + buf[8*i + j]; + v[i] = vi; + } + /* Convert those into a 128-bit value and two 64-bit values (signed and unsigned). */ + rustsecp256k1_v0_8_0_u128_load(&uwa, v[1], v[0]); + rustsecp256k1_v0_8_0_i128_load(&swa, v[1], v[0]); + ub = v[2]; + sb = v[2]; + uc = v[3]; + sc = v[3]; + /* Load those also into 16-bit array representations. 
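These 16-bit limb copies let every 128-bit operation below be cross-checked against the generic 256-bit reference helpers (mulmod256, add256, neg256, rshift256).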
*/ + load256u128(ruwa, &uwa); + load256i128(rswa, &swa); + load256u64(rub, ub, 0); + load256u64(rsb, sb, 1); + load256u64(ruc, uc, 0); + load256u64(rsc, sc, 1); + /* test rustsecp256k1_v0_8_0_u128_mul */ + mulmod256(ruwr, rub, ruc, NULL); + rustsecp256k1_v0_8_0_u128_mul(&uwz, ub, uc); + load256u128(ruwz, &uwz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(ruwr, ruwz, 16) == 0); + /* test rustsecp256k1_v0_8_0_u128_accum_mul */ + mulmod256(ruwr, rub, ruc, NULL); + add256(ruwr, ruwr, ruwa); + uwz = uwa; + rustsecp256k1_v0_8_0_u128_accum_mul(&uwz, ub, uc); + load256u128(ruwz, &uwz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(ruwr, ruwz, 16) == 0); + /* test rustsecp256k1_v0_8_0_u128_accum_u64 */ + add256(ruwr, rub, ruwa); + uwz = uwa; + rustsecp256k1_v0_8_0_u128_accum_u64(&uwz, ub); + load256u128(ruwz, &uwz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(ruwr, ruwz, 16) == 0); + /* test rustsecp256k1_v0_8_0_u128_rshift */ + rshift256(ruwr, ruwa, uc % 128, 0); + uwz = uwa; + rustsecp256k1_v0_8_0_u128_rshift(&uwz, uc % 128); + load256u128(ruwz, &uwz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(ruwr, ruwz, 16) == 0); + /* test rustsecp256k1_v0_8_0_u128_to_u64 */ + CHECK(rustsecp256k1_v0_8_0_u128_to_u64(&uwa) == v[0]); + /* test rustsecp256k1_v0_8_0_u128_hi_u64 */ + CHECK(rustsecp256k1_v0_8_0_u128_hi_u64(&uwa) == v[1]); + /* test rustsecp256k1_v0_8_0_u128_from_u64 */ + rustsecp256k1_v0_8_0_u128_from_u64(&uwz, ub); + load256u128(ruwz, &uwz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(rub, ruwz, 16) == 0); + /* test rustsecp256k1_v0_8_0_u128_check_bits */ + { + int uwa_bits = 0; + int j; + for (j = 0; j < 128; ++j) { + if (ruwa[j / 16] >> (j % 16)) uwa_bits = 1 + j; + } + for (j = 0; j < 128; ++j) { + CHECK(rustsecp256k1_v0_8_0_u128_check_bits(&uwa, j) == (uwa_bits <= j)); + } + } + /* test rustsecp256k1_v0_8_0_i128_mul */ + mulmod256(rswr, rsb, rsc, NULL); + rustsecp256k1_v0_8_0_i128_mul(&swz, sb, sc); + load256i128(rswz, &swz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(rswr, rswz, 16) == 0); + /* test rustsecp256k1_v0_8_0_i128_accum_mul */ + mulmod256(rswr, rsb, rsc, NULL); + add256(rswr, rswr, rswa); + if (int256is127(rswr)) { + swz = swa; + rustsecp256k1_v0_8_0_i128_accum_mul(&swz, sb, sc); + load256i128(rswz, &swz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(rswr, rswz, 16) == 0); + } + /* test rustsecp256k1_v0_8_0_i128_det */ + { + uint16_t rsd[16], rse[16], rst[32]; + int64_t sd = v[0], se = v[1]; + load256u64(rsd, sd, 1); + load256u64(rse, se, 1); + mulmod256(rst, rsc, rsd, NULL); + neg256(rst, rst); + mulmod256(rswr, rsb, rse, NULL); + add256(rswr, rswr, rst); + rustsecp256k1_v0_8_0_i128_det(&swz, sb, sc, sd, se); + load256i128(rswz, &swz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(rswr, rswz, 16) == 0); + } + /* test rustsecp256k1_v0_8_0_i128_rshift */ + rshift256(rswr, rswa, uc % 127, 1); + swz = swa; + rustsecp256k1_v0_8_0_i128_rshift(&swz, uc % 127); + load256i128(rswz, &swz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(rswr, rswz, 16) == 0); + /* test rustsecp256k1_v0_8_0_i128_to_i64 */ + CHECK((uint64_t)rustsecp256k1_v0_8_0_i128_to_i64(&swa) == v[0]); + /* test rustsecp256k1_v0_8_0_i128_from_i64 */ + rustsecp256k1_v0_8_0_i128_from_i64(&swz, sb); + load256i128(rswz, &swz); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(rsb, rswz, 16) == 0); + /* test rustsecp256k1_v0_8_0_i128_eq_var */ + { + int expect = (uc & 1); + swz = swa; + if (!expect) { + /* Make sure swz != swa */ + uint64_t v0c = v[0], v1c = v[1]; + if (ub & 64) { + v1c ^= (((uint64_t)1) << (ub & 63)); + } else { + v0c ^= (((uint64_t)1) << (ub & 63)); + } + 
rustsecp256k1_v0_8_0_i128_load(&swz, v1c, v0c); + } + CHECK(rustsecp256k1_v0_8_0_i128_eq_var(&swa, &swz) == expect); + } + /* test rustsecp256k1_v0_8_0_i128_check_pow2 */ + { + int expect = (uc & 1); + int pos = ub % 127; + if (expect) { + /* If expect==1, set swz to exactly (2 << pos). */ + uint64_t hi = 0; + uint64_t lo = 0; + if (pos & 64) { + hi = (((uint64_t)1) << (pos & 63)); + } else { + lo = (((uint64_t)1) << (pos & 63)); + } + rustsecp256k1_v0_8_0_i128_load(&swz, hi, lo); + } else { + /* If expect==0, set swz = swa, but update expect=1 if swa happens to equal (2 << pos). */ + if (pos & 64) { + if ((v[1] == (((uint64_t)1) << (pos & 63))) && v[0] == 0) expect = 1; + } else { + if ((v[0] == (((uint64_t)1) << (pos & 63))) && v[1] == 0) expect = 1; + } + swz = swa; + } + CHECK(rustsecp256k1_v0_8_0_i128_check_pow2(&swz, pos) == expect); + } +} + +void run_int128_tests(void) { + { /* rustsecp256k1_v0_8_0_u128_accum_mul */ + rustsecp256k1_v0_8_0_uint128 res; + + /* Check rustsecp256k1_v0_8_0_u128_accum_mul overflow */ + rustsecp256k1_v0_8_0_u128_mul(&res, UINT64_MAX, UINT64_MAX); + rustsecp256k1_v0_8_0_u128_accum_mul(&res, UINT64_MAX, UINT64_MAX); + CHECK(rustsecp256k1_v0_8_0_u128_to_u64(&res) == 2); + CHECK(rustsecp256k1_v0_8_0_u128_hi_u64(&res) == 18446744073709551612U); + } + { /* rustsecp256k1_v0_8_0_u128_accum_mul */ + rustsecp256k1_v0_8_0_int128 res; + + /* Compute INT128_MAX = 2^127 - 1 with rustsecp256k1_v0_8_0_i128_accum_mul */ + rustsecp256k1_v0_8_0_i128_mul(&res, INT64_MAX, INT64_MAX); + rustsecp256k1_v0_8_0_i128_accum_mul(&res, INT64_MAX, INT64_MAX); + CHECK(rustsecp256k1_v0_8_0_i128_to_i64(&res) == 2); + rustsecp256k1_v0_8_0_i128_accum_mul(&res, 4, 9223372036854775807); + rustsecp256k1_v0_8_0_i128_accum_mul(&res, 1, 1); + CHECK((uint64_t)rustsecp256k1_v0_8_0_i128_to_i64(&res) == UINT64_MAX); + rustsecp256k1_v0_8_0_i128_rshift(&res, 64); + CHECK(rustsecp256k1_v0_8_0_i128_to_i64(&res) == INT64_MAX); + + /* Compute INT128_MIN = - 2^127 with rustsecp256k1_v0_8_0_i128_accum_mul */ + rustsecp256k1_v0_8_0_i128_mul(&res, INT64_MAX, INT64_MIN); + CHECK(rustsecp256k1_v0_8_0_i128_to_i64(&res) == INT64_MIN); + rustsecp256k1_v0_8_0_i128_accum_mul(&res, INT64_MAX, INT64_MIN); + CHECK(rustsecp256k1_v0_8_0_i128_to_i64(&res) == 0); + rustsecp256k1_v0_8_0_i128_accum_mul(&res, 2, INT64_MIN); + CHECK(rustsecp256k1_v0_8_0_i128_to_i64(&res) == 0); + rustsecp256k1_v0_8_0_i128_rshift(&res, 64); + CHECK(rustsecp256k1_v0_8_0_i128_to_i64(&res) == INT64_MIN); + } + { + /* Randomized tests. */ + int i; + for (i = 0; i < 256 * count; ++i) run_int128_test_case(); + } +} +#endif + +/***** SCALAR TESTS *****/ void scalar_test(void) { - rustsecp256k1_v0_7_0_scalar s; - rustsecp256k1_v0_7_0_scalar s1; - rustsecp256k1_v0_7_0_scalar s2; + rustsecp256k1_v0_8_0_scalar s; + rustsecp256k1_v0_8_0_scalar s1; + rustsecp256k1_v0_8_0_scalar s2; unsigned char c[32]; /* Set 's' to a random scalar, with value 'snum'. */ @@ -1602,172 +2009,172 @@ void scalar_test(void) { /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */ random_scalar_order_test(&s2); - rustsecp256k1_v0_7_0_scalar_get_b32(c, &s2); + rustsecp256k1_v0_8_0_scalar_get_b32(c, &s2); { int i; /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. 
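For example, if the top byte of s is 0xA7, the first two rounds compute n = 16*0 + 0xA = 0xA and then n = 16*0xA + 0x7 = 0xA7; after all 64 nibbles, n must equal s.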
*/ - rustsecp256k1_v0_7_0_scalar n; - rustsecp256k1_v0_7_0_scalar_set_int(&n, 0); + rustsecp256k1_v0_8_0_scalar n; + rustsecp256k1_v0_8_0_scalar_set_int(&n, 0); for (i = 0; i < 256; i += 4) { - rustsecp256k1_v0_7_0_scalar t; + rustsecp256k1_v0_8_0_scalar t; int j; - rustsecp256k1_v0_7_0_scalar_set_int(&t, rustsecp256k1_v0_7_0_scalar_get_bits(&s, 256 - 4 - i, 4)); + rustsecp256k1_v0_8_0_scalar_set_int(&t, rustsecp256k1_v0_8_0_scalar_get_bits(&s, 256 - 4 - i, 4)); for (j = 0; j < 4; j++) { - rustsecp256k1_v0_7_0_scalar_add(&n, &n, &n); + rustsecp256k1_v0_8_0_scalar_add(&n, &n, &n); } - rustsecp256k1_v0_7_0_scalar_add(&n, &n, &t); + rustsecp256k1_v0_8_0_scalar_add(&n, &n, &t); } - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&n, &s)); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&n, &s)); } { /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */ - rustsecp256k1_v0_7_0_scalar n; + rustsecp256k1_v0_8_0_scalar n; int i = 0; - rustsecp256k1_v0_7_0_scalar_set_int(&n, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&n, 0); while (i < 256) { - rustsecp256k1_v0_7_0_scalar t; + rustsecp256k1_v0_8_0_scalar t; int j; - int now = rustsecp256k1_v0_7_0_testrand_int(15) + 1; + int now = rustsecp256k1_v0_8_0_testrand_int(15) + 1; if (now + i > 256) { now = 256 - i; } - rustsecp256k1_v0_7_0_scalar_set_int(&t, rustsecp256k1_v0_7_0_scalar_get_bits_var(&s, 256 - now - i, now)); + rustsecp256k1_v0_8_0_scalar_set_int(&t, rustsecp256k1_v0_8_0_scalar_get_bits_var(&s, 256 - now - i, now)); for (j = 0; j < now; j++) { - rustsecp256k1_v0_7_0_scalar_add(&n, &n, &n); + rustsecp256k1_v0_8_0_scalar_add(&n, &n, &n); } - rustsecp256k1_v0_7_0_scalar_add(&n, &n, &t); + rustsecp256k1_v0_8_0_scalar_add(&n, &n, &t); i += now; } - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&n, &s)); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&n, &s)); } { - /* test rustsecp256k1_v0_7_0_scalar_shr_int */ - rustsecp256k1_v0_7_0_scalar r; + /* test rustsecp256k1_v0_8_0_scalar_shr_int */ + rustsecp256k1_v0_8_0_scalar r; int i; random_scalar_order_test(&r); for (i = 0; i < 100; ++i) { int low; - int shift = 1 + rustsecp256k1_v0_7_0_testrand_int(15); + int shift = 1 + rustsecp256k1_v0_8_0_testrand_int(15); int expected = r.d[0] % (1 << shift); - low = rustsecp256k1_v0_7_0_scalar_shr_int(&r, shift); + low = rustsecp256k1_v0_8_0_scalar_shr_int(&r, shift); CHECK(expected == low); } } { /* Test commutativity of add. */ - rustsecp256k1_v0_7_0_scalar r1, r2; - rustsecp256k1_v0_7_0_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_7_0_scalar_add(&r2, &s2, &s1); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_8_0_scalar r1, r2; + rustsecp256k1_v0_8_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_8_0_scalar_add(&r2, &s2, &s1); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &r2)); } { - rustsecp256k1_v0_7_0_scalar r1, r2; - rustsecp256k1_v0_7_0_scalar b; + rustsecp256k1_v0_8_0_scalar r1, r2; + rustsecp256k1_v0_8_0_scalar b; int i; /* Test add_bit. */ - int bit = rustsecp256k1_v0_7_0_testrand_bits(8); - rustsecp256k1_v0_7_0_scalar_set_int(&b, 1); - CHECK(rustsecp256k1_v0_7_0_scalar_is_one(&b)); + int bit = rustsecp256k1_v0_8_0_testrand_bits(8); + rustsecp256k1_v0_8_0_scalar_set_int(&b, 1); + CHECK(rustsecp256k1_v0_8_0_scalar_is_one(&b)); for (i = 0; i < bit; i++) { - rustsecp256k1_v0_7_0_scalar_add(&b, &b, &b); + rustsecp256k1_v0_8_0_scalar_add(&b, &b, &b); } r1 = s1; r2 = s1; - if (!rustsecp256k1_v0_7_0_scalar_add(&r1, &r1, &b)) { + if (!rustsecp256k1_v0_8_0_scalar_add(&r1, &r1, &b)) { /* No overflow happened. 
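That is, r1 = s1 + 2^bit stayed below the group order, so cadd_bit with flag 1 must reproduce the same sum, and with flag 0 must leave r2 untouched.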
*/ - rustsecp256k1_v0_7_0_scalar_cadd_bit(&r2, bit, 1); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_8_0_scalar_cadd_bit(&r2, bit, 1); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &r2)); /* cadd is a noop when flag is zero */ - rustsecp256k1_v0_7_0_scalar_cadd_bit(&r2, bit, 0); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_8_0_scalar_cadd_bit(&r2, bit, 0); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &r2)); } } { /* Test commutativity of mul. */ - rustsecp256k1_v0_7_0_scalar r1, r2; - rustsecp256k1_v0_7_0_scalar_mul(&r1, &s1, &s2); - rustsecp256k1_v0_7_0_scalar_mul(&r2, &s2, &s1); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_8_0_scalar r1, r2; + rustsecp256k1_v0_8_0_scalar_mul(&r1, &s1, &s2); + rustsecp256k1_v0_8_0_scalar_mul(&r2, &s2, &s1); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &r2)); } { /* Test associativity of add. */ - rustsecp256k1_v0_7_0_scalar r1, r2; - rustsecp256k1_v0_7_0_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_7_0_scalar_add(&r1, &r1, &s); - rustsecp256k1_v0_7_0_scalar_add(&r2, &s2, &s); - rustsecp256k1_v0_7_0_scalar_add(&r2, &s1, &r2); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_8_0_scalar r1, r2; + rustsecp256k1_v0_8_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_8_0_scalar_add(&r1, &r1, &s); + rustsecp256k1_v0_8_0_scalar_add(&r2, &s2, &s); + rustsecp256k1_v0_8_0_scalar_add(&r2, &s1, &r2); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &r2)); } { /* Test associativity of mul. */ - rustsecp256k1_v0_7_0_scalar r1, r2; - rustsecp256k1_v0_7_0_scalar_mul(&r1, &s1, &s2); - rustsecp256k1_v0_7_0_scalar_mul(&r1, &r1, &s); - rustsecp256k1_v0_7_0_scalar_mul(&r2, &s2, &s); - rustsecp256k1_v0_7_0_scalar_mul(&r2, &s1, &r2); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_8_0_scalar r1, r2; + rustsecp256k1_v0_8_0_scalar_mul(&r1, &s1, &s2); + rustsecp256k1_v0_8_0_scalar_mul(&r1, &r1, &s); + rustsecp256k1_v0_8_0_scalar_mul(&r2, &s2, &s); + rustsecp256k1_v0_8_0_scalar_mul(&r2, &s1, &r2); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &r2)); } { /* Test distributitivity of mul over add. */ - rustsecp256k1_v0_7_0_scalar r1, r2, t; - rustsecp256k1_v0_7_0_scalar_add(&r1, &s1, &s2); - rustsecp256k1_v0_7_0_scalar_mul(&r1, &r1, &s); - rustsecp256k1_v0_7_0_scalar_mul(&r2, &s1, &s); - rustsecp256k1_v0_7_0_scalar_mul(&t, &s2, &s); - rustsecp256k1_v0_7_0_scalar_add(&r2, &r2, &t); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &r2)); + rustsecp256k1_v0_8_0_scalar r1, r2, t; + rustsecp256k1_v0_8_0_scalar_add(&r1, &s1, &s2); + rustsecp256k1_v0_8_0_scalar_mul(&r1, &r1, &s); + rustsecp256k1_v0_8_0_scalar_mul(&r2, &s1, &s); + rustsecp256k1_v0_8_0_scalar_mul(&t, &s2, &s); + rustsecp256k1_v0_8_0_scalar_add(&r2, &r2, &t); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &r2)); } { /* Test multiplicative identity. */ - rustsecp256k1_v0_7_0_scalar r1, v1; - rustsecp256k1_v0_7_0_scalar_set_int(&v1,1); - rustsecp256k1_v0_7_0_scalar_mul(&r1, &s1, &v1); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &s1)); + rustsecp256k1_v0_8_0_scalar r1, v1; + rustsecp256k1_v0_8_0_scalar_set_int(&v1,1); + rustsecp256k1_v0_8_0_scalar_mul(&r1, &s1, &v1); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &s1)); } { /* Test additive identity. 
*/ - rustsecp256k1_v0_7_0_scalar r1, v0; - rustsecp256k1_v0_7_0_scalar_set_int(&v0,0); - rustsecp256k1_v0_7_0_scalar_add(&r1, &s1, &v0); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &s1)); + rustsecp256k1_v0_8_0_scalar r1, v0; + rustsecp256k1_v0_8_0_scalar_set_int(&v0,0); + rustsecp256k1_v0_8_0_scalar_add(&r1, &s1, &v0); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &s1)); } { /* Test zero product property. */ - rustsecp256k1_v0_7_0_scalar r1, v0; - rustsecp256k1_v0_7_0_scalar_set_int(&v0,0); - rustsecp256k1_v0_7_0_scalar_mul(&r1, &s1, &v0); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &v0)); + rustsecp256k1_v0_8_0_scalar r1, v0; + rustsecp256k1_v0_8_0_scalar_set_int(&v0,0); + rustsecp256k1_v0_8_0_scalar_mul(&r1, &s1, &v0); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &v0)); } } void run_scalar_set_b32_seckey_tests(void) { unsigned char b32[32]; - rustsecp256k1_v0_7_0_scalar s1; - rustsecp256k1_v0_7_0_scalar s2; + rustsecp256k1_v0_8_0_scalar s1; + rustsecp256k1_v0_8_0_scalar s2; /* Usually set_b32 and set_b32_seckey give the same result */ random_scalar_order_b32(b32); - rustsecp256k1_v0_7_0_scalar_set_b32(&s1, b32, NULL); - CHECK(rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&s2, b32) == 1); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&s1, &s2) == 1); + rustsecp256k1_v0_8_0_scalar_set_b32(&s1, b32, NULL); + CHECK(rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&s2, b32) == 1); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&s1, &s2) == 1); memset(b32, 0, sizeof(b32)); - CHECK(rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&s2, b32) == 0); + CHECK(rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&s2, b32) == 0); memset(b32, 0xFF, sizeof(b32)); - CHECK(rustsecp256k1_v0_7_0_scalar_set_b32_seckey(&s2, b32) == 0); + CHECK(rustsecp256k1_v0_8_0_scalar_set_b32_seckey(&s2, b32) == 0); } void run_scalar_tests(void) { @@ -1781,23 +2188,23 @@ void run_scalar_tests(void) { { /* (-1)+1 should be zero. */ - rustsecp256k1_v0_7_0_scalar s, o; - rustsecp256k1_v0_7_0_scalar_set_int(&s, 1); - CHECK(rustsecp256k1_v0_7_0_scalar_is_one(&s)); - rustsecp256k1_v0_7_0_scalar_negate(&o, &s); - rustsecp256k1_v0_7_0_scalar_add(&o, &o, &s); - CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(&o)); - rustsecp256k1_v0_7_0_scalar_negate(&o, &o); - CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(&o)); + rustsecp256k1_v0_8_0_scalar s, o; + rustsecp256k1_v0_8_0_scalar_set_int(&s, 1); + CHECK(rustsecp256k1_v0_8_0_scalar_is_one(&s)); + rustsecp256k1_v0_8_0_scalar_negate(&o, &s); + rustsecp256k1_v0_8_0_scalar_add(&o, &o, &s); + CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(&o)); + rustsecp256k1_v0_8_0_scalar_negate(&o, &o); + CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(&o)); } { /* Does check_overflow check catch all ones? */ - static const rustsecp256k1_v0_7_0_scalar overflowed = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_8_0_scalar overflowed = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - CHECK(rustsecp256k1_v0_7_0_scalar_check_overflow(&overflowed)); + CHECK(rustsecp256k1_v0_8_0_scalar_check_overflow(&overflowed)); } { @@ -1806,14 +2213,14 @@ void run_scalar_tests(void) { * and edge-case coverage on 32-bit and 64-bit implementations. * The responses were generated with Sage 5.9. 
*/ - rustsecp256k1_v0_7_0_scalar x; - rustsecp256k1_v0_7_0_scalar y; - rustsecp256k1_v0_7_0_scalar z; - rustsecp256k1_v0_7_0_scalar zz; - rustsecp256k1_v0_7_0_scalar one; - rustsecp256k1_v0_7_0_scalar r1; - rustsecp256k1_v0_7_0_scalar r2; - rustsecp256k1_v0_7_0_scalar zzv; + rustsecp256k1_v0_8_0_scalar x; + rustsecp256k1_v0_8_0_scalar y; + rustsecp256k1_v0_8_0_scalar z; + rustsecp256k1_v0_8_0_scalar zz; + rustsecp256k1_v0_8_0_scalar one; + rustsecp256k1_v0_8_0_scalar r1; + rustsecp256k1_v0_8_0_scalar r2; + rustsecp256k1_v0_8_0_scalar zzv; int overflow; unsigned char chal[33][2][32] = { {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00, @@ -2347,30 +2754,30 @@ void run_scalar_tests(void) { 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}} }; - rustsecp256k1_v0_7_0_scalar_set_int(&one, 1); + rustsecp256k1_v0_8_0_scalar_set_int(&one, 1); for (i = 0; i < 33; i++) { - rustsecp256k1_v0_7_0_scalar_set_b32(&x, chal[i][0], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&x, chal[i][0], &overflow); CHECK(!overflow); - rustsecp256k1_v0_7_0_scalar_set_b32(&y, chal[i][1], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&y, chal[i][1], &overflow); CHECK(!overflow); - rustsecp256k1_v0_7_0_scalar_set_b32(&r1, res[i][0], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&r1, res[i][0], &overflow); CHECK(!overflow); - rustsecp256k1_v0_7_0_scalar_set_b32(&r2, res[i][1], &overflow); + rustsecp256k1_v0_8_0_scalar_set_b32(&r2, res[i][1], &overflow); CHECK(!overflow); - rustsecp256k1_v0_7_0_scalar_mul(&z, &x, &y); - CHECK(!rustsecp256k1_v0_7_0_scalar_check_overflow(&z)); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&r1, &z)); - if (!rustsecp256k1_v0_7_0_scalar_is_zero(&y)) { - rustsecp256k1_v0_7_0_scalar_inverse(&zz, &y); - CHECK(!rustsecp256k1_v0_7_0_scalar_check_overflow(&zz)); - rustsecp256k1_v0_7_0_scalar_inverse_var(&zzv, &y); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&zzv, &zz)); - rustsecp256k1_v0_7_0_scalar_mul(&z, &z, &zz); - CHECK(!rustsecp256k1_v0_7_0_scalar_check_overflow(&z)); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&x, &z)); - rustsecp256k1_v0_7_0_scalar_mul(&zz, &zz, &y); - CHECK(!rustsecp256k1_v0_7_0_scalar_check_overflow(&zz)); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&one, &zz)); + rustsecp256k1_v0_8_0_scalar_mul(&z, &x, &y); + CHECK(!rustsecp256k1_v0_8_0_scalar_check_overflow(&z)); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&r1, &z)); + if (!rustsecp256k1_v0_8_0_scalar_is_zero(&y)) { + rustsecp256k1_v0_8_0_scalar_inverse(&zz, &y); + CHECK(!rustsecp256k1_v0_8_0_scalar_check_overflow(&zz)); + rustsecp256k1_v0_8_0_scalar_inverse_var(&zzv, &y); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&zzv, &zz)); + rustsecp256k1_v0_8_0_scalar_mul(&z, &z, &zz); + CHECK(!rustsecp256k1_v0_8_0_scalar_check_overflow(&z)); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&x, &z)); + rustsecp256k1_v0_8_0_scalar_mul(&zz, &zz, &y); + CHECK(!rustsecp256k1_v0_8_0_scalar_check_overflow(&zz)); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&one, &zz)); } } } @@ -2378,32 +2785,32 @@ void run_scalar_tests(void) { /***** FIELD TESTS *****/ -void random_fe(rustsecp256k1_v0_7_0_fe *x) { +void random_fe(rustsecp256k1_v0_8_0_fe *x) { unsigned char bin[32]; do { - rustsecp256k1_v0_7_0_testrand256(bin); - if (rustsecp256k1_v0_7_0_fe_set_b32(x, bin)) { + rustsecp256k1_v0_8_0_testrand256(bin); + if (rustsecp256k1_v0_8_0_fe_set_b32(x, bin)) { return; } } while(1); } -void random_fe_test(rustsecp256k1_v0_7_0_fe *x) { +void random_fe_test(rustsecp256k1_v0_8_0_fe *x) { unsigned char bin[32]; do { - 
rustsecp256k1_v0_7_0_testrand256_test(bin); - if (rustsecp256k1_v0_7_0_fe_set_b32(x, bin)) { + rustsecp256k1_v0_8_0_testrand256_test(bin); + if (rustsecp256k1_v0_8_0_fe_set_b32(x, bin)) { return; } } while(1); } -void random_fe_non_zero(rustsecp256k1_v0_7_0_fe *nz) { +void random_fe_non_zero(rustsecp256k1_v0_8_0_fe *nz) { int tries = 10; while (--tries >= 0) { random_fe(nz); - rustsecp256k1_v0_7_0_fe_normalize(nz); - if (!rustsecp256k1_v0_7_0_fe_is_zero(nz)) { + rustsecp256k1_v0_8_0_fe_normalize(nz); + if (!rustsecp256k1_v0_8_0_fe_is_zero(nz)) { break; } } @@ -2411,20 +2818,20 @@ void random_fe_non_zero(rustsecp256k1_v0_7_0_fe *nz) { CHECK(tries >= 0); } -void random_fe_non_square(rustsecp256k1_v0_7_0_fe *ns) { - rustsecp256k1_v0_7_0_fe r; +void random_fe_non_square(rustsecp256k1_v0_8_0_fe *ns) { + rustsecp256k1_v0_8_0_fe r; random_fe_non_zero(ns); - if (rustsecp256k1_v0_7_0_fe_sqrt(&r, ns)) { - rustsecp256k1_v0_7_0_fe_negate(ns, ns, 1); + if (rustsecp256k1_v0_8_0_fe_sqrt(&r, ns)) { + rustsecp256k1_v0_8_0_fe_negate(ns, ns, 1); } } -int check_fe_equal(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b) { - rustsecp256k1_v0_7_0_fe an = *a; - rustsecp256k1_v0_7_0_fe bn = *b; - rustsecp256k1_v0_7_0_fe_normalize_weak(&an); - rustsecp256k1_v0_7_0_fe_normalize_var(&bn); - return rustsecp256k1_v0_7_0_fe_equal_var(&an, &bn); +int check_fe_equal(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *b) { + rustsecp256k1_v0_8_0_fe an = *a; + rustsecp256k1_v0_8_0_fe bn = *b; + rustsecp256k1_v0_8_0_fe_normalize_weak(&an); + rustsecp256k1_v0_8_0_fe_normalize_var(&bn); + return rustsecp256k1_v0_8_0_fe_equal_var(&an, &bn); } void run_field_convert(void) { @@ -2434,124 +2841,185 @@ void run_field_convert(void) { 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40 }; - static const rustsecp256k1_v0_7_0_fe_storage fes = SECP256K1_FE_STORAGE_CONST( + static const rustsecp256k1_v0_8_0_fe_storage fes = SECP256K1_FE_STORAGE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); - static const rustsecp256k1_v0_7_0_fe fe = SECP256K1_FE_CONST( + static const rustsecp256k1_v0_8_0_fe fe = SECP256K1_FE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); - rustsecp256k1_v0_7_0_fe fe2; + rustsecp256k1_v0_8_0_fe fe2; unsigned char b322[32]; - rustsecp256k1_v0_7_0_fe_storage fes2; + rustsecp256k1_v0_8_0_fe_storage fes2; /* Check conversions to fe. */ - CHECK(rustsecp256k1_v0_7_0_fe_set_b32(&fe2, b32)); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&fe, &fe2)); - rustsecp256k1_v0_7_0_fe_from_storage(&fe2, &fes); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&fe, &fe2)); + CHECK(rustsecp256k1_v0_8_0_fe_set_b32(&fe2, b32)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&fe, &fe2)); + rustsecp256k1_v0_8_0_fe_from_storage(&fe2, &fes); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&fe, &fe2)); /* Check conversion from fe. 
*/ - rustsecp256k1_v0_7_0_fe_get_b32(b322, &fe); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(b322, b32, 32) == 0); - rustsecp256k1_v0_7_0_fe_to_storage(&fes2, &fe); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&fes2, &fes, sizeof(fes)) == 0); + rustsecp256k1_v0_8_0_fe_get_b32(b322, &fe); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(b322, b32, 32) == 0); + rustsecp256k1_v0_8_0_fe_to_storage(&fes2, &fe); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&fes2, &fes, sizeof(fes)) == 0); } /* Returns true if two field elements have the same representation. */ -int fe_identical(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *b) { +int fe_identical(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *b) { int ret = 1; #ifdef VERIFY ret &= (a->magnitude == b->magnitude); ret &= (a->normalized == b->normalized); #endif /* Compare the struct member that holds the limbs. */ - ret &= (rustsecp256k1_v0_7_0_memcmp_var(a->n, b->n, sizeof(a->n)) == 0); + ret &= (rustsecp256k1_v0_8_0_memcmp_var(a->n, b->n, sizeof(a->n)) == 0); return ret; } +void run_field_half(void) { + rustsecp256k1_v0_8_0_fe t, u; + int m; + + /* Check magnitude 0 input */ + rustsecp256k1_v0_8_0_fe_get_bounds(&t, 0); + rustsecp256k1_v0_8_0_fe_half(&t); +#ifdef VERIFY + CHECK(t.magnitude == 1); + CHECK(t.normalized == 0); +#endif + CHECK(rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&t)); + + /* Check non-zero magnitudes in the supported range */ + for (m = 1; m < 32; m++) { + /* Check max-value input */ + rustsecp256k1_v0_8_0_fe_get_bounds(&t, m); + + u = t; + rustsecp256k1_v0_8_0_fe_half(&u); +#ifdef VERIFY + CHECK(u.magnitude == (m >> 1) + 1); + CHECK(u.normalized == 0); +#endif + rustsecp256k1_v0_8_0_fe_normalize_weak(&u); + rustsecp256k1_v0_8_0_fe_add(&u, &u); + CHECK(check_fe_equal(&t, &u)); + + /* Check worst-case input: ensure the LSB is 1 so that P will be added, + * which will also cause all carries to be 1, since all limbs that can + * generate a carry are initially even and all limbs of P are odd in + * every existing field implementation. */ + rustsecp256k1_v0_8_0_fe_get_bounds(&t, m); + CHECK(t.n[0] > 0); + CHECK((t.n[0] & 1) == 0); + --t.n[0]; + + u = t; + rustsecp256k1_v0_8_0_fe_half(&u); +#ifdef VERIFY + CHECK(u.magnitude == (m >> 1) + 1); + CHECK(u.normalized == 0); +#endif + rustsecp256k1_v0_8_0_fe_normalize_weak(&u); + rustsecp256k1_v0_8_0_fe_add(&u, &u); + CHECK(check_fe_equal(&t, &u)); + } +} + void run_field_misc(void) { - rustsecp256k1_v0_7_0_fe x; - rustsecp256k1_v0_7_0_fe y; - rustsecp256k1_v0_7_0_fe z; - rustsecp256k1_v0_7_0_fe q; - rustsecp256k1_v0_7_0_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); + rustsecp256k1_v0_8_0_fe x; + rustsecp256k1_v0_8_0_fe y; + rustsecp256k1_v0_8_0_fe z; + rustsecp256k1_v0_8_0_fe q; + rustsecp256k1_v0_8_0_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); int i, j; - for (i = 0; i < 5*count; i++) { - rustsecp256k1_v0_7_0_fe_storage xs, ys, zs; - random_fe(&x); + for (i = 0; i < 1000 * count; i++) { + rustsecp256k1_v0_8_0_fe_storage xs, ys, zs; + if (i & 1) { + random_fe(&x); + } else { + random_fe_test(&x); + } random_fe_non_zero(&y); /* Test the fe equality and comparison operations. */ - CHECK(rustsecp256k1_v0_7_0_fe_cmp_var(&x, &x) == 0); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&x, &x)); + CHECK(rustsecp256k1_v0_8_0_fe_cmp_var(&x, &x) == 0); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&x, &x)); z = x; - rustsecp256k1_v0_7_0_fe_add(&z,&y); + rustsecp256k1_v0_8_0_fe_add(&z,&y); /* Test fe conditional move; z is not normalized here. 
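This exercises the requirement that cmov copy not just the limbs but also, under VERIFY, the magnitude and normalized metadata, which fe_identical checks below.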
*/ q = x; - rustsecp256k1_v0_7_0_fe_cmov(&x, &z, 0); + rustsecp256k1_v0_8_0_fe_cmov(&x, &z, 0); #ifdef VERIFY CHECK(x.normalized && x.magnitude == 1); #endif - rustsecp256k1_v0_7_0_fe_cmov(&x, &x, 1); + rustsecp256k1_v0_8_0_fe_cmov(&x, &x, 1); CHECK(!fe_identical(&x, &z)); CHECK(fe_identical(&x, &q)); - rustsecp256k1_v0_7_0_fe_cmov(&q, &z, 1); + rustsecp256k1_v0_8_0_fe_cmov(&q, &z, 1); #ifdef VERIFY CHECK(!q.normalized && q.magnitude == z.magnitude); #endif CHECK(fe_identical(&q, &z)); - rustsecp256k1_v0_7_0_fe_normalize_var(&x); - rustsecp256k1_v0_7_0_fe_normalize_var(&z); - CHECK(!rustsecp256k1_v0_7_0_fe_equal_var(&x, &z)); - rustsecp256k1_v0_7_0_fe_normalize_var(&q); - rustsecp256k1_v0_7_0_fe_cmov(&q, &z, (i&1)); + rustsecp256k1_v0_8_0_fe_normalize_var(&x); + rustsecp256k1_v0_8_0_fe_normalize_var(&z); + CHECK(!rustsecp256k1_v0_8_0_fe_equal_var(&x, &z)); + rustsecp256k1_v0_8_0_fe_normalize_var(&q); + rustsecp256k1_v0_8_0_fe_cmov(&q, &z, (i&1)); #ifdef VERIFY CHECK(q.normalized && q.magnitude == 1); #endif for (j = 0; j < 6; j++) { - rustsecp256k1_v0_7_0_fe_negate(&z, &z, j+1); - rustsecp256k1_v0_7_0_fe_normalize_var(&q); - rustsecp256k1_v0_7_0_fe_cmov(&q, &z, (j&1)); + rustsecp256k1_v0_8_0_fe_negate(&z, &z, j+1); + rustsecp256k1_v0_8_0_fe_normalize_var(&q); + rustsecp256k1_v0_8_0_fe_cmov(&q, &z, (j&1)); #ifdef VERIFY CHECK((q.normalized != (j&1)) && q.magnitude == ((j&1) ? z.magnitude : 1)); #endif } - rustsecp256k1_v0_7_0_fe_normalize_var(&z); + rustsecp256k1_v0_8_0_fe_normalize_var(&z); /* Test storage conversion and conditional moves. */ - rustsecp256k1_v0_7_0_fe_to_storage(&xs, &x); - rustsecp256k1_v0_7_0_fe_to_storage(&ys, &y); - rustsecp256k1_v0_7_0_fe_to_storage(&zs, &z); - rustsecp256k1_v0_7_0_fe_storage_cmov(&zs, &xs, 0); - rustsecp256k1_v0_7_0_fe_storage_cmov(&zs, &zs, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&xs, &zs, sizeof(xs)) != 0); - rustsecp256k1_v0_7_0_fe_storage_cmov(&ys, &xs, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&xs, &ys, sizeof(xs)) == 0); - rustsecp256k1_v0_7_0_fe_from_storage(&x, &xs); - rustsecp256k1_v0_7_0_fe_from_storage(&y, &ys); - rustsecp256k1_v0_7_0_fe_from_storage(&z, &zs); + rustsecp256k1_v0_8_0_fe_to_storage(&xs, &x); + rustsecp256k1_v0_8_0_fe_to_storage(&ys, &y); + rustsecp256k1_v0_8_0_fe_to_storage(&zs, &z); + rustsecp256k1_v0_8_0_fe_storage_cmov(&zs, &xs, 0); + rustsecp256k1_v0_8_0_fe_storage_cmov(&zs, &zs, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&xs, &zs, sizeof(xs)) != 0); + rustsecp256k1_v0_8_0_fe_storage_cmov(&ys, &xs, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&xs, &ys, sizeof(xs)) == 0); + rustsecp256k1_v0_8_0_fe_from_storage(&x, &xs); + rustsecp256k1_v0_8_0_fe_from_storage(&y, &ys); + rustsecp256k1_v0_8_0_fe_from_storage(&z, &zs); /* Test that mul_int, mul, and add agree. 
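Concretely: after the storage round-trip above y equals x, so two adds give y = 3*x, which must match z computed with mul_int(&z, 3); likewise mul_int by 5 must agree with a full fe_mul by the constant fe5.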
*/ - rustsecp256k1_v0_7_0_fe_add(&y, &x); - rustsecp256k1_v0_7_0_fe_add(&y, &x); + rustsecp256k1_v0_8_0_fe_add(&y, &x); + rustsecp256k1_v0_8_0_fe_add(&y, &x); z = x; - rustsecp256k1_v0_7_0_fe_mul_int(&z, 3); + rustsecp256k1_v0_8_0_fe_mul_int(&z, 3); CHECK(check_fe_equal(&y, &z)); - rustsecp256k1_v0_7_0_fe_add(&y, &x); - rustsecp256k1_v0_7_0_fe_add(&z, &x); + rustsecp256k1_v0_8_0_fe_add(&y, &x); + rustsecp256k1_v0_8_0_fe_add(&z, &x); CHECK(check_fe_equal(&z, &y)); z = x; - rustsecp256k1_v0_7_0_fe_mul_int(&z, 5); - rustsecp256k1_v0_7_0_fe_mul(&q, &x, &fe5); + rustsecp256k1_v0_8_0_fe_mul_int(&z, 5); + rustsecp256k1_v0_8_0_fe_mul(&q, &x, &fe5); CHECK(check_fe_equal(&z, &q)); - rustsecp256k1_v0_7_0_fe_negate(&x, &x, 1); - rustsecp256k1_v0_7_0_fe_add(&z, &x); - rustsecp256k1_v0_7_0_fe_add(&q, &x); + rustsecp256k1_v0_8_0_fe_negate(&x, &x, 1); + rustsecp256k1_v0_8_0_fe_add(&z, &x); + rustsecp256k1_v0_8_0_fe_add(&q, &x); CHECK(check_fe_equal(&y, &z)); CHECK(check_fe_equal(&q, &y)); + /* Check rustsecp256k1_v0_8_0_fe_half. */ + z = x; + rustsecp256k1_v0_8_0_fe_half(&z); + rustsecp256k1_v0_8_0_fe_add(&z, &z); + CHECK(check_fe_equal(&x, &z)); + rustsecp256k1_v0_8_0_fe_add(&z, &z); + rustsecp256k1_v0_8_0_fe_half(&z); + CHECK(check_fe_equal(&x, &z)); } } -void test_fe_mul(const rustsecp256k1_v0_7_0_fe* a, const rustsecp256k1_v0_7_0_fe* b, int use_sqr) +void test_fe_mul(const rustsecp256k1_v0_8_0_fe* a, const rustsecp256k1_v0_8_0_fe* b, int use_sqr) { - rustsecp256k1_v0_7_0_fe c, an, bn; + rustsecp256k1_v0_8_0_fe c, an, bn; /* Variables in BE 32-byte format. */ unsigned char a32[32], b32[32], c32[32]; /* Variables in LE 16x uint16_t format. */ @@ -2567,20 +3035,20 @@ void test_fe_mul(const rustsecp256k1_v0_7_0_fe* a, const rustsecp256k1_v0_7_0_fe /* Compute C = A * B in fe format. */ c = *a; if (use_sqr) { - rustsecp256k1_v0_7_0_fe_sqr(&c, &c); + rustsecp256k1_v0_8_0_fe_sqr(&c, &c); } else { - rustsecp256k1_v0_7_0_fe_mul(&c, &c, b); + rustsecp256k1_v0_8_0_fe_mul(&c, &c, b); } /* Convert A, B, C into LE 16x uint16_t format. */ an = *a; bn = *b; - rustsecp256k1_v0_7_0_fe_normalize_var(&c); - rustsecp256k1_v0_7_0_fe_normalize_var(&an); - rustsecp256k1_v0_7_0_fe_normalize_var(&bn); - rustsecp256k1_v0_7_0_fe_get_b32(a32, &an); - rustsecp256k1_v0_7_0_fe_get_b32(b32, &bn); - rustsecp256k1_v0_7_0_fe_get_b32(c32, &c); + rustsecp256k1_v0_8_0_fe_normalize_var(&c); + rustsecp256k1_v0_8_0_fe_normalize_var(&an); + rustsecp256k1_v0_8_0_fe_normalize_var(&bn); + rustsecp256k1_v0_8_0_fe_get_b32(a32, &an); + rustsecp256k1_v0_8_0_fe_get_b32(b32, &bn); + rustsecp256k1_v0_8_0_fe_get_b32(c32, &c); for (i = 0; i < 16; ++i) { a16[i] = a32[31 - 2*i] + ((uint16_t)a32[30 - 2*i] << 8); b16[i] = b32[31 - 2*i] + ((uint16_t)b32[30 - 2*i] << 8); @@ -2589,13 +3057,13 @@ void test_fe_mul(const rustsecp256k1_v0_7_0_fe* a, const rustsecp256k1_v0_7_0_fe /* Compute T = A * B in LE 16x uint16_t format. 
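test_fe_mul validates the field multiplication against a generic 256-bit mulmod over sixteen 16-bit limbs, so the byte-order conversion in the loop above is load-bearing: a16[i] = a32[31 - 2*i] + (a32[30 - 2*i] << 8) reads the big-endian 32-byte serialization into little-endian 16-bit limbs. A self-contained round-trip check of that index arithmetic (the byte pattern and helper layout here are illustrative only, not taken from the library):

```
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void) {
    unsigned char a32[32], back[32];
    uint16_t a16[16];
    int i;
    for (i = 0; i < 32; i++) a32[i] = (unsigned char)(37 * i + 5);
    /* big-endian bytes -> little-endian 16-bit limbs, as in test_fe_mul */
    for (i = 0; i < 16; i++) {
        a16[i] = a32[31 - 2 * i] + ((uint16_t)a32[30 - 2 * i] << 8);
    }
    /* limbs -> big-endian bytes again; a clean round trip shows no bits moved */
    for (i = 0; i < 16; i++) {
        back[31 - 2 * i] = (unsigned char)(a16[i] & 0xff);
        back[30 - 2 * i] = (unsigned char)(a16[i] >> 8);
    }
    assert(memcmp(a32, back, 32) == 0);
    return 0;
}
```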
*/ mulmod256(t16, a16, b16, m16); /* Compare */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(t16, c16, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(t16, c16, 32) == 0); } void run_fe_mul(void) { int i; for (i = 0; i < 100 * count; ++i) { - rustsecp256k1_v0_7_0_fe a, b, c, d; + rustsecp256k1_v0_8_0_fe a, b, c, d; random_fe(&a); random_field_element_magnitude(&a); random_fe(&b); @@ -2614,50 +3082,50 @@ void run_fe_mul(void) { } void run_sqr(void) { - rustsecp256k1_v0_7_0_fe x, s; + rustsecp256k1_v0_8_0_fe x, s; { int i; - rustsecp256k1_v0_7_0_fe_set_int(&x, 1); - rustsecp256k1_v0_7_0_fe_negate(&x, &x, 1); + rustsecp256k1_v0_8_0_fe_set_int(&x, 1); + rustsecp256k1_v0_8_0_fe_negate(&x, &x, 1); for (i = 1; i <= 512; ++i) { - rustsecp256k1_v0_7_0_fe_mul_int(&x, 2); - rustsecp256k1_v0_7_0_fe_normalize(&x); - rustsecp256k1_v0_7_0_fe_sqr(&s, &x); + rustsecp256k1_v0_8_0_fe_mul_int(&x, 2); + rustsecp256k1_v0_8_0_fe_normalize(&x); + rustsecp256k1_v0_8_0_fe_sqr(&s, &x); } } } -void test_sqrt(const rustsecp256k1_v0_7_0_fe *a, const rustsecp256k1_v0_7_0_fe *k) { - rustsecp256k1_v0_7_0_fe r1, r2; - int v = rustsecp256k1_v0_7_0_fe_sqrt(&r1, a); +void test_sqrt(const rustsecp256k1_v0_8_0_fe *a, const rustsecp256k1_v0_8_0_fe *k) { + rustsecp256k1_v0_8_0_fe r1, r2; + int v = rustsecp256k1_v0_8_0_fe_sqrt(&r1, a); CHECK((v == 0) == (k == NULL)); if (k != NULL) { /* Check that the returned root is +/- the given known answer */ - rustsecp256k1_v0_7_0_fe_negate(&r2, &r1, 1); - rustsecp256k1_v0_7_0_fe_add(&r1, k); rustsecp256k1_v0_7_0_fe_add(&r2, k); - rustsecp256k1_v0_7_0_fe_normalize(&r1); rustsecp256k1_v0_7_0_fe_normalize(&r2); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&r1) || rustsecp256k1_v0_7_0_fe_is_zero(&r2)); + rustsecp256k1_v0_8_0_fe_negate(&r2, &r1, 1); + rustsecp256k1_v0_8_0_fe_add(&r1, k); rustsecp256k1_v0_8_0_fe_add(&r2, k); + rustsecp256k1_v0_8_0_fe_normalize(&r1); rustsecp256k1_v0_8_0_fe_normalize(&r2); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&r1) || rustsecp256k1_v0_8_0_fe_is_zero(&r2)); } } void run_sqrt(void) { - rustsecp256k1_v0_7_0_fe ns, x, s, t; + rustsecp256k1_v0_8_0_fe ns, x, s, t; int i; /* Check sqrt(0) is 0 */ - rustsecp256k1_v0_7_0_fe_set_int(&x, 0); - rustsecp256k1_v0_7_0_fe_sqr(&s, &x); + rustsecp256k1_v0_8_0_fe_set_int(&x, 0); + rustsecp256k1_v0_8_0_fe_sqr(&s, &x); test_sqrt(&s, &x); /* Check sqrt of small squares (and their negatives) */ for (i = 1; i <= 100; i++) { - rustsecp256k1_v0_7_0_fe_set_int(&x, i); - rustsecp256k1_v0_7_0_fe_sqr(&s, &x); + rustsecp256k1_v0_8_0_fe_set_int(&x, i); + rustsecp256k1_v0_8_0_fe_sqr(&s, &x); test_sqrt(&s, &x); - rustsecp256k1_v0_7_0_fe_negate(&t, &s, 1); + rustsecp256k1_v0_8_0_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); } @@ -2667,11 +3135,11 @@ void run_sqrt(void) { random_fe_non_square(&ns); for (j = 0; j < count; j++) { random_fe(&x); - rustsecp256k1_v0_7_0_fe_sqr(&s, &x); + rustsecp256k1_v0_8_0_fe_sqr(&s, &x); test_sqrt(&s, &x); - rustsecp256k1_v0_7_0_fe_negate(&t, &s, 1); + rustsecp256k1_v0_8_0_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); - rustsecp256k1_v0_7_0_fe_mul(&t, &s, &ns); + rustsecp256k1_v0_8_0_fe_mul(&t, &s, &ns); test_sqrt(&t, NULL); } } @@ -2679,12 +3147,12 @@ void run_sqrt(void) { /***** FIELD/SCALAR INVERSE TESTS *****/ -static const rustsecp256k1_v0_7_0_scalar scalar_minus_one = SECP256K1_SCALAR_CONST( +static const rustsecp256k1_v0_8_0_scalar scalar_minus_one = SECP256K1_SCALAR_CONST( 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0xBAAEDCE6, 0xAF48A03B, 0xBFD25E8C, 0xD0364140 ); -static const rustsecp256k1_v0_7_0_fe fe_minus_one = 
SECP256K1_FE_CONST( +static const rustsecp256k1_v0_8_0_fe fe_minus_one = SECP256K1_FE_CONST( 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFC2E ); @@ -2696,57 +3164,57 @@ static const rustsecp256k1_v0_7_0_fe fe_minus_one = SECP256K1_FE_CONST( * for x!=0 and x!=1: 1/(1/x - 1) + 1 == -1/(x-1) */ -void test_inverse_scalar(rustsecp256k1_v0_7_0_scalar* out, const rustsecp256k1_v0_7_0_scalar* x, int var) +void test_inverse_scalar(rustsecp256k1_v0_8_0_scalar* out, const rustsecp256k1_v0_8_0_scalar* x, int var) { - rustsecp256k1_v0_7_0_scalar l, r, t; + rustsecp256k1_v0_8_0_scalar l, r, t; - (var ? rustsecp256k1_v0_7_0_scalar_inverse_var : rustsecp256k1_v0_7_0_scalar_inverse)(&l, x); /* l = 1/x */ + (var ? rustsecp256k1_v0_8_0_scalar_inverse_var : rustsecp256k1_v0_8_0_scalar_inverse)(&l, x); /* l = 1/x */ if (out) *out = l; - if (rustsecp256k1_v0_7_0_scalar_is_zero(x)) { - CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(&l)); + if (rustsecp256k1_v0_8_0_scalar_is_zero(x)) { + CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(&l)); return; } - rustsecp256k1_v0_7_0_scalar_mul(&t, x, &l); /* t = x*(1/x) */ - CHECK(rustsecp256k1_v0_7_0_scalar_is_one(&t)); /* x*(1/x) == 1 */ - rustsecp256k1_v0_7_0_scalar_add(&r, x, &scalar_minus_one); /* r = x-1 */ - if (rustsecp256k1_v0_7_0_scalar_is_zero(&r)) return; - (var ? rustsecp256k1_v0_7_0_scalar_inverse_var : rustsecp256k1_v0_7_0_scalar_inverse)(&r, &r); /* r = 1/(x-1) */ - rustsecp256k1_v0_7_0_scalar_add(&l, &scalar_minus_one, &l); /* l = 1/x-1 */ - (var ? rustsecp256k1_v0_7_0_scalar_inverse_var : rustsecp256k1_v0_7_0_scalar_inverse)(&l, &l); /* l = 1/(1/x-1) */ - rustsecp256k1_v0_7_0_scalar_add(&l, &l, &rustsecp256k1_v0_7_0_scalar_one); /* l = 1/(1/x-1)+1 */ - rustsecp256k1_v0_7_0_scalar_add(&l, &r, &l); /* l = 1/(1/x-1)+1 + 1/(x-1) */ - CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(&l)); /* l == 0 */ + rustsecp256k1_v0_8_0_scalar_mul(&t, x, &l); /* t = x*(1/x) */ + CHECK(rustsecp256k1_v0_8_0_scalar_is_one(&t)); /* x*(1/x) == 1 */ + rustsecp256k1_v0_8_0_scalar_add(&r, x, &scalar_minus_one); /* r = x-1 */ + if (rustsecp256k1_v0_8_0_scalar_is_zero(&r)) return; + (var ? rustsecp256k1_v0_8_0_scalar_inverse_var : rustsecp256k1_v0_8_0_scalar_inverse)(&r, &r); /* r = 1/(x-1) */ + rustsecp256k1_v0_8_0_scalar_add(&l, &scalar_minus_one, &l); /* l = 1/x-1 */ + (var ? rustsecp256k1_v0_8_0_scalar_inverse_var : rustsecp256k1_v0_8_0_scalar_inverse)(&l, &l); /* l = 1/(1/x-1) */ + rustsecp256k1_v0_8_0_scalar_add(&l, &l, &rustsecp256k1_v0_8_0_scalar_one); /* l = 1/(1/x-1)+1 */ + rustsecp256k1_v0_8_0_scalar_add(&l, &r, &l); /* l = 1/(1/x-1)+1 + 1/(x-1) */ + CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(&l)); /* l == 0 */ } -void test_inverse_field(rustsecp256k1_v0_7_0_fe* out, const rustsecp256k1_v0_7_0_fe* x, int var) +void test_inverse_field(rustsecp256k1_v0_8_0_fe* out, const rustsecp256k1_v0_8_0_fe* x, int var) { - rustsecp256k1_v0_7_0_fe l, r, t; + rustsecp256k1_v0_8_0_fe l, r, t; - (var ? rustsecp256k1_v0_7_0_fe_inv_var : rustsecp256k1_v0_7_0_fe_inv)(&l, x) ; /* l = 1/x */ + (var ? 
rustsecp256k1_v0_8_0_fe_inv_var : rustsecp256k1_v0_8_0_fe_inv)(&l, x) ; /* l = 1/x */ if (out) *out = l; t = *x; /* t = x */ - if (rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&t)) { - CHECK(rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&l)); + if (rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&t)) { + CHECK(rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&l)); return; } - rustsecp256k1_v0_7_0_fe_mul(&t, x, &l); /* t = x*(1/x) */ - rustsecp256k1_v0_7_0_fe_add(&t, &fe_minus_one); /* t = x*(1/x)-1 */ - CHECK(rustsecp256k1_v0_7_0_fe_normalizes_to_zero(&t)); /* x*(1/x)-1 == 0 */ + rustsecp256k1_v0_8_0_fe_mul(&t, x, &l); /* t = x*(1/x) */ + rustsecp256k1_v0_8_0_fe_add(&t, &fe_minus_one); /* t = x*(1/x)-1 */ + CHECK(rustsecp256k1_v0_8_0_fe_normalizes_to_zero(&t)); /* x*(1/x)-1 == 0 */ r = *x; /* r = x */ - rustsecp256k1_v0_7_0_fe_add(&r, &fe_minus_one); /* r = x-1 */ - if (rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&r)) return; - (var ? rustsecp256k1_v0_7_0_fe_inv_var : rustsecp256k1_v0_7_0_fe_inv)(&r, &r); /* r = 1/(x-1) */ - rustsecp256k1_v0_7_0_fe_add(&l, &fe_minus_one); /* l = 1/x-1 */ - (var ? rustsecp256k1_v0_7_0_fe_inv_var : rustsecp256k1_v0_7_0_fe_inv)(&l, &l); /* l = 1/(1/x-1) */ - rustsecp256k1_v0_7_0_fe_add(&l, &rustsecp256k1_v0_7_0_fe_one); /* l = 1/(1/x-1)+1 */ - rustsecp256k1_v0_7_0_fe_add(&l, &r); /* l = 1/(1/x-1)+1 + 1/(x-1) */ - CHECK(rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&l)); /* l == 0 */ + rustsecp256k1_v0_8_0_fe_add(&r, &fe_minus_one); /* r = x-1 */ + if (rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&r)) return; + (var ? rustsecp256k1_v0_8_0_fe_inv_var : rustsecp256k1_v0_8_0_fe_inv)(&r, &r); /* r = 1/(x-1) */ + rustsecp256k1_v0_8_0_fe_add(&l, &fe_minus_one); /* l = 1/x-1 */ + (var ? rustsecp256k1_v0_8_0_fe_inv_var : rustsecp256k1_v0_8_0_fe_inv)(&l, &l); /* l = 1/(1/x-1) */ + rustsecp256k1_v0_8_0_fe_add(&l, &rustsecp256k1_v0_8_0_fe_one); /* l = 1/(1/x-1)+1 */ + rustsecp256k1_v0_8_0_fe_add(&l, &r); /* l = 1/(1/x-1)+1 + 1/(x-1) */ + CHECK(rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&l)); /* l == 0 */ } void run_inverse_tests(void) { /* Fixed test cases for field inverses: pairs of (x, 1/x) mod p. */ - static const rustsecp256k1_v0_7_0_fe fe_cases[][2] = { + static const rustsecp256k1_v0_8_0_fe fe_cases[][2] = { /* 0 */ {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0)}, @@ -2851,7 +3319,7 @@ void run_inverse_tests(void) SECP256K1_FE_CONST(0x9a94b9b5, 0x57eb71ee, 0x4c975b8b, 0xac5262a8, 0x077b0595, 0xe12a6b1f, 0xd728edef, 0x1a6bf956)} }; /* Fixed test cases for scalar inverses: pairs of (x, 1/x) mod n. */ - static const rustsecp256k1_v0_7_0_scalar scalar_cases[][2] = { + static const rustsecp256k1_v0_8_0_scalar scalar_cases[][2] = { /* 0 */ {SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0)}, @@ -2938,8 +3406,8 @@ void run_inverse_tests(void) }; int i, var, testrand; unsigned char b32[32]; - rustsecp256k1_v0_7_0_fe x_fe; - rustsecp256k1_v0_7_0_scalar x_scalar; + rustsecp256k1_v0_8_0_fe x_fe; + rustsecp256k1_v0_8_0_scalar x_scalar; memset(b32, 0, sizeof(b32)); /* Test fixed test cases through test_inverse_{scalar,field}, both ways. 
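Both inverse testers rely on the identity quoted in the comment above: for x not equal to 0 or 1, 1/(1/x - 1) + 1 == -1/(x-1). The algebra is short: 1/x - 1 = (1 - x)/x, so 1/(1/x - 1) = x/(1 - x), and adding 1 gives (x + 1 - x)/(1 - x) = 1/(1 - x) = -1/(x - 1). As a sanity sketch, the identity can be verified exhaustively in a toy prime field, taking inverses via Fermat's little theorem; the prime below is arbitrary and is neither the secp256k1 field prime nor the group order:

```
#include <assert.h>
#include <stdint.h>

static uint64_t powmod(uint64_t b, uint64_t e, uint64_t p) {
    uint64_t r = 1;
    b %= p;
    while (e) {
        if (e & 1) r = r * b % p;
        b = b * b % p;
        e >>= 1;
    }
    return r;
}

/* modular inverse via Fermat: a^(p-2) mod p, valid for prime p and a != 0 */
static uint64_t inv(uint64_t a, uint64_t p) { return powmod(a, p - 2, p); }

int main(void) {
    const uint64_t p = 10007; /* small prime so p*p fits in 64 bits */
    uint64_t x;
    for (x = 2; x < p; x++) { /* skip x == 0 and x == 1, as the tests do */
        uint64_t lhs = (inv((inv(x, p) + p - 1) % p, p) + 1) % p;
        uint64_t rhs = (p - inv((x + p - 1) % p, p)) % p;
        assert(lhs == rhs);
    }
    return 0;
}
```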
*/ for (i = 0; (size_t)i < sizeof(fe_cases)/sizeof(fe_cases[0]); ++i) { @@ -2953,23 +3421,23 @@ void run_inverse_tests(void) for (i = 0; (size_t)i < sizeof(scalar_cases)/sizeof(scalar_cases[0]); ++i) { for (var = 0; var <= 1; ++var) { test_inverse_scalar(&x_scalar, &scalar_cases[i][0], var); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&x_scalar, &scalar_cases[i][1])); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&x_scalar, &scalar_cases[i][1])); test_inverse_scalar(&x_scalar, &scalar_cases[i][1], var); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&x_scalar, &scalar_cases[i][0])); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&x_scalar, &scalar_cases[i][0])); } } /* Test inputs 0..999 and their respective negations. */ for (i = 0; i < 1000; ++i) { b32[31] = i & 0xff; b32[30] = (i >> 8) & 0xff; - rustsecp256k1_v0_7_0_scalar_set_b32(&x_scalar, b32, NULL); - rustsecp256k1_v0_7_0_fe_set_b32(&x_fe, b32); + rustsecp256k1_v0_8_0_scalar_set_b32(&x_scalar, b32, NULL); + rustsecp256k1_v0_8_0_fe_set_b32(&x_fe, b32); for (var = 0; var <= 1; ++var) { test_inverse_scalar(NULL, &x_scalar, var); test_inverse_field(NULL, &x_fe, var); } - rustsecp256k1_v0_7_0_scalar_negate(&x_scalar, &x_scalar); - rustsecp256k1_v0_7_0_fe_negate(&x_fe, &x_fe, 1); + rustsecp256k1_v0_8_0_scalar_negate(&x_scalar, &x_scalar); + rustsecp256k1_v0_8_0_fe_negate(&x_fe, &x_fe, 1); for (var = 0; var <= 1; ++var) { test_inverse_scalar(NULL, &x_scalar, var); test_inverse_field(NULL, &x_fe, var); @@ -2978,9 +3446,9 @@ void run_inverse_tests(void) /* test 128*count random inputs; half with testrand256_test, half with testrand256 */ for (testrand = 0; testrand <= 1; ++testrand) { for (i = 0; i < 64 * count; ++i) { - (testrand ? rustsecp256k1_v0_7_0_testrand256_test : rustsecp256k1_v0_7_0_testrand256)(b32); - rustsecp256k1_v0_7_0_scalar_set_b32(&x_scalar, b32, NULL); - rustsecp256k1_v0_7_0_fe_set_b32(&x_fe, b32); + (testrand ? rustsecp256k1_v0_8_0_testrand256_test : rustsecp256k1_v0_8_0_testrand256)(b32); + rustsecp256k1_v0_8_0_scalar_set_b32(&x_scalar, b32, NULL); + rustsecp256k1_v0_8_0_fe_set_b32(&x_fe, b32); for (var = 0; var <= 1; ++var) { test_inverse_scalar(NULL, &x_scalar, var); test_inverse_field(NULL, &x_fe, var); @@ -2991,52 +3459,52 @@ void run_inverse_tests(void) /***** GROUP TESTS *****/ -void ge_equals_ge(const rustsecp256k1_v0_7_0_ge *a, const rustsecp256k1_v0_7_0_ge *b) { +void ge_equals_ge(const rustsecp256k1_v0_8_0_ge *a, const rustsecp256k1_v0_8_0_ge *b) { CHECK(a->infinity == b->infinity); if (a->infinity) { return; } - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&a->x, &b->x)); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&a->y, &b->y)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&a->x, &b->x)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&a->y, &b->y)); } /* This compares jacobian points including their Z, not just their geometric meaning. 
*/ -int gej_xyz_equals_gej(const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_gej *b) { - rustsecp256k1_v0_7_0_gej a2; - rustsecp256k1_v0_7_0_gej b2; +int gej_xyz_equals_gej(const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_gej *b) { + rustsecp256k1_v0_8_0_gej a2; + rustsecp256k1_v0_8_0_gej b2; int ret = 1; ret &= a->infinity == b->infinity; if (ret && !a->infinity) { a2 = *a; b2 = *b; - rustsecp256k1_v0_7_0_fe_normalize(&a2.x); - rustsecp256k1_v0_7_0_fe_normalize(&a2.y); - rustsecp256k1_v0_7_0_fe_normalize(&a2.z); - rustsecp256k1_v0_7_0_fe_normalize(&b2.x); - rustsecp256k1_v0_7_0_fe_normalize(&b2.y); - rustsecp256k1_v0_7_0_fe_normalize(&b2.z); - ret &= rustsecp256k1_v0_7_0_fe_cmp_var(&a2.x, &b2.x) == 0; - ret &= rustsecp256k1_v0_7_0_fe_cmp_var(&a2.y, &b2.y) == 0; - ret &= rustsecp256k1_v0_7_0_fe_cmp_var(&a2.z, &b2.z) == 0; + rustsecp256k1_v0_8_0_fe_normalize(&a2.x); + rustsecp256k1_v0_8_0_fe_normalize(&a2.y); + rustsecp256k1_v0_8_0_fe_normalize(&a2.z); + rustsecp256k1_v0_8_0_fe_normalize(&b2.x); + rustsecp256k1_v0_8_0_fe_normalize(&b2.y); + rustsecp256k1_v0_8_0_fe_normalize(&b2.z); + ret &= rustsecp256k1_v0_8_0_fe_cmp_var(&a2.x, &b2.x) == 0; + ret &= rustsecp256k1_v0_8_0_fe_cmp_var(&a2.y, &b2.y) == 0; + ret &= rustsecp256k1_v0_8_0_fe_cmp_var(&a2.z, &b2.z) == 0; } return ret; } -void ge_equals_gej(const rustsecp256k1_v0_7_0_ge *a, const rustsecp256k1_v0_7_0_gej *b) { - rustsecp256k1_v0_7_0_fe z2s; - rustsecp256k1_v0_7_0_fe u1, u2, s1, s2; +void ge_equals_gej(const rustsecp256k1_v0_8_0_ge *a, const rustsecp256k1_v0_8_0_gej *b) { + rustsecp256k1_v0_8_0_fe z2s; + rustsecp256k1_v0_8_0_fe u1, u2, s1, s2; CHECK(a->infinity == b->infinity); if (a->infinity) { return; } /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ - rustsecp256k1_v0_7_0_fe_sqr(&z2s, &b->z); - rustsecp256k1_v0_7_0_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; rustsecp256k1_v0_7_0_fe_normalize_weak(&u2); - rustsecp256k1_v0_7_0_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_7_0_fe_mul(&s1, &s1, &b->z); - s2 = b->y; rustsecp256k1_v0_7_0_fe_normalize_weak(&s2); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&u1, &u2)); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&s1, &s2)); + rustsecp256k1_v0_8_0_fe_sqr(&z2s, &b->z); + rustsecp256k1_v0_8_0_fe_mul(&u1, &a->x, &z2s); + u2 = b->x; rustsecp256k1_v0_8_0_fe_normalize_weak(&u2); + rustsecp256k1_v0_8_0_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_8_0_fe_mul(&s1, &s1, &b->z); + s2 = b->y; rustsecp256k1_v0_8_0_fe_normalize_weak(&s2); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&u1, &u2)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&s1, &s2)); } void test_ge(void) { @@ -3048,31 +3516,31 @@ void test_ge(void) { * negation, and then those two again but with randomized Z coordinate. * - The same is then done for lambda*p1 and lambda^2*p1. 
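ge_equals_gej avoids a field inversion by clearing denominators. A Jacobian triple (X, Y, Z) with Z != 0 denotes the affine point (X/Z^2, Y/Z^3), so equality with an affine point a is equivalent to

    a.x * Z^2 == X   and   a.y * Z^3 == Y,

which is exactly the u1/u2 and s1/s2 comparison in the code (s1 picks up its cube of Z as a.y * Z^2 followed by one more multiplication by Z).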
*/ - rustsecp256k1_v0_7_0_ge *ge = (rustsecp256k1_v0_7_0_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_7_0_ge) * (1 + 4 * runs)); - rustsecp256k1_v0_7_0_gej *gej = (rustsecp256k1_v0_7_0_gej *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_7_0_gej) * (1 + 4 * runs)); - rustsecp256k1_v0_7_0_fe zf; - rustsecp256k1_v0_7_0_fe zfi2, zfi3; - - rustsecp256k1_v0_7_0_gej_set_infinity(&gej[0]); - rustsecp256k1_v0_7_0_ge_clear(&ge[0]); - rustsecp256k1_v0_7_0_ge_set_gej_var(&ge[0], &gej[0]); + rustsecp256k1_v0_8_0_ge *ge = (rustsecp256k1_v0_8_0_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_8_0_ge) * (1 + 4 * runs)); + rustsecp256k1_v0_8_0_gej *gej = (rustsecp256k1_v0_8_0_gej *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_8_0_gej) * (1 + 4 * runs)); + rustsecp256k1_v0_8_0_fe zf; + rustsecp256k1_v0_8_0_fe zfi2, zfi3; + + rustsecp256k1_v0_8_0_gej_set_infinity(&gej[0]); + rustsecp256k1_v0_8_0_ge_clear(&ge[0]); + rustsecp256k1_v0_8_0_ge_set_gej_var(&ge[0], &gej[0]); for (i = 0; i < runs; i++) { int j; - rustsecp256k1_v0_7_0_ge g; + rustsecp256k1_v0_8_0_ge g; random_group_element_test(&g); if (i >= runs - 2) { - rustsecp256k1_v0_7_0_ge_mul_lambda(&g, &ge[1]); + rustsecp256k1_v0_8_0_ge_mul_lambda(&g, &ge[1]); } if (i >= runs - 1) { - rustsecp256k1_v0_7_0_ge_mul_lambda(&g, &g); + rustsecp256k1_v0_8_0_ge_mul_lambda(&g, &g); } ge[1 + 4 * i] = g; ge[2 + 4 * i] = g; - rustsecp256k1_v0_7_0_ge_neg(&ge[3 + 4 * i], &g); - rustsecp256k1_v0_7_0_ge_neg(&ge[4 + 4 * i], &g); - rustsecp256k1_v0_7_0_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); + rustsecp256k1_v0_8_0_ge_neg(&ge[3 + 4 * i], &g); + rustsecp256k1_v0_8_0_ge_neg(&ge[4 + 4 * i], &g); + rustsecp256k1_v0_8_0_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]); - rustsecp256k1_v0_7_0_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); + rustsecp256k1_v0_8_0_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]); for (j = 0; j < 4; j++) { random_field_element_magnitude(&ge[1 + j + 4 * i].x); @@ -3086,84 +3554,84 @@ void test_ge(void) { /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */ do { random_field_element_test(&zf); - } while(rustsecp256k1_v0_7_0_fe_is_zero(&zf)); + } while(rustsecp256k1_v0_8_0_fe_is_zero(&zf)); random_field_element_magnitude(&zf); - rustsecp256k1_v0_7_0_fe_inv_var(&zfi3, &zf); - rustsecp256k1_v0_7_0_fe_sqr(&zfi2, &zfi3); - rustsecp256k1_v0_7_0_fe_mul(&zfi3, &zfi3, &zfi2); + rustsecp256k1_v0_8_0_fe_inv_var(&zfi3, &zf); + rustsecp256k1_v0_8_0_fe_sqr(&zfi2, &zfi3); + rustsecp256k1_v0_8_0_fe_mul(&zfi3, &zfi3, &zfi2); for (i1 = 0; i1 < 1 + 4 * runs; i1++) { int i2; for (i2 = 0; i2 < 1 + 4 * runs; i2++) { /* Compute reference result using gej + gej (var). */ - rustsecp256k1_v0_7_0_gej refj, resj; - rustsecp256k1_v0_7_0_ge ref; - rustsecp256k1_v0_7_0_fe zr; - rustsecp256k1_v0_7_0_gej_add_var(&refj, &gej[i1], &gej[i2], rustsecp256k1_v0_7_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); + rustsecp256k1_v0_8_0_gej refj, resj; + rustsecp256k1_v0_8_0_ge ref; + rustsecp256k1_v0_8_0_fe zr; + rustsecp256k1_v0_8_0_gej_add_var(&refj, &gej[i1], &gej[i2], rustsecp256k1_v0_8_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); /* Check Z ratio. 
*/ - if (!rustsecp256k1_v0_7_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_7_0_gej_is_infinity(&refj)) { - rustsecp256k1_v0_7_0_fe zrz; rustsecp256k1_v0_7_0_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&zrz, &refj.z)); + if (!rustsecp256k1_v0_8_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_8_0_gej_is_infinity(&refj)) { + rustsecp256k1_v0_8_0_fe zrz; rustsecp256k1_v0_8_0_fe_mul(&zrz, &zr, &gej[i1].z); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&zrz, &refj.z)); } - rustsecp256k1_v0_7_0_ge_set_gej_var(&ref, &refj); + rustsecp256k1_v0_8_0_ge_set_gej_var(&ref, &refj); /* Test gej + ge with Z ratio result (var). */ - rustsecp256k1_v0_7_0_gej_add_ge_var(&resj, &gej[i1], &ge[i2], rustsecp256k1_v0_7_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); + rustsecp256k1_v0_8_0_gej_add_ge_var(&resj, &gej[i1], &ge[i2], rustsecp256k1_v0_8_0_gej_is_infinity(&gej[i1]) ? NULL : &zr); ge_equals_gej(&ref, &resj); - if (!rustsecp256k1_v0_7_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_7_0_gej_is_infinity(&resj)) { - rustsecp256k1_v0_7_0_fe zrz; rustsecp256k1_v0_7_0_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&zrz, &resj.z)); + if (!rustsecp256k1_v0_8_0_gej_is_infinity(&gej[i1]) && !rustsecp256k1_v0_8_0_gej_is_infinity(&resj)) { + rustsecp256k1_v0_8_0_fe zrz; rustsecp256k1_v0_8_0_fe_mul(&zrz, &zr, &gej[i1].z); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&zrz, &resj.z)); } /* Test gej + ge (var, with additional Z factor). */ { - rustsecp256k1_v0_7_0_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ - rustsecp256k1_v0_7_0_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); - rustsecp256k1_v0_7_0_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); + rustsecp256k1_v0_8_0_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ + rustsecp256k1_v0_8_0_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); + rustsecp256k1_v0_8_0_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); random_field_element_magnitude(&ge2_zfi.x); random_field_element_magnitude(&ge2_zfi.y); - rustsecp256k1_v0_7_0_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); + rustsecp256k1_v0_8_0_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); ge_equals_gej(&ref, &resj); } /* Test gej + ge (const). */ if (i2 != 0) { - /* rustsecp256k1_v0_7_0_gej_add_ge does not support its second argument being infinity. */ - rustsecp256k1_v0_7_0_gej_add_ge(&resj, &gej[i1], &ge[i2]); + /* rustsecp256k1_v0_8_0_gej_add_ge does not support its second argument being infinity. */ + rustsecp256k1_v0_8_0_gej_add_ge(&resj, &gej[i1], &ge[i2]); ge_equals_gej(&ref, &resj); } /* Test doubling (var). */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) { - rustsecp256k1_v0_7_0_fe zr2; + rustsecp256k1_v0_8_0_fe zr2; /* Normal doubling with Z ratio result. */ - rustsecp256k1_v0_7_0_gej_double_var(&resj, &gej[i1], &zr2); + rustsecp256k1_v0_8_0_gej_double_var(&resj, &gej[i1], &zr2); ge_equals_gej(&ref, &resj); /* Check Z ratio. */ - rustsecp256k1_v0_7_0_fe_mul(&zr2, &zr2, &gej[i1].z); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&zr2, &resj.z)); + rustsecp256k1_v0_8_0_fe_mul(&zr2, &zr2, &gej[i1].z); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&zr2, &resj.z)); /* Normal doubling. */ - rustsecp256k1_v0_7_0_gej_double_var(&resj, &gej[i2], NULL); + rustsecp256k1_v0_8_0_gej_double_var(&resj, &gej[i2], NULL); ge_equals_gej(&ref, &resj); /* Constant-time doubling. 
*/ - rustsecp256k1_v0_7_0_gej_double(&resj, &gej[i2]); + rustsecp256k1_v0_8_0_gej_double(&resj, &gej[i2]); ge_equals_gej(&ref, &resj); } /* Test adding opposites. */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) { - CHECK(rustsecp256k1_v0_7_0_ge_is_infinity(&ref)); + CHECK(rustsecp256k1_v0_8_0_ge_is_infinity(&ref)); } /* Test adding infinity. */ if (i1 == 0) { - CHECK(rustsecp256k1_v0_7_0_ge_is_infinity(&ge[i1])); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&gej[i1])); + CHECK(rustsecp256k1_v0_8_0_ge_is_infinity(&ge[i1])); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&gej[i1])); ge_equals_gej(&ref, &gej[i2]); } if (i2 == 0) { - CHECK(rustsecp256k1_v0_7_0_ge_is_infinity(&ge[i2])); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&gej[i2])); + CHECK(rustsecp256k1_v0_8_0_ge_is_infinity(&ge[i2])); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&gej[i2])); ge_equals_gej(&ref, &gej[i1]); } } @@ -3171,34 +3639,34 @@ void test_ge(void) { /* Test adding all points together in random order equals infinity. */ { - rustsecp256k1_v0_7_0_gej sum = SECP256K1_GEJ_CONST_INFINITY; - rustsecp256k1_v0_7_0_gej *gej_shuffled = (rustsecp256k1_v0_7_0_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_7_0_gej)); + rustsecp256k1_v0_8_0_gej sum = SECP256K1_GEJ_CONST_INFINITY; + rustsecp256k1_v0_8_0_gej *gej_shuffled = (rustsecp256k1_v0_8_0_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_8_0_gej)); for (i = 0; i < 4 * runs + 1; i++) { gej_shuffled[i] = gej[i]; } for (i = 0; i < 4 * runs + 1; i++) { - int swap = i + rustsecp256k1_v0_7_0_testrand_int(4 * runs + 1 - i); + int swap = i + rustsecp256k1_v0_8_0_testrand_int(4 * runs + 1 - i); if (swap != i) { - rustsecp256k1_v0_7_0_gej t = gej_shuffled[i]; + rustsecp256k1_v0_8_0_gej t = gej_shuffled[i]; gej_shuffled[i] = gej_shuffled[swap]; gej_shuffled[swap] = t; } } for (i = 0; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_7_0_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); + rustsecp256k1_v0_8_0_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); } - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&sum)); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&sum)); free(gej_shuffled); } /* Test batch gej -> ge conversion without known z ratios. 
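The shuffle feeding the everything-sums-to-infinity check is a Fisher-Yates pass: each position i swaps with a uniformly chosen index in [i, n), which yields a uniformly random permutation, so the test exercises point addition in arbitrary orders. The same loop shape in isolation, with rand() standing in for the library's test RNG (note rand() % k carries a slight modulo bias that the illustration ignores):

```
#include <stdlib.h>

/* Shuffle a[0..n-1] in place; structurally the same loop as in test_ge. */
static void fisher_yates(int *a, int n) {
    int i;
    for (i = 0; i < n; i++) {
        int swap = i + rand() % (n - i); /* index uniform in [i, n) */
        if (swap != i) {
            int t = a[i];
            a[i] = a[swap];
            a[swap] = t;
        }
    }
}

int main(void) {
    int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    fisher_yates(a, 8);
    return 0;
}
```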
*/ { - rustsecp256k1_v0_7_0_ge *ge_set_all = (rustsecp256k1_v0_7_0_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_7_0_ge)); - rustsecp256k1_v0_7_0_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); + rustsecp256k1_v0_8_0_ge *ge_set_all = (rustsecp256k1_v0_8_0_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(rustsecp256k1_v0_8_0_ge)); + rustsecp256k1_v0_8_0_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); for (i = 0; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_7_0_fe s; + rustsecp256k1_v0_8_0_fe s; random_fe_non_zero(&s); - rustsecp256k1_v0_7_0_gej_rescale(&gej[i], &s); + rustsecp256k1_v0_8_0_gej_rescale(&gej[i], &s); ge_equals_gej(&ge_set_all[i], &gej[i]); } free(ge_set_all); @@ -3208,16 +3676,16 @@ void test_ge(void) { for (i = 0; i < 4 * runs + 1; i++) { int odd; random_group_element_test(&ge[i]); - odd = rustsecp256k1_v0_7_0_fe_is_odd(&ge[i].x); + odd = rustsecp256k1_v0_8_0_fe_is_odd(&ge[i].x); CHECK(odd == 0 || odd == 1); /* randomly set half the points to infinity */ if (odd == i % 2) { - rustsecp256k1_v0_7_0_ge_set_infinity(&ge[i]); + rustsecp256k1_v0_8_0_ge_set_infinity(&ge[i]); } - rustsecp256k1_v0_7_0_gej_set_ge(&gej[i], &ge[i]); + rustsecp256k1_v0_8_0_gej_set_ge(&gej[i], &ge[i]); } /* batch convert */ - rustsecp256k1_v0_7_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + rustsecp256k1_v0_8_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { ge_equals_gej(&ge[i], &gej[i]); @@ -3225,13 +3693,13 @@ void test_ge(void) { /* Test batch gej -> ge conversion with all infinities. */ for (i = 0; i < 4 * runs + 1; i++) { - rustsecp256k1_v0_7_0_gej_set_infinity(&gej[i]); + rustsecp256k1_v0_8_0_gej_set_infinity(&gej[i]); } /* batch convert */ - rustsecp256k1_v0_7_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + rustsecp256k1_v0_8_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { - CHECK(rustsecp256k1_v0_7_0_ge_is_infinity(&ge[i])); + CHECK(rustsecp256k1_v0_8_0_ge_is_infinity(&ge[i])); } free(ge); @@ -3240,33 +3708,33 @@ void test_ge(void) { void test_intialized_inf(void) { - rustsecp256k1_v0_7_0_ge p; - rustsecp256k1_v0_7_0_gej pj, npj, infj1, infj2, infj3; - rustsecp256k1_v0_7_0_fe zinv; + rustsecp256k1_v0_8_0_ge p; + rustsecp256k1_v0_8_0_gej pj, npj, infj1, infj2, infj3; + rustsecp256k1_v0_8_0_fe zinv; /* Test that adding P+(-P) results in a fully initialized infinity*/ random_group_element_test(&p); - rustsecp256k1_v0_7_0_gej_set_ge(&pj, &p); - rustsecp256k1_v0_7_0_gej_neg(&npj, &pj); + rustsecp256k1_v0_8_0_gej_set_ge(&pj, &p); + rustsecp256k1_v0_8_0_gej_neg(&npj, &pj); - rustsecp256k1_v0_7_0_gej_add_var(&infj1, &pj, &npj, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&infj1)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj1.x)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj1.y)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj1.z)); + rustsecp256k1_v0_8_0_gej_add_var(&infj1, &pj, &npj, NULL); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&infj1)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj1.x)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj1.y)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj1.z)); - rustsecp256k1_v0_7_0_gej_add_ge_var(&infj2, &npj, &p, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&infj2)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj2.x)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj2.y)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj2.z)); + rustsecp256k1_v0_8_0_gej_add_ge_var(&infj2, &npj, &p, NULL); + 
CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&infj2)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj2.x)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj2.y)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj2.z)); - rustsecp256k1_v0_7_0_fe_set_int(&zinv, 1); - rustsecp256k1_v0_7_0_gej_add_zinv_var(&infj3, &npj, &p, &zinv); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&infj3)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj3.x)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj3.y)); - CHECK(rustsecp256k1_v0_7_0_fe_is_zero(&infj3.z)); + rustsecp256k1_v0_8_0_fe_set_int(&zinv, 1); + rustsecp256k1_v0_8_0_gej_add_zinv_var(&infj3, &npj, &p, &zinv); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&infj3)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj3.x)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj3.y)); + CHECK(rustsecp256k1_v0_8_0_fe_is_zero(&infj3.z)); } @@ -3302,39 +3770,39 @@ void test_add_neg_y_diff_x(void) { * print " Q: %x %x" % Q.xy() * print "P + Q: %x %x" % (P + Q).xy() */ - rustsecp256k1_v0_7_0_gej aj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_8_0_gej aj = SECP256K1_GEJ_CONST( 0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30, 0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb, 0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8, 0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d ); - rustsecp256k1_v0_7_0_gej bj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_8_0_gej bj = SECP256K1_GEJ_CONST( 0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86, 0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7, 0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57, 0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2 ); - rustsecp256k1_v0_7_0_gej sumj = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_8_0_gej sumj = SECP256K1_GEJ_CONST( 0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027, 0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a, 0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08, 0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe ); - rustsecp256k1_v0_7_0_ge b; - rustsecp256k1_v0_7_0_gej resj; - rustsecp256k1_v0_7_0_ge res; - rustsecp256k1_v0_7_0_ge_set_gej(&b, &bj); + rustsecp256k1_v0_8_0_ge b; + rustsecp256k1_v0_8_0_gej resj; + rustsecp256k1_v0_8_0_ge res; + rustsecp256k1_v0_8_0_ge_set_gej(&b, &bj); - rustsecp256k1_v0_7_0_gej_add_var(&resj, &aj, &bj, NULL); - rustsecp256k1_v0_7_0_ge_set_gej(&res, &resj); + rustsecp256k1_v0_8_0_gej_add_var(&resj, &aj, &bj, NULL); + rustsecp256k1_v0_8_0_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); - rustsecp256k1_v0_7_0_gej_add_ge(&resj, &aj, &b); - rustsecp256k1_v0_7_0_ge_set_gej(&res, &resj); + rustsecp256k1_v0_8_0_gej_add_ge(&resj, &aj, &b); + rustsecp256k1_v0_8_0_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); - rustsecp256k1_v0_7_0_gej_add_ge_var(&resj, &aj, &b, NULL); - rustsecp256k1_v0_7_0_ge_set_gej(&res, &resj); + rustsecp256k1_v0_8_0_gej_add_ge_var(&resj, &aj, &b, NULL); + rustsecp256k1_v0_8_0_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); } @@ -3347,22 +3815,22 @@ void run_ge(void) { test_intialized_inf(); } -void test_gej_cmov(const rustsecp256k1_v0_7_0_gej *a, const rustsecp256k1_v0_7_0_gej *b) { - rustsecp256k1_v0_7_0_gej t = *a; - rustsecp256k1_v0_7_0_gej_cmov(&t, b, 0); +void test_gej_cmov(const rustsecp256k1_v0_8_0_gej *a, const rustsecp256k1_v0_8_0_gej *b) { + rustsecp256k1_v0_8_0_gej t = *a; + rustsecp256k1_v0_8_0_gej_cmov(&t, b, 0); CHECK(gej_xyz_equals_gej(&t, a)); - rustsecp256k1_v0_7_0_gej_cmov(&t, b, 1); + rustsecp256k1_v0_8_0_gej_cmov(&t, b, 1); CHECK(gej_xyz_equals_gej(&t, b)); } void run_gej(void) { int i; - rustsecp256k1_v0_7_0_gej a, b; + rustsecp256k1_v0_8_0_gej a, b; - /* Tests for 
rustsecp256k1_v0_7_0_gej_cmov */ + /* Tests for rustsecp256k1_v0_8_0_gej_cmov */ for (i = 0; i < count; i++) { - rustsecp256k1_v0_7_0_gej_set_infinity(&a); - rustsecp256k1_v0_7_0_gej_set_infinity(&b); + rustsecp256k1_v0_8_0_gej_set_infinity(&a); + rustsecp256k1_v0_8_0_gej_set_infinity(&b); test_gej_cmov(&a, &b); random_gej_test(&a); @@ -3376,30 +3844,46 @@ void run_gej(void) { test_gej_cmov(&a, &b); test_gej_cmov(&b, &a); } + + /* Tests for rustsecp256k1_v0_8_0_gej_eq_var */ + for (i = 0; i < count; i++) { + rustsecp256k1_v0_8_0_fe fe; + random_gej_test(&a); + random_gej_test(&b); + CHECK(!rustsecp256k1_v0_8_0_gej_eq_var(&a, &b)); + + b = a; + random_field_element_test(&fe); + if (rustsecp256k1_v0_8_0_fe_is_zero(&fe)) { + continue; + } + rustsecp256k1_v0_8_0_gej_rescale(&a, &fe); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&a, &b)); + } } void test_ec_combine(void) { - rustsecp256k1_v0_7_0_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - rustsecp256k1_v0_7_0_pubkey data[6]; - const rustsecp256k1_v0_7_0_pubkey* d[6]; - rustsecp256k1_v0_7_0_pubkey sd; - rustsecp256k1_v0_7_0_pubkey sd2; - rustsecp256k1_v0_7_0_gej Qj; - rustsecp256k1_v0_7_0_ge Q; + rustsecp256k1_v0_8_0_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_8_0_pubkey data[6]; + const rustsecp256k1_v0_8_0_pubkey* d[6]; + rustsecp256k1_v0_8_0_pubkey sd; + rustsecp256k1_v0_8_0_pubkey sd2; + rustsecp256k1_v0_8_0_gej Qj; + rustsecp256k1_v0_8_0_ge Q; int i; for (i = 1; i <= 6; i++) { - rustsecp256k1_v0_7_0_scalar s; + rustsecp256k1_v0_8_0_scalar s; random_scalar_order_test(&s); - rustsecp256k1_v0_7_0_scalar_add(&sum, &sum, &s); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s); - rustsecp256k1_v0_7_0_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_7_0_pubkey_save(&data[i - 1], &Q); + rustsecp256k1_v0_8_0_scalar_add(&sum, &sum, &s); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s); + rustsecp256k1_v0_8_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_8_0_pubkey_save(&data[i - 1], &Q); d[i - 1] = &data[i - 1]; - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum); - rustsecp256k1_v0_7_0_ge_set_gej(&Q, &Qj); - rustsecp256k1_v0_7_0_pubkey_save(&sd, &Q); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_combine(ctx, &sd2, d, i) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&sd, &sd2, sizeof(sd)) == 0); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum); + rustsecp256k1_v0_8_0_ge_set_gej(&Q, &Qj); + rustsecp256k1_v0_8_0_pubkey_save(&sd, &Q); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_combine(ctx, &sd2, d, i) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&sd, &sd2, sizeof(sd)) == 0); } } @@ -3410,45 +3894,45 @@ void run_ec_combine(void) { } } -void test_group_decompress(const rustsecp256k1_v0_7_0_fe* x) { +void test_group_decompress(const rustsecp256k1_v0_8_0_fe* x) { /* The input itself, normalized. */ - rustsecp256k1_v0_7_0_fe fex = *x; + rustsecp256k1_v0_8_0_fe fex = *x; /* Results of set_xo_var(..., 0), set_xo_var(..., 1). */ - rustsecp256k1_v0_7_0_ge ge_even, ge_odd; + rustsecp256k1_v0_8_0_ge ge_even, ge_odd; /* Return values of the above calls. 
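The new gej_eq_var cases lean on rescaling being invisible at the level of the represented point: for s != 0, rustsecp256k1_v0_8_0_gej_rescale maps (X, Y, Z) to (s^2*X, s^3*Y, s*Z), and the affine coordinates are unchanged because the powers of s cancel:

    (s^2*X) / (s*Z)^2 == X/Z^2,    (s^3*Y) / (s*Z)^3 == Y/Z^3.

So after the rescale, a and b hold different limb values yet must still compare equal, while the two independently drawn random points at the top of the loop must not.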
*/ int res_even, res_odd; - rustsecp256k1_v0_7_0_fe_normalize_var(&fex); + rustsecp256k1_v0_8_0_fe_normalize_var(&fex); - res_even = rustsecp256k1_v0_7_0_ge_set_xo_var(&ge_even, &fex, 0); - res_odd = rustsecp256k1_v0_7_0_ge_set_xo_var(&ge_odd, &fex, 1); + res_even = rustsecp256k1_v0_8_0_ge_set_xo_var(&ge_even, &fex, 0); + res_odd = rustsecp256k1_v0_8_0_ge_set_xo_var(&ge_odd, &fex, 1); CHECK(res_even == res_odd); if (res_even) { - rustsecp256k1_v0_7_0_fe_normalize_var(&ge_odd.x); - rustsecp256k1_v0_7_0_fe_normalize_var(&ge_even.x); - rustsecp256k1_v0_7_0_fe_normalize_var(&ge_odd.y); - rustsecp256k1_v0_7_0_fe_normalize_var(&ge_even.y); + rustsecp256k1_v0_8_0_fe_normalize_var(&ge_odd.x); + rustsecp256k1_v0_8_0_fe_normalize_var(&ge_even.x); + rustsecp256k1_v0_8_0_fe_normalize_var(&ge_odd.y); + rustsecp256k1_v0_8_0_fe_normalize_var(&ge_even.y); /* No infinity allowed. */ CHECK(!ge_even.infinity); CHECK(!ge_odd.infinity); /* Check that the x coordinates check out. */ - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&ge_even.x, x)); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&ge_odd.x, x)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&ge_even.x, x)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&ge_odd.x, x)); /* Check odd/even Y in ge_odd, ge_even. */ - CHECK(rustsecp256k1_v0_7_0_fe_is_odd(&ge_odd.y)); - CHECK(!rustsecp256k1_v0_7_0_fe_is_odd(&ge_even.y)); + CHECK(rustsecp256k1_v0_8_0_fe_is_odd(&ge_odd.y)); + CHECK(!rustsecp256k1_v0_8_0_fe_is_odd(&ge_even.y)); } } void run_group_decompress(void) { int i; for (i = 0; i < count * 4; i++) { - rustsecp256k1_v0_7_0_fe fe; + rustsecp256k1_v0_8_0_fe fe; random_fe_test(&fe); test_group_decompress(&fe); } @@ -3456,7 +3940,7 @@ void run_group_decompress(void) { /***** ECMULT TESTS *****/ -void test_pre_g_table(const rustsecp256k1_v0_7_0_ge_storage * pre_g, size_t n) { +void test_pre_g_table(const rustsecp256k1_v0_8_0_ge_storage * pre_g, size_t n) { /* Tests the pre_g / pre_g_128 tables for consistency. * For independent verification we take a "geometric" approach to verification. * We check that every entry is on-curve. @@ -3467,175 +3951,170 @@ void test_pre_g_table(const rustsecp256k1_v0_7_0_ge_storage * pre_g, size_t n) { * * Checking the table's generators are correct is done in run_ecmult_pre_g. 
*/ - rustsecp256k1_v0_7_0_gej g2; - rustsecp256k1_v0_7_0_ge p, q, gg; - rustsecp256k1_v0_7_0_fe dpx, dpy, dqx, dqy; + rustsecp256k1_v0_8_0_gej g2; + rustsecp256k1_v0_8_0_ge p, q, gg; + rustsecp256k1_v0_8_0_fe dpx, dpy, dqx, dqy; size_t i; CHECK(0 < n); - rustsecp256k1_v0_7_0_ge_from_storage(&p, &pre_g[0]); - CHECK(rustsecp256k1_v0_7_0_ge_is_valid_var(&p)); + rustsecp256k1_v0_8_0_ge_from_storage(&p, &pre_g[0]); + CHECK(rustsecp256k1_v0_8_0_ge_is_valid_var(&p)); - rustsecp256k1_v0_7_0_gej_set_ge(&g2, &p); - rustsecp256k1_v0_7_0_gej_double_var(&g2, &g2, NULL); - rustsecp256k1_v0_7_0_ge_set_gej_var(&gg, &g2); + rustsecp256k1_v0_8_0_gej_set_ge(&g2, &p); + rustsecp256k1_v0_8_0_gej_double_var(&g2, &g2, NULL); + rustsecp256k1_v0_8_0_ge_set_gej_var(&gg, &g2); for (i = 1; i < n; ++i) { - rustsecp256k1_v0_7_0_fe_negate(&dpx, &p.x, 1); rustsecp256k1_v0_7_0_fe_add(&dpx, &gg.x); rustsecp256k1_v0_7_0_fe_normalize_weak(&dpx); - rustsecp256k1_v0_7_0_fe_negate(&dpy, &p.y, 1); rustsecp256k1_v0_7_0_fe_add(&dpy, &gg.y); rustsecp256k1_v0_7_0_fe_normalize_weak(&dpy); + rustsecp256k1_v0_8_0_fe_negate(&dpx, &p.x, 1); rustsecp256k1_v0_8_0_fe_add(&dpx, &gg.x); rustsecp256k1_v0_8_0_fe_normalize_weak(&dpx); + rustsecp256k1_v0_8_0_fe_negate(&dpy, &p.y, 1); rustsecp256k1_v0_8_0_fe_add(&dpy, &gg.y); rustsecp256k1_v0_8_0_fe_normalize_weak(&dpy); /* Check that p is not equal to gg */ - CHECK(!rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&dpx) || !rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&dpy)); + CHECK(!rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&dpx) || !rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&dpy)); - rustsecp256k1_v0_7_0_ge_from_storage(&q, &pre_g[i]); - CHECK(rustsecp256k1_v0_7_0_ge_is_valid_var(&q)); + rustsecp256k1_v0_8_0_ge_from_storage(&q, &pre_g[i]); + CHECK(rustsecp256k1_v0_8_0_ge_is_valid_var(&q)); - rustsecp256k1_v0_7_0_fe_negate(&dqx, &q.x, 1); rustsecp256k1_v0_7_0_fe_add(&dqx, &gg.x); rustsecp256k1_v0_7_0_fe_normalize_weak(&dqx); - dqy = q.y; rustsecp256k1_v0_7_0_fe_add(&dqy, &gg.y); rustsecp256k1_v0_7_0_fe_normalize_weak(&dqy); + rustsecp256k1_v0_8_0_fe_negate(&dqx, &q.x, 1); rustsecp256k1_v0_8_0_fe_add(&dqx, &gg.x); rustsecp256k1_v0_8_0_fe_normalize_weak(&dqx); + dqy = q.y; rustsecp256k1_v0_8_0_fe_add(&dqy, &gg.y); rustsecp256k1_v0_8_0_fe_normalize_weak(&dqy); /* Check that -q is not equal to gg */ - CHECK(!rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&dqx) || !rustsecp256k1_v0_7_0_fe_normalizes_to_zero_var(&dqy)); + CHECK(!rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&dqx) || !rustsecp256k1_v0_8_0_fe_normalizes_to_zero_var(&dqy)); /* Check that -q is not equal to p */ - CHECK(!rustsecp256k1_v0_7_0_fe_equal_var(&dpx, &dqx) || !rustsecp256k1_v0_7_0_fe_equal_var(&dpy, &dqy)); + CHECK(!rustsecp256k1_v0_8_0_fe_equal_var(&dpx, &dqx) || !rustsecp256k1_v0_8_0_fe_equal_var(&dpy, &dqy)); /* Check that p, -q and gg are colinear */ - rustsecp256k1_v0_7_0_fe_mul(&dpx, &dpx, &dqy); - rustsecp256k1_v0_7_0_fe_mul(&dpy, &dpy, &dqx); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&dpx, &dpy)); + rustsecp256k1_v0_8_0_fe_mul(&dpx, &dpx, &dqy); + rustsecp256k1_v0_8_0_fe_mul(&dpy, &dpy, &dqx); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&dpx, &dpy)); p = q; } } void run_ecmult_pre_g(void) { - rustsecp256k1_v0_7_0_ge_storage gs; - rustsecp256k1_v0_7_0_gej gj; - rustsecp256k1_v0_7_0_ge g; + rustsecp256k1_v0_8_0_ge_storage gs; + rustsecp256k1_v0_8_0_gej gj; + rustsecp256k1_v0_8_0_ge g; size_t i; /* Check that the pre_g and pre_g_128 tables are consistent. 
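The "geometric" verification in test_pre_g_table works because consecutive table entries should differ by gg = 2*G for pre_g (by 2*2^128*G for pre_g_128). Three points p, -q and gg lie on one line exactly when the slope cross-product vanishes, i.e.

    (gg.x - p.x) * (gg.y + q.y) == (gg.y - p.y) * (gg.x - q.x),

which is the division-free dpx*dqy == dpy*dqx comparison above. Since a chord through two curve points meets the curve again at the negation of their sum, collinearity of p, -q and gg (together with the checked inequalities ruling out the degenerate cases) forces q == p + gg, so each entry really is the previous one advanced by gg.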
*/ - test_pre_g_table(rustsecp256k1_v0_7_0_pre_g, ECMULT_TABLE_SIZE(WINDOW_G)); - test_pre_g_table(rustsecp256k1_v0_7_0_pre_g_128, ECMULT_TABLE_SIZE(WINDOW_G)); + test_pre_g_table(rustsecp256k1_v0_8_0_pre_g, ECMULT_TABLE_SIZE(WINDOW_G)); + test_pre_g_table(rustsecp256k1_v0_8_0_pre_g_128, ECMULT_TABLE_SIZE(WINDOW_G)); /* Check the first entry from the pre_g table. */ - rustsecp256k1_v0_7_0_ge_to_storage(&gs, &rustsecp256k1_v0_7_0_ge_const_g); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&gs, &rustsecp256k1_v0_7_0_pre_g[0], sizeof(gs)) == 0); + rustsecp256k1_v0_8_0_ge_to_storage(&gs, &rustsecp256k1_v0_8_0_ge_const_g); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&gs, &rustsecp256k1_v0_8_0_pre_g[0], sizeof(gs)) == 0); /* Check the first entry from the pre_g_128 table. */ - rustsecp256k1_v0_7_0_gej_set_ge(&gj, &rustsecp256k1_v0_7_0_ge_const_g); + rustsecp256k1_v0_8_0_gej_set_ge(&gj, &rustsecp256k1_v0_8_0_ge_const_g); for (i = 0; i < 128; ++i) { - rustsecp256k1_v0_7_0_gej_double_var(&gj, &gj, NULL); + rustsecp256k1_v0_8_0_gej_double_var(&gj, &gj, NULL); } - rustsecp256k1_v0_7_0_ge_set_gej(&g, &gj); - rustsecp256k1_v0_7_0_ge_to_storage(&gs, &g); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&gs, &rustsecp256k1_v0_7_0_pre_g_128[0], sizeof(gs)) == 0); + rustsecp256k1_v0_8_0_ge_set_gej(&g, &gj); + rustsecp256k1_v0_8_0_ge_to_storage(&gs, &g); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&gs, &rustsecp256k1_v0_8_0_pre_g_128[0], sizeof(gs)) == 0); } void run_ecmult_chain(void) { /* random starting point A (on the curve) */ - rustsecp256k1_v0_7_0_gej a = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_8_0_gej a = SECP256K1_GEJ_CONST( 0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3, 0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004, 0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f, 0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f ); /* two random initial factors xn and gn */ - rustsecp256k1_v0_7_0_scalar xn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_8_0_scalar xn = SECP256K1_SCALAR_CONST( 0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c, 0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407 ); - rustsecp256k1_v0_7_0_scalar gn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_8_0_scalar gn = SECP256K1_SCALAR_CONST( 0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9, 0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de ); /* two small multipliers to be applied to xn and gn in every iteration: */ - static const rustsecp256k1_v0_7_0_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); - static const rustsecp256k1_v0_7_0_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); + static const rustsecp256k1_v0_8_0_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); + static const rustsecp256k1_v0_8_0_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); /* accumulators with the resulting coefficients to A and G */ - rustsecp256k1_v0_7_0_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_7_0_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_8_0_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_8_0_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); /* actual points */ - rustsecp256k1_v0_7_0_gej x; - rustsecp256k1_v0_7_0_gej x2; + rustsecp256k1_v0_8_0_gej x; + rustsecp256k1_v0_8_0_gej x2; int i; /* the point being computed */ x = a; for (i = 0; i < 200*count; i++) { /* in each iteration, compute X = xn*X + gn*G; */ - rustsecp256k1_v0_7_0_ecmult(&x, &x, &xn, &gn); + rustsecp256k1_v0_8_0_ecmult(&x, &x, &xn, &gn); /* also compute ae and 
ge: the actual accumulated factors for A and G */ /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */ - rustsecp256k1_v0_7_0_scalar_mul(&ae, &ae, &xn); - rustsecp256k1_v0_7_0_scalar_mul(&ge, &ge, &xn); - rustsecp256k1_v0_7_0_scalar_add(&ge, &ge, &gn); + rustsecp256k1_v0_8_0_scalar_mul(&ae, &ae, &xn); + rustsecp256k1_v0_8_0_scalar_mul(&ge, &ge, &xn); + rustsecp256k1_v0_8_0_scalar_add(&ge, &ge, &gn); /* modify xn and gn */ - rustsecp256k1_v0_7_0_scalar_mul(&xn, &xn, &xf); - rustsecp256k1_v0_7_0_scalar_mul(&gn, &gn, &gf); + rustsecp256k1_v0_8_0_scalar_mul(&xn, &xn, &xf); + rustsecp256k1_v0_8_0_scalar_mul(&gn, &gn, &gf); /* verify */ if (i == 19999) { /* expected result after 19999 iterations */ - rustsecp256k1_v0_7_0_gej rp = SECP256K1_GEJ_CONST( + rustsecp256k1_v0_8_0_gej rp = SECP256K1_GEJ_CONST( 0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE, 0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830, 0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D, 0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88 ); - - rustsecp256k1_v0_7_0_gej_neg(&rp, &rp); - rustsecp256k1_v0_7_0_gej_add_var(&rp, &rp, &x, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&rp)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&rp, &x)); } } /* redo the computation, but directly with the resulting ae and ge coefficients: */ - rustsecp256k1_v0_7_0_ecmult(&x2, &a, &ae, &ge); - rustsecp256k1_v0_7_0_gej_neg(&x2, &x2); - rustsecp256k1_v0_7_0_gej_add_var(&x2, &x2, &x, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&x2)); + rustsecp256k1_v0_8_0_ecmult(&x2, &a, &ae, &ge); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&x, &x2)); } -void test_point_times_order(const rustsecp256k1_v0_7_0_gej *point) { +void test_point_times_order(const rustsecp256k1_v0_8_0_gej *point) { /* X * (point + G) + (order-X) * (pointer + G) = 0 */ - rustsecp256k1_v0_7_0_scalar x; - rustsecp256k1_v0_7_0_scalar nx; - rustsecp256k1_v0_7_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - rustsecp256k1_v0_7_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_7_0_gej res1, res2; - rustsecp256k1_v0_7_0_ge res3; + rustsecp256k1_v0_8_0_scalar x; + rustsecp256k1_v0_8_0_scalar nx; + rustsecp256k1_v0_8_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_8_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_8_0_gej res1, res2; + rustsecp256k1_v0_8_0_ge res3; unsigned char pub[65]; size_t psize = 65; random_scalar_order_test(&x); - rustsecp256k1_v0_7_0_scalar_negate(&nx, &x); - rustsecp256k1_v0_7_0_ecmult(&res1, point, &x, &x); /* calc res1 = x * point + x * G; */ - rustsecp256k1_v0_7_0_ecmult(&res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ - rustsecp256k1_v0_7_0_gej_add_var(&res1, &res1, &res2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&res1)); - rustsecp256k1_v0_7_0_ge_set_gej(&res3, &res1); - CHECK(rustsecp256k1_v0_7_0_ge_is_infinity(&res3)); - CHECK(rustsecp256k1_v0_7_0_ge_is_valid_var(&res3) == 0); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); + rustsecp256k1_v0_8_0_scalar_negate(&nx, &x); + rustsecp256k1_v0_8_0_ecmult(&res1, point, &x, &x); /* calc res1 = x * point + x * G; */ + rustsecp256k1_v0_8_0_ecmult(&res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ + rustsecp256k1_v0_8_0_gej_add_var(&res1, &res1, &res2, NULL); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&res1)); + rustsecp256k1_v0_8_0_ge_set_gej(&res3, &res1); + 
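run_ecmult_chain's bookkeeping is the induction spelled out in its comment: if X == ae*A + ge*G, then

    xn*X + gn*G == xn*(ae*A + ge*G) + gn*G == (xn*ae)*A + (xn*ge + gn)*G,

so updating ae <- xn*ae and ge <- xn*ge + gn (all arithmetic mod the group order) preserves the invariant through all 200*count iterations. That is why the single final ecmult with the accumulated ae and ge must land on the same point as the whole chain; the upgraded code verifies both this and the 19999-iteration checkpoint with the new rustsecp256k1_v0_8_0_gej_eq_var helper instead of negating, adding and testing for infinity.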
CHECK(rustsecp256k1_v0_8_0_ge_is_infinity(&res3)); + CHECK(rustsecp256k1_v0_8_0_ge_is_valid_var(&res3) == 0); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); psize = 65; - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); /* check zero/one edge cases */ - rustsecp256k1_v0_7_0_ecmult(&res1, point, &zero, &zero); - rustsecp256k1_v0_7_0_ge_set_gej(&res3, &res1); - CHECK(rustsecp256k1_v0_7_0_ge_is_infinity(&res3)); - rustsecp256k1_v0_7_0_ecmult(&res1, point, &one, &zero); - rustsecp256k1_v0_7_0_ge_set_gej(&res3, &res1); + rustsecp256k1_v0_8_0_ecmult(&res1, point, &zero, &zero); + rustsecp256k1_v0_8_0_ge_set_gej(&res3, &res1); + CHECK(rustsecp256k1_v0_8_0_ge_is_infinity(&res3)); + rustsecp256k1_v0_8_0_ecmult(&res1, point, &one, &zero); + rustsecp256k1_v0_8_0_ge_set_gej(&res3, &res1); ge_equals_gej(&res3, point); - rustsecp256k1_v0_7_0_ecmult(&res1, point, &zero, &one); - rustsecp256k1_v0_7_0_ge_set_gej(&res3, &res1); - ge_equals_ge(&res3, &rustsecp256k1_v0_7_0_ge_const_g); + rustsecp256k1_v0_8_0_ecmult(&res1, point, &zero, &one); + rustsecp256k1_v0_8_0_ge_set_gej(&res3, &res1); + ge_equals_ge(&res3, &rustsecp256k1_v0_8_0_ge_const_g); } -/* These scalars reach large (in absolute value) outputs when fed to rustsecp256k1_v0_7_0_scalar_split_lambda. +/* These scalars reach large (in absolute value) outputs when fed to rustsecp256k1_v0_8_0_scalar_split_lambda. * * They are computed as: * - For a in [-2, -1, 0, 1, 2]: * - For b in [-3, -1, 1, 3]: * - Output (a*LAMBDA + (ORDER+b)/2) % ORDER */ -static const rustsecp256k1_v0_7_0_scalar scalars_near_split_bounds[20] = { +static const rustsecp256k1_v0_8_0_scalar scalars_near_split_bounds[20] = { SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fc), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fd), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fe), @@ -3658,43 +4137,43 @@ static const rustsecp256k1_v0_7_0_scalar scalars_near_split_bounds[20] = { SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a45) }; -void test_ecmult_target(const rustsecp256k1_v0_7_0_scalar* target, int mode) { +void test_ecmult_target(const rustsecp256k1_v0_8_0_scalar* target, int mode) { /* Mode: 0=ecmult_gen, 1=ecmult, 2=ecmult_const */ - rustsecp256k1_v0_7_0_scalar n1, n2; - rustsecp256k1_v0_7_0_ge p; - rustsecp256k1_v0_7_0_gej pj, p1j, p2j, ptj; - static const rustsecp256k1_v0_7_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_8_0_scalar n1, n2; + rustsecp256k1_v0_8_0_ge p; + rustsecp256k1_v0_8_0_gej pj, p1j, p2j, ptj; + static const rustsecp256k1_v0_8_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); /* Generate random n1,n2 such that n1+n2 = -target. */ random_scalar_order_test(&n1); - rustsecp256k1_v0_7_0_scalar_add(&n2, &n1, target); - rustsecp256k1_v0_7_0_scalar_negate(&n2, &n2); + rustsecp256k1_v0_8_0_scalar_add(&n2, &n1, target); + rustsecp256k1_v0_8_0_scalar_negate(&n2, &n2); /* Generate a random input point. 
*/ if (mode != 0) { random_group_element_test(&p); - rustsecp256k1_v0_7_0_gej_set_ge(&pj, &p); + rustsecp256k1_v0_8_0_gej_set_ge(&pj, &p); } /* EC multiplications */ if (mode == 0) { - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &p1j, &n1); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &p2j, &n2); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &ptj, target); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &p1j, &n1); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &p2j, &n2); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &ptj, target); } else if (mode == 1) { - rustsecp256k1_v0_7_0_ecmult(&p1j, &pj, &n1, &zero); - rustsecp256k1_v0_7_0_ecmult(&p2j, &pj, &n2, &zero); - rustsecp256k1_v0_7_0_ecmult(&ptj, &pj, target, &zero); + rustsecp256k1_v0_8_0_ecmult(&p1j, &pj, &n1, &zero); + rustsecp256k1_v0_8_0_ecmult(&p2j, &pj, &n2, &zero); + rustsecp256k1_v0_8_0_ecmult(&ptj, &pj, target, &zero); } else { - rustsecp256k1_v0_7_0_ecmult_const(&p1j, &p, &n1, 256); - rustsecp256k1_v0_7_0_ecmult_const(&p2j, &p, &n2, 256); - rustsecp256k1_v0_7_0_ecmult_const(&ptj, &p, target, 256); + rustsecp256k1_v0_8_0_ecmult_const(&p1j, &p, &n1, 256); + rustsecp256k1_v0_8_0_ecmult_const(&p2j, &p, &n2, 256); + rustsecp256k1_v0_8_0_ecmult_const(&ptj, &p, target, 256); } /* Add them all up: n1*P + n2*P + target*P = (n1+n2+target)*P = (n1+n1-n1-n2)*P = 0. */ - rustsecp256k1_v0_7_0_gej_add_var(&ptj, &ptj, &p1j, NULL); - rustsecp256k1_v0_7_0_gej_add_var(&ptj, &ptj, &p2j, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&ptj)); + rustsecp256k1_v0_8_0_gej_add_var(&ptj, &ptj, &p1j, NULL); + rustsecp256k1_v0_8_0_gej_add_var(&ptj, &ptj, &p2j, NULL); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&ptj)); } void run_ecmult_near_split_bound(void) { @@ -3711,118 +4190,118 @@ void run_ecmult_near_split_bound(void) { void run_point_times_order(void) { int i; - rustsecp256k1_v0_7_0_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); - static const rustsecp256k1_v0_7_0_fe xr = SECP256K1_FE_CONST( + rustsecp256k1_v0_8_0_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); + static const rustsecp256k1_v0_8_0_fe xr = SECP256K1_FE_CONST( 0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C, 0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45 ); for (i = 0; i < 500; i++) { - rustsecp256k1_v0_7_0_ge p; - if (rustsecp256k1_v0_7_0_ge_set_xo_var(&p, &x, 1)) { - rustsecp256k1_v0_7_0_gej j; - CHECK(rustsecp256k1_v0_7_0_ge_is_valid_var(&p)); - rustsecp256k1_v0_7_0_gej_set_ge(&j, &p); + rustsecp256k1_v0_8_0_ge p; + if (rustsecp256k1_v0_8_0_ge_set_xo_var(&p, &x, 1)) { + rustsecp256k1_v0_8_0_gej j; + CHECK(rustsecp256k1_v0_8_0_ge_is_valid_var(&p)); + rustsecp256k1_v0_8_0_gej_set_ge(&j, &p); test_point_times_order(&j); } - rustsecp256k1_v0_7_0_fe_sqr(&x, &x); + rustsecp256k1_v0_8_0_fe_sqr(&x, &x); } - rustsecp256k1_v0_7_0_fe_normalize_var(&x); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&x, &xr)); + rustsecp256k1_v0_8_0_fe_normalize_var(&x); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&x, &xr)); } void ecmult_const_random_mult(void) { /* random starting point A (on the curve) */ - rustsecp256k1_v0_7_0_ge a = SECP256K1_GE_CONST( + rustsecp256k1_v0_8_0_ge a = SECP256K1_GE_CONST( 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b, 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a, 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c, 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d ); /* random initial factor xn */ - rustsecp256k1_v0_7_0_scalar xn = SECP256K1_SCALAR_CONST( + rustsecp256k1_v0_8_0_scalar xn = SECP256K1_SCALAR_CONST( 
0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327, 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b ); /* expected xn * A (from sage) */ - rustsecp256k1_v0_7_0_ge expected_b = SECP256K1_GE_CONST( + rustsecp256k1_v0_8_0_ge expected_b = SECP256K1_GE_CONST( 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd, 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786, 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f, 0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956 ); - rustsecp256k1_v0_7_0_gej b; - rustsecp256k1_v0_7_0_ecmult_const(&b, &a, &xn, 256); + rustsecp256k1_v0_8_0_gej b; + rustsecp256k1_v0_8_0_ecmult_const(&b, &a, &xn, 256); - CHECK(rustsecp256k1_v0_7_0_ge_is_valid_var(&a)); + CHECK(rustsecp256k1_v0_8_0_ge_is_valid_var(&a)); ge_equals_gej(&expected_b, &b); } void ecmult_const_commutativity(void) { - rustsecp256k1_v0_7_0_scalar a; - rustsecp256k1_v0_7_0_scalar b; - rustsecp256k1_v0_7_0_gej res1; - rustsecp256k1_v0_7_0_gej res2; - rustsecp256k1_v0_7_0_ge mid1; - rustsecp256k1_v0_7_0_ge mid2; + rustsecp256k1_v0_8_0_scalar a; + rustsecp256k1_v0_8_0_scalar b; + rustsecp256k1_v0_8_0_gej res1; + rustsecp256k1_v0_8_0_gej res2; + rustsecp256k1_v0_8_0_ge mid1; + rustsecp256k1_v0_8_0_ge mid2; random_scalar_order_test(&a); random_scalar_order_test(&b); - rustsecp256k1_v0_7_0_ecmult_const(&res1, &rustsecp256k1_v0_7_0_ge_const_g, &a, 256); - rustsecp256k1_v0_7_0_ecmult_const(&res2, &rustsecp256k1_v0_7_0_ge_const_g, &b, 256); - rustsecp256k1_v0_7_0_ge_set_gej(&mid1, &res1); - rustsecp256k1_v0_7_0_ge_set_gej(&mid2, &res2); - rustsecp256k1_v0_7_0_ecmult_const(&res1, &mid1, &b, 256); - rustsecp256k1_v0_7_0_ecmult_const(&res2, &mid2, &a, 256); - rustsecp256k1_v0_7_0_ge_set_gej(&mid1, &res1); - rustsecp256k1_v0_7_0_ge_set_gej(&mid2, &res2); + rustsecp256k1_v0_8_0_ecmult_const(&res1, &rustsecp256k1_v0_8_0_ge_const_g, &a, 256); + rustsecp256k1_v0_8_0_ecmult_const(&res2, &rustsecp256k1_v0_8_0_ge_const_g, &b, 256); + rustsecp256k1_v0_8_0_ge_set_gej(&mid1, &res1); + rustsecp256k1_v0_8_0_ge_set_gej(&mid2, &res2); + rustsecp256k1_v0_8_0_ecmult_const(&res1, &mid1, &b, 256); + rustsecp256k1_v0_8_0_ecmult_const(&res2, &mid2, &a, 256); + rustsecp256k1_v0_8_0_ge_set_gej(&mid1, &res1); + rustsecp256k1_v0_8_0_ge_set_gej(&mid2, &res2); ge_equals_ge(&mid1, &mid2); } void ecmult_const_mult_zero_one(void) { - rustsecp256k1_v0_7_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - rustsecp256k1_v0_7_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1_v0_7_0_scalar negone; - rustsecp256k1_v0_7_0_gej res1; - rustsecp256k1_v0_7_0_ge res2; - rustsecp256k1_v0_7_0_ge point; - rustsecp256k1_v0_7_0_scalar_negate(&negone, &one); + rustsecp256k1_v0_8_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_8_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + rustsecp256k1_v0_8_0_scalar negone; + rustsecp256k1_v0_8_0_gej res1; + rustsecp256k1_v0_8_0_ge res2; + rustsecp256k1_v0_8_0_ge point; + rustsecp256k1_v0_8_0_scalar_negate(&negone, &one); random_group_element_test(&point); - rustsecp256k1_v0_7_0_ecmult_const(&res1, &point, &zero, 3); - rustsecp256k1_v0_7_0_ge_set_gej(&res2, &res1); - CHECK(rustsecp256k1_v0_7_0_ge_is_infinity(&res2)); - rustsecp256k1_v0_7_0_ecmult_const(&res1, &point, &one, 2); - rustsecp256k1_v0_7_0_ge_set_gej(&res2, &res1); + rustsecp256k1_v0_8_0_ecmult_const(&res1, &point, &zero, 3); + rustsecp256k1_v0_8_0_ge_set_gej(&res2, &res1); + CHECK(rustsecp256k1_v0_8_0_ge_is_infinity(&res2)); + rustsecp256k1_v0_8_0_ecmult_const(&res1, &point, &one, 2); + 
rustsecp256k1_v0_8_0_ge_set_gej(&res2, &res1); ge_equals_ge(&res2, &point); - rustsecp256k1_v0_7_0_ecmult_const(&res1, &point, &negone, 256); - rustsecp256k1_v0_7_0_gej_neg(&res1, &res1); - rustsecp256k1_v0_7_0_ge_set_gej(&res2, &res1); + rustsecp256k1_v0_8_0_ecmult_const(&res1, &point, &negone, 256); + rustsecp256k1_v0_8_0_gej_neg(&res1, &res1); + rustsecp256k1_v0_8_0_ge_set_gej(&res2, &res1); ge_equals_ge(&res2, &point); } void ecmult_const_chain_multiply(void) { /* Check known result (randomly generated test problem from sage) */ - const rustsecp256k1_v0_7_0_scalar scalar = SECP256K1_SCALAR_CONST( + const rustsecp256k1_v0_8_0_scalar scalar = SECP256K1_SCALAR_CONST( 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d, 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b ); - const rustsecp256k1_v0_7_0_gej expected_point = SECP256K1_GEJ_CONST( + const rustsecp256k1_v0_8_0_gej expected_point = SECP256K1_GEJ_CONST( 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd, 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f, 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196, 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435 ); - rustsecp256k1_v0_7_0_gej point; - rustsecp256k1_v0_7_0_ge res; + rustsecp256k1_v0_8_0_gej point; + rustsecp256k1_v0_8_0_ge res; int i; - rustsecp256k1_v0_7_0_gej_set_ge(&point, &rustsecp256k1_v0_7_0_ge_const_g); + rustsecp256k1_v0_8_0_gej_set_ge(&point, &rustsecp256k1_v0_8_0_ge_const_g); for (i = 0; i < 100; ++i) { - rustsecp256k1_v0_7_0_ge tmp; - rustsecp256k1_v0_7_0_ge_set_gej(&tmp, &point); - rustsecp256k1_v0_7_0_ecmult_const(&point, &tmp, &scalar, 256); + rustsecp256k1_v0_8_0_ge tmp; + rustsecp256k1_v0_8_0_ge_set_gej(&tmp, &point); + rustsecp256k1_v0_8_0_ecmult_const(&point, &tmp, &scalar, 256); } - rustsecp256k1_v0_7_0_ge_set_gej(&res, &point); + rustsecp256k1_v0_8_0_ge_set_gej(&res, &point); ge_equals_gej(&res, &expected_point); } @@ -3834,18 +4313,18 @@ void run_ecmult_const_tests(void) { } typedef struct { - rustsecp256k1_v0_7_0_scalar *sc; - rustsecp256k1_v0_7_0_ge *pt; + rustsecp256k1_v0_8_0_scalar *sc; + rustsecp256k1_v0_8_0_ge *pt; } ecmult_multi_data; -static int ecmult_multi_callback(rustsecp256k1_v0_7_0_scalar *sc, rustsecp256k1_v0_7_0_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_callback(rustsecp256k1_v0_8_0_scalar *sc, rustsecp256k1_v0_8_0_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } -static int ecmult_multi_false_callback(rustsecp256k1_v0_7_0_scalar *sc, rustsecp256k1_v0_7_0_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_false_callback(rustsecp256k1_v0_8_0_scalar *sc, rustsecp256k1_v0_8_0_ge *pt, size_t idx, void *cbdata) { (void)sc; (void)pt; (void)idx; @@ -3853,102 +4332,94 @@ static int ecmult_multi_false_callback(rustsecp256k1_v0_7_0_scalar *sc, rustsecp return 0; } -void test_ecmult_multi(rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7_0_ecmult_multi_func ecmult_multi) { +void test_ecmult_multi(rustsecp256k1_v0_8_0_scratch *scratch, rustsecp256k1_v0_8_0_ecmult_multi_func ecmult_multi) { int ncount; - rustsecp256k1_v0_7_0_scalar szero; - rustsecp256k1_v0_7_0_scalar sc[32]; - rustsecp256k1_v0_7_0_ge pt[32]; - rustsecp256k1_v0_7_0_gej r; - rustsecp256k1_v0_7_0_gej r2; + rustsecp256k1_v0_8_0_scalar szero; + rustsecp256k1_v0_8_0_scalar sc[32]; + rustsecp256k1_v0_8_0_ge pt[32]; + rustsecp256k1_v0_8_0_gej r; + rustsecp256k1_v0_8_0_gej r2; ecmult_multi_data data; data.sc = sc; data.pt = pt; - rustsecp256k1_v0_7_0_scalar_set_int(&szero, 0); + 
rustsecp256k1_v0_8_0_scalar_set_int(&szero, 0); /* No points to multiply */ CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, NULL, ecmult_multi_callback, &data, 0)); /* Check 1- and 2-point multiplies against ecmult */ for (ncount = 0; ncount < count; ncount++) { - rustsecp256k1_v0_7_0_ge ptg; - rustsecp256k1_v0_7_0_gej ptgj; + rustsecp256k1_v0_8_0_ge ptg; + rustsecp256k1_v0_8_0_gej ptgj; random_scalar_order(&sc[0]); random_scalar_order(&sc[1]); random_group_element_test(&ptg); - rustsecp256k1_v0_7_0_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_8_0_gej_set_ge(&ptgj, &ptg); pt[0] = ptg; - pt[1] = rustsecp256k1_v0_7_0_ge_const_g; + pt[1] = rustsecp256k1_v0_8_0_ge_const_g; /* only G scalar */ - rustsecp256k1_v0_7_0_ecmult(&r2, &ptgj, &szero, &sc[0]); + rustsecp256k1_v0_8_0_ecmult(&r2, &ptgj, &szero, &sc[0]); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0)); - rustsecp256k1_v0_7_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&r, &r2)); /* 1-point */ - rustsecp256k1_v0_7_0_ecmult(&r2, &ptgj, &sc[0], &szero); + rustsecp256k1_v0_8_0_ecmult(&r2, &ptgj, &sc[0], &szero); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, 1)); - rustsecp256k1_v0_7_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&r, &r2)); /* Try to multiply 1 point, but callback returns false */ CHECK(!ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1)); /* 2-point */ - rustsecp256k1_v0_7_0_ecmult(&r2, &ptgj, &sc[0], &sc[1]); + rustsecp256k1_v0_8_0_ecmult(&r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, 2)); - rustsecp256k1_v0_7_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&r, &r2)); /* 2-point with G scalar */ - rustsecp256k1_v0_7_0_ecmult(&r2, &ptgj, &sc[0], &sc[1]); + rustsecp256k1_v0_8_0_ecmult(&r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1)); - rustsecp256k1_v0_7_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&r, &r2)); } /* Check infinite outputs of various forms */ for (ncount = 0; ncount < count; ncount++) { - rustsecp256k1_v0_7_0_ge ptg; + rustsecp256k1_v0_8_0_ge ptg; size_t i, j; size_t sizes[] = { 2, 10, 32 }; for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_scalar_order(&sc[i]); - rustsecp256k1_v0_7_0_ge_set_infinity(&pt[i]); + rustsecp256k1_v0_8_0_ge_set_infinity(&pt[i]); } CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_group_element_test(&ptg); pt[i] = ptg; - rustsecp256k1_v0_7_0_scalar_set_int(&sc[i], 0); + rustsecp256k1_v0_8_0_scalar_set_int(&sc[i], 0); } CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + 
CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { random_group_element_test(&ptg); for (i = 0; i < 16; i++) { random_scalar_order(&sc[2*i]); - rustsecp256k1_v0_7_0_scalar_negate(&sc[2*i + 1], &sc[2*i]); + rustsecp256k1_v0_8_0_scalar_negate(&sc[2*i + 1], &sc[2*i]); pt[2 * i] = ptg; pt[2 * i + 1] = ptg; } CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); random_scalar_order(&sc[0]); for (i = 0; i < 16; i++) { @@ -3957,70 +4428,66 @@ void test_ecmult_multi(rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7 sc[2*i] = sc[0]; sc[2*i+1] = sc[0]; pt[2 * i] = ptg; - rustsecp256k1_v0_7_0_ge_neg(&pt[2*i+1], &pt[2*i]); + rustsecp256k1_v0_8_0_ge_neg(&pt[2*i+1], &pt[2*i]); } CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); } random_group_element_test(&ptg); - rustsecp256k1_v0_7_0_scalar_set_int(&sc[0], 0); + rustsecp256k1_v0_8_0_scalar_set_int(&sc[0], 0); pt[0] = ptg; for (i = 1; i < 32; i++) { pt[i] = ptg; random_scalar_order(&sc[i]); - rustsecp256k1_v0_7_0_scalar_add(&sc[0], &sc[0], &sc[i]); - rustsecp256k1_v0_7_0_scalar_negate(&sc[i], &sc[i]); + rustsecp256k1_v0_8_0_scalar_add(&sc[0], &sc[0], &sc[i]); + rustsecp256k1_v0_8_0_scalar_negate(&sc[i], &sc[i]); } CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, 32)); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); } /* Check random points, constant scalar */ for (ncount = 0; ncount < count; ncount++) { size_t i; - rustsecp256k1_v0_7_0_gej_set_infinity(&r); + rustsecp256k1_v0_8_0_gej_set_infinity(&r); random_scalar_order(&sc[0]); for (i = 0; i < 20; i++) { - rustsecp256k1_v0_7_0_ge ptg; + rustsecp256k1_v0_8_0_ge ptg; sc[i] = sc[0]; random_group_element_test(&ptg); pt[i] = ptg; - rustsecp256k1_v0_7_0_gej_add_ge_var(&r, &r, &pt[i], NULL); + rustsecp256k1_v0_8_0_gej_add_ge_var(&r, &r, &pt[i], NULL); } - rustsecp256k1_v0_7_0_ecmult(&r2, &r, &sc[0], &szero); + rustsecp256k1_v0_8_0_ecmult(&r2, &r, &sc[0], &szero); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - rustsecp256k1_v0_7_0_gej_neg(&r2, &r2); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&r, &r2)); } /* Check random scalars, constant point */ for (ncount = 0; ncount < count; ncount++) { size_t i; - rustsecp256k1_v0_7_0_ge ptg; - rustsecp256k1_v0_7_0_gej p0j; - rustsecp256k1_v0_7_0_scalar rs; - rustsecp256k1_v0_7_0_scalar_set_int(&rs, 0); + rustsecp256k1_v0_8_0_ge ptg; + rustsecp256k1_v0_8_0_gej p0j; + rustsecp256k1_v0_8_0_scalar rs; + rustsecp256k1_v0_8_0_scalar_set_int(&rs, 0); random_group_element_test(&ptg); for (i = 0; i < 20; i++) { random_scalar_order(&sc[i]); pt[i] = ptg; - rustsecp256k1_v0_7_0_scalar_add(&rs, &rs, &sc[i]); + rustsecp256k1_v0_8_0_scalar_add(&rs, &rs, &sc[i]); } - rustsecp256k1_v0_7_0_gej_set_ge(&p0j, &pt[0]); - rustsecp256k1_v0_7_0_ecmult(&r2, &p0j, &rs, &szero); + rustsecp256k1_v0_8_0_gej_set_ge(&p0j, &pt[0]); + rustsecp256k1_v0_8_0_ecmult(&r2, &p0j, &rs, &szero); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - rustsecp256k1_v0_7_0_gej_neg(&r2, 
&r2); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&r, &r2)); } /* Sanity check that zero scalars don't cause problems */ @@ -4029,62 +4496,60 @@ void test_ecmult_multi(rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7 random_group_element_test(&pt[ncount]); } - rustsecp256k1_v0_7_0_scalar_clear(&sc[0]); + rustsecp256k1_v0_8_0_scalar_clear(&sc[0]); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); - rustsecp256k1_v0_7_0_scalar_clear(&sc[1]); - rustsecp256k1_v0_7_0_scalar_clear(&sc[2]); - rustsecp256k1_v0_7_0_scalar_clear(&sc[3]); - rustsecp256k1_v0_7_0_scalar_clear(&sc[4]); + rustsecp256k1_v0_8_0_scalar_clear(&sc[1]); + rustsecp256k1_v0_8_0_scalar_clear(&sc[2]); + rustsecp256k1_v0_8_0_scalar_clear(&sc[3]); + rustsecp256k1_v0_8_0_scalar_clear(&sc[4]); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, 6)); CHECK(ecmult_multi(&ctx->error_callback, scratch, &r, &szero, ecmult_multi_callback, &data, 5)); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); /* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */ { const size_t TOP = 8; size_t s0i, s1i; size_t t0i, t1i; - rustsecp256k1_v0_7_0_ge ptg; - rustsecp256k1_v0_7_0_gej ptgj; + rustsecp256k1_v0_8_0_ge ptg; + rustsecp256k1_v0_8_0_gej ptgj; random_group_element_test(&ptg); - rustsecp256k1_v0_7_0_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_8_0_gej_set_ge(&ptgj, &ptg); for(t0i = 0; t0i < TOP; t0i++) { for(t1i = 0; t1i < TOP; t1i++) { - rustsecp256k1_v0_7_0_gej t0p, t1p; - rustsecp256k1_v0_7_0_scalar t0, t1; + rustsecp256k1_v0_8_0_gej t0p, t1p; + rustsecp256k1_v0_8_0_scalar t0, t1; - rustsecp256k1_v0_7_0_scalar_set_int(&t0, (t0i + 1) / 2); - rustsecp256k1_v0_7_0_scalar_cond_negate(&t0, t0i & 1); - rustsecp256k1_v0_7_0_scalar_set_int(&t1, (t1i + 1) / 2); - rustsecp256k1_v0_7_0_scalar_cond_negate(&t1, t1i & 1); + rustsecp256k1_v0_8_0_scalar_set_int(&t0, (t0i + 1) / 2); + rustsecp256k1_v0_8_0_scalar_cond_negate(&t0, t0i & 1); + rustsecp256k1_v0_8_0_scalar_set_int(&t1, (t1i + 1) / 2); + rustsecp256k1_v0_8_0_scalar_cond_negate(&t1, t1i & 1); - rustsecp256k1_v0_7_0_ecmult(&t0p, &ptgj, &t0, &szero); - rustsecp256k1_v0_7_0_ecmult(&t1p, &ptgj, &t1, &szero); + rustsecp256k1_v0_8_0_ecmult(&t0p, &ptgj, &t0, &szero); + rustsecp256k1_v0_8_0_ecmult(&t1p, &ptgj, &t1, &szero); for(s0i = 0; s0i < TOP; s0i++) { for(s1i = 0; s1i < TOP; s1i++) { - rustsecp256k1_v0_7_0_scalar tmp1, tmp2; - rustsecp256k1_v0_7_0_gej expected, actual; + rustsecp256k1_v0_8_0_scalar tmp1, tmp2; + rustsecp256k1_v0_8_0_gej expected, actual; - rustsecp256k1_v0_7_0_ge_set_gej(&pt[0], &t0p); - rustsecp256k1_v0_7_0_ge_set_gej(&pt[1], &t1p); + rustsecp256k1_v0_8_0_ge_set_gej(&pt[0], &t0p); + rustsecp256k1_v0_8_0_ge_set_gej(&pt[1], &t1p); - rustsecp256k1_v0_7_0_scalar_set_int(&sc[0], (s0i + 1) / 2); - rustsecp256k1_v0_7_0_scalar_cond_negate(&sc[0], s0i & 1); - rustsecp256k1_v0_7_0_scalar_set_int(&sc[1], (s1i + 1) / 2); - rustsecp256k1_v0_7_0_scalar_cond_negate(&sc[1], s1i & 1); + rustsecp256k1_v0_8_0_scalar_set_int(&sc[0], (s0i + 1) / 2); + rustsecp256k1_v0_8_0_scalar_cond_negate(&sc[0], s0i & 1); + rustsecp256k1_v0_8_0_scalar_set_int(&sc[1], (s1i + 1) / 2); + rustsecp256k1_v0_8_0_scalar_cond_negate(&sc[1], s1i & 1); - rustsecp256k1_v0_7_0_scalar_mul(&tmp1, &t0, &sc[0]); - rustsecp256k1_v0_7_0_scalar_mul(&tmp2, &t1, 
&sc[1]); - rustsecp256k1_v0_7_0_scalar_add(&tmp1, &tmp1, &tmp2); + rustsecp256k1_v0_8_0_scalar_mul(&tmp1, &t0, &sc[0]); + rustsecp256k1_v0_8_0_scalar_mul(&tmp2, &t1, &sc[1]); + rustsecp256k1_v0_8_0_scalar_add(&tmp1, &tmp1, &tmp2); - rustsecp256k1_v0_7_0_ecmult(&expected, &ptgj, &tmp1, &szero); + rustsecp256k1_v0_8_0_ecmult(&expected, &ptgj, &tmp1, &szero); CHECK(ecmult_multi(&ctx->error_callback, scratch, &actual, &szero, ecmult_multi_callback, &data, 2)); - rustsecp256k1_v0_7_0_gej_neg(&expected, &expected); - rustsecp256k1_v0_7_0_gej_add_var(&actual, &actual, &expected, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&actual)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&actual, &expected)); } } } @@ -4092,7 +4557,7 @@ void test_ecmult_multi(rustsecp256k1_v0_7_0_scratch *scratch, rustsecp256k1_v0_7 } } -int test_ecmult_multi_random(rustsecp256k1_v0_7_0_scratch *scratch) { +int test_ecmult_multi_random(rustsecp256k1_v0_8_0_scratch *scratch) { /* Large random test for ecmult_multi_* functions which exercises: * - Few or many inputs (0 up to 128, roughly exponentially distributed). * - Few or many 0*P or a*INF inputs (roughly uniformly distributed). @@ -4106,48 +4571,48 @@ int test_ecmult_multi_random(rustsecp256k1_v0_7_0_scratch *scratch) { * scalars[0..filled-1] and gejs[0..filled-1] are the scalars and points * which form its normal inputs. */ int filled = 0; - rustsecp256k1_v0_7_0_scalar g_scalar = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - rustsecp256k1_v0_7_0_scalar scalars[128]; - rustsecp256k1_v0_7_0_gej gejs[128]; + rustsecp256k1_v0_8_0_scalar g_scalar = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_8_0_scalar scalars[128]; + rustsecp256k1_v0_8_0_gej gejs[128]; /* The expected result, and the computed result. */ - rustsecp256k1_v0_7_0_gej expected, computed; + rustsecp256k1_v0_8_0_gej expected, computed; /* Temporaries. */ - rustsecp256k1_v0_7_0_scalar sc_tmp; - rustsecp256k1_v0_7_0_ge ge_tmp; + rustsecp256k1_v0_8_0_scalar sc_tmp; + rustsecp256k1_v0_8_0_ge ge_tmp; /* Variables needed for the actual input to ecmult_multi. */ - rustsecp256k1_v0_7_0_ge ges[128]; + rustsecp256k1_v0_8_0_ge ges[128]; ecmult_multi_data data; int i; /* Which multiplication function to use */ - int fn = rustsecp256k1_v0_7_0_testrand_int(3); - rustsecp256k1_v0_7_0_ecmult_multi_func ecmult_multi = fn == 0 ? rustsecp256k1_v0_7_0_ecmult_multi_var : - fn == 1 ? rustsecp256k1_v0_7_0_ecmult_strauss_batch_single : - rustsecp256k1_v0_7_0_ecmult_pippenger_batch_single; + int fn = rustsecp256k1_v0_8_0_testrand_int(3); + rustsecp256k1_v0_8_0_ecmult_multi_func ecmult_multi = fn == 0 ? rustsecp256k1_v0_8_0_ecmult_multi_var : + fn == 1 ? rustsecp256k1_v0_8_0_ecmult_strauss_batch_single : + rustsecp256k1_v0_8_0_ecmult_pippenger_batch_single; /* Simulate exponentially distributed num. */ - int num_bits = 2 + rustsecp256k1_v0_7_0_testrand_int(6); + int num_bits = 2 + rustsecp256k1_v0_8_0_testrand_int(6); /* Number of (scalar, point) inputs (excluding g). */ - int num = rustsecp256k1_v0_7_0_testrand_int((1 << num_bits) + 1); + int num = rustsecp256k1_v0_8_0_testrand_int((1 << num_bits) + 1); /* Number of those which are nonzero. */ - int num_nonzero = rustsecp256k1_v0_7_0_testrand_int(num + 1); + int num_nonzero = rustsecp256k1_v0_8_0_testrand_int(num + 1); /* Whether we're aiming to create an input with nonzero expected result. 
*/ - int nonzero_result = rustsecp256k1_v0_7_0_testrand_bits(1); + int nonzero_result = rustsecp256k1_v0_8_0_testrand_bits(1); /* Whether we will provide nonzero g multiplicand. In some cases our hand * is forced here based on num_nonzero and nonzero_result. */ int g_nonzero = num_nonzero == 0 ? nonzero_result : num_nonzero == 1 && !nonzero_result ? 1 : - (int)rustsecp256k1_v0_7_0_testrand_bits(1); + (int)rustsecp256k1_v0_8_0_testrand_bits(1); /* Which g_scalar pointer to pass into ecmult_multi(). */ - const rustsecp256k1_v0_7_0_scalar* g_scalar_ptr = (g_nonzero || rustsecp256k1_v0_7_0_testrand_bits(1)) ? &g_scalar : NULL; + const rustsecp256k1_v0_8_0_scalar* g_scalar_ptr = (g_nonzero || rustsecp256k1_v0_8_0_testrand_bits(1)) ? &g_scalar : NULL; /* How many EC multiplications were performed in this function. */ int mults = 0; /* How many randomization steps to apply to the input list. */ - int rands = (int)rustsecp256k1_v0_7_0_testrand_bits(3); + int rands = (int)rustsecp256k1_v0_8_0_testrand_bits(3); if (rands > num_nonzero) rands = num_nonzero; - rustsecp256k1_v0_7_0_gej_set_infinity(&expected); - rustsecp256k1_v0_7_0_gej_set_infinity(&gejs[0]); - rustsecp256k1_v0_7_0_scalar_set_int(&scalars[0], 0); + rustsecp256k1_v0_8_0_gej_set_infinity(&expected); + rustsecp256k1_v0_8_0_gej_set_infinity(&gejs[0]); + rustsecp256k1_v0_8_0_scalar_set_int(&scalars[0], 0); if (g_nonzero) { /* If g_nonzero, set g_scalar to nonzero value r. */ @@ -4156,10 +4621,10 @@ int test_ecmult_multi_random(rustsecp256k1_v0_7_0_scratch *scratch) { /* If expected=0 is desired, add a (a*r, -(1/a)*g) term to compensate. */ CHECK(num_nonzero > filled); random_scalar_order_test(&sc_tmp); - rustsecp256k1_v0_7_0_scalar_mul(&scalars[filled], &sc_tmp, &g_scalar); - rustsecp256k1_v0_7_0_scalar_inverse_var(&sc_tmp, &sc_tmp); - rustsecp256k1_v0_7_0_scalar_negate(&sc_tmp, &sc_tmp); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &gejs[filled], &sc_tmp); + rustsecp256k1_v0_8_0_scalar_mul(&scalars[filled], &sc_tmp, &g_scalar); + rustsecp256k1_v0_8_0_scalar_inverse_var(&sc_tmp, &sc_tmp); + rustsecp256k1_v0_8_0_scalar_negate(&sc_tmp, &sc_tmp); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &gejs[filled], &sc_tmp); ++filled; ++mults; } @@ -4169,14 +4634,14 @@ int test_ecmult_multi_random(rustsecp256k1_v0_7_0_scratch *scratch) { /* If a nonzero result is desired, and there is space, add a random nonzero term. */ random_scalar_order_test(&scalars[filled]); random_group_element_test(&ge_tmp); - rustsecp256k1_v0_7_0_gej_set_ge(&gejs[filled], &ge_tmp); + rustsecp256k1_v0_8_0_gej_set_ge(&gejs[filled], &ge_tmp); ++filled; } if (nonzero_result) { /* Compute the expected result using normal ecmult. */ CHECK(filled <= 1); - rustsecp256k1_v0_7_0_ecmult(&expected, &gejs[0], &scalars[0], &g_scalar); + rustsecp256k1_v0_8_0_ecmult(&expected, &gejs[0], &scalars[0], &g_scalar); mults += filled + g_nonzero; } @@ -4187,13 +4652,13 @@ int test_ecmult_multi_random(rustsecp256k1_v0_7_0_scratch *scratch) { /* Add entries to scalars,gejs so that there are num of them. All the added entries * either have scalar=0 or point=infinity, so these do not change the expected result. 
*/ while (filled < num) { - if (rustsecp256k1_v0_7_0_testrand_bits(1)) { - rustsecp256k1_v0_7_0_gej_set_infinity(&gejs[filled]); + if (rustsecp256k1_v0_8_0_testrand_bits(1)) { + rustsecp256k1_v0_8_0_gej_set_infinity(&gejs[filled]); random_scalar_order_test(&scalars[filled]); } else { - rustsecp256k1_v0_7_0_scalar_set_int(&scalars[filled], 0); + rustsecp256k1_v0_8_0_scalar_set_int(&scalars[filled], 0); random_group_element_test(&ge_tmp); - rustsecp256k1_v0_7_0_gej_set_ge(&gejs[filled], &ge_tmp); + rustsecp256k1_v0_8_0_gej_set_ge(&gejs[filled], &ge_tmp); } ++filled; } @@ -4203,13 +4668,13 @@ int test_ecmult_multi_random(rustsecp256k1_v0_7_0_scratch *scratch) { * convert some of them to be both non-0-scalar and non-infinity-point. */ for (i = 0; i < rands; ++i) { int j; - rustsecp256k1_v0_7_0_scalar v, iv; + rustsecp256k1_v0_8_0_scalar v, iv; /* Shuffle the entries. */ for (j = 0; j < num_nonzero; ++j) { - int k = rustsecp256k1_v0_7_0_testrand_int(num_nonzero - j); + int k = rustsecp256k1_v0_8_0_testrand_int(num_nonzero - j); if (k != 0) { - rustsecp256k1_v0_7_0_gej gej = gejs[j]; - rustsecp256k1_v0_7_0_scalar sc = scalars[j]; + rustsecp256k1_v0_8_0_gej gej = gejs[j]; + rustsecp256k1_v0_8_0_scalar sc = scalars[j]; gejs[j] = gejs[j + k]; scalars[j] = scalars[j + k]; gejs[j + k] = gej; @@ -4219,26 +4684,26 @@ int test_ecmult_multi_random(rustsecp256k1_v0_7_0_scratch *scratch) { /* Perturb all consecutive pairs of inputs: * a*P + b*Q -> (a+b)*P + b*(Q-P). */ for (j = 0; j + 1 < num_nonzero; j += 2) { - rustsecp256k1_v0_7_0_gej gej; - rustsecp256k1_v0_7_0_scalar_add(&scalars[j], &scalars[j], &scalars[j+1]); - rustsecp256k1_v0_7_0_gej_neg(&gej, &gejs[j]); - rustsecp256k1_v0_7_0_gej_add_var(&gejs[j+1], &gejs[j+1], &gej, NULL); + rustsecp256k1_v0_8_0_gej gej; + rustsecp256k1_v0_8_0_scalar_add(&scalars[j], &scalars[j], &scalars[j+1]); + rustsecp256k1_v0_8_0_gej_neg(&gej, &gejs[j]); + rustsecp256k1_v0_8_0_gej_add_var(&gejs[j+1], &gejs[j+1], &gej, NULL); } /* Transform the last input: a*P -> (v*a) * ((1/v)*P). */ CHECK(num_nonzero >= 1); random_scalar_order_test(&v); - rustsecp256k1_v0_7_0_scalar_inverse(&iv, &v); - rustsecp256k1_v0_7_0_scalar_mul(&scalars[num_nonzero - 1], &scalars[num_nonzero - 1], &v); - rustsecp256k1_v0_7_0_ecmult(&gejs[num_nonzero - 1], &gejs[num_nonzero - 1], &iv, NULL); + rustsecp256k1_v0_8_0_scalar_inverse(&iv, &v); + rustsecp256k1_v0_8_0_scalar_mul(&scalars[num_nonzero - 1], &scalars[num_nonzero - 1], &v); + rustsecp256k1_v0_8_0_ecmult(&gejs[num_nonzero - 1], &gejs[num_nonzero - 1], &iv, NULL); ++mults; } /* Shuffle all entries (0..num-1). */ for (i = 0; i < num; ++i) { - int j = rustsecp256k1_v0_7_0_testrand_int(num - i); + int j = rustsecp256k1_v0_8_0_testrand_int(num - i); if (j != 0) { - rustsecp256k1_v0_7_0_gej gej = gejs[i]; - rustsecp256k1_v0_7_0_scalar sc = scalars[i]; + rustsecp256k1_v0_8_0_gej gej = gejs[i]; + rustsecp256k1_v0_8_0_scalar sc = scalars[i]; gejs[i] = gejs[i + j]; scalars[i] = scalars[i + j]; gejs[i + j] = gej; @@ -4247,51 +4712,49 @@ int test_ecmult_multi_random(rustsecp256k1_v0_7_0_scratch *scratch) { } /* Compute affine versions of all inputs. */ - rustsecp256k1_v0_7_0_ge_set_all_gej_var(ges, gejs, filled); + rustsecp256k1_v0_8_0_ge_set_all_gej_var(ges, gejs, filled); /* Invoke ecmult_multi code. */ data.sc = scalars; data.pt = ges; CHECK(ecmult_multi(&ctx->error_callback, scratch, &computed, g_scalar_ptr, ecmult_multi_callback, &data, filled)); mults += num_nonzero + g_nonzero; /* Compare with expected result. 
*/ - rustsecp256k1_v0_7_0_gej_neg(&computed, &computed); - rustsecp256k1_v0_7_0_gej_add_var(&computed, &computed, &expected, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&computed)); + CHECK(rustsecp256k1_v0_8_0_gej_eq_var(&computed, &expected)); return mults; } -void test_ecmult_multi_batch_single(rustsecp256k1_v0_7_0_ecmult_multi_func ecmult_multi) { - rustsecp256k1_v0_7_0_scalar szero; - rustsecp256k1_v0_7_0_scalar sc; - rustsecp256k1_v0_7_0_ge pt; - rustsecp256k1_v0_7_0_gej r; +void test_ecmult_multi_batch_single(rustsecp256k1_v0_8_0_ecmult_multi_func ecmult_multi) { + rustsecp256k1_v0_8_0_scalar szero; + rustsecp256k1_v0_8_0_scalar sc; + rustsecp256k1_v0_8_0_ge pt; + rustsecp256k1_v0_8_0_gej r; ecmult_multi_data data; - rustsecp256k1_v0_7_0_scratch *scratch_empty; + rustsecp256k1_v0_8_0_scratch *scratch_empty; random_group_element_test(&pt); random_scalar_order(&sc); data.sc = &sc; data.pt = &pt; - rustsecp256k1_v0_7_0_scalar_set_int(&szero, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&szero, 0); /* Try to multiply 1 point, but scratch space is empty.*/ - scratch_empty = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, 0); + scratch_empty = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, 0); CHECK(!ecmult_multi(&ctx->error_callback, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1)); - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch_empty); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch_empty); } -void test_rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(void) { +void test_rustsecp256k1_v0_8_0_pippenger_bucket_window_inv(void) { int i; - CHECK(rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(0) == 0); + CHECK(rustsecp256k1_v0_8_0_pippenger_bucket_window_inv(0) == 0); for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) { /* Bucket_window of 8 is not used with endo */ if (i == 8) { continue; } - CHECK(rustsecp256k1_v0_7_0_pippenger_bucket_window(rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(i)) == i); + CHECK(rustsecp256k1_v0_8_0_pippenger_bucket_window(rustsecp256k1_v0_8_0_pippenger_bucket_window_inv(i)) == i); if (i != PIPPENGER_MAX_BUCKET_WINDOW) { - CHECK(rustsecp256k1_v0_7_0_pippenger_bucket_window(rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(i)+1) > i); + CHECK(rustsecp256k1_v0_8_0_pippenger_bucket_window(rustsecp256k1_v0_8_0_pippenger_bucket_window_inv(i)+1) > i); } } } @@ -4301,9 +4764,9 @@ void test_rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(void) { * for a given scratch space.
*/ void test_ecmult_multi_pippenger_max_points(void) { - size_t scratch_size = rustsecp256k1_v0_7_0_testrand_bits(8); - size_t max_size = rustsecp256k1_v0_7_0_pippenger_scratch_size(rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); - rustsecp256k1_v0_7_0_scratch *scratch; + size_t scratch_size = rustsecp256k1_v0_8_0_testrand_bits(8); + size_t max_size = rustsecp256k1_v0_8_0_pippenger_scratch_size(rustsecp256k1_v0_8_0_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); + rustsecp256k1_v0_8_0_scratch *scratch; size_t n_points_supported; int bucket_window = 0; @@ -4311,24 +4774,24 @@ void test_ecmult_multi_pippenger_max_points(void) { size_t i; size_t total_alloc; size_t checkpoint; - scratch = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, scratch_size); + scratch = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, scratch_size); CHECK(scratch != NULL); - checkpoint = rustsecp256k1_v0_7_0_scratch_checkpoint(&ctx->error_callback, scratch); - n_points_supported = rustsecp256k1_v0_7_0_pippenger_max_points(&ctx->error_callback, scratch); + checkpoint = rustsecp256k1_v0_8_0_scratch_checkpoint(&ctx->error_callback, scratch); + n_points_supported = rustsecp256k1_v0_8_0_pippenger_max_points(&ctx->error_callback, scratch); if (n_points_supported == 0) { - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); continue; } - bucket_window = rustsecp256k1_v0_7_0_pippenger_bucket_window(n_points_supported); + bucket_window = rustsecp256k1_v0_8_0_pippenger_bucket_window(n_points_supported); /* allocate `total_alloc` bytes over `PIPPENGER_SCRATCH_OBJECTS` many allocations */ - total_alloc = rustsecp256k1_v0_7_0_pippenger_scratch_size(n_points_supported, bucket_window); + total_alloc = rustsecp256k1_v0_8_0_pippenger_scratch_size(n_points_supported, bucket_window); for (i = 0; i < PIPPENGER_SCRATCH_OBJECTS - 1; i++) { - CHECK(rustsecp256k1_v0_7_0_scratch_alloc(&ctx->error_callback, scratch, 1)); + CHECK(rustsecp256k1_v0_8_0_scratch_alloc(&ctx->error_callback, scratch, 1)); total_alloc--; } - CHECK(rustsecp256k1_v0_7_0_scratch_alloc(&ctx->error_callback, scratch, total_alloc)); - rustsecp256k1_v0_7_0_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + CHECK(rustsecp256k1_v0_8_0_scratch_alloc(&ctx->error_callback, scratch, total_alloc)); + rustsecp256k1_v0_8_0_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); } CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW); } @@ -4338,156 +4801,156 @@ void test_ecmult_multi_batch_size_helper(void) { max_n_batch_points = 0; n = 1; - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); max_n_batch_points = 1; n = 0; - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 0); CHECK(n_batch_points == 0); max_n_batch_points = 2; n = 5; - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + 
CHECK(rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 3); CHECK(n_batch_points == 2); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH; n = ECMULT_MAX_POINTS_PER_BATCH; - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 1); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH + 1; n = ECMULT_MAX_POINTS_PER_BATCH + 1; - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 2); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH/2 + 1); max_n_batch_points = 1; n = SIZE_MAX; - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX); CHECK(n_batch_points == 1); max_n_batch_points = 2; n = SIZE_MAX; - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX/2 + 1); CHECK(n_batch_points == 2); } /** - * Run rustsecp256k1_v0_7_0_ecmult_multi_var with num points and a scratch space restricted to + * Run rustsecp256k1_v0_8_0_ecmult_multi_var with num points and a scratch space restricted to * 1 <= i <= num points. 
*/ void test_ecmult_multi_batching(void) { static const int n_points = 2*ECMULT_PIPPENGER_THRESHOLD; - rustsecp256k1_v0_7_0_scalar scG; - rustsecp256k1_v0_7_0_scalar szero; - rustsecp256k1_v0_7_0_scalar *sc = (rustsecp256k1_v0_7_0_scalar *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_7_0_scalar) * n_points); - rustsecp256k1_v0_7_0_ge *pt = (rustsecp256k1_v0_7_0_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_7_0_ge) * n_points); - rustsecp256k1_v0_7_0_gej r; - rustsecp256k1_v0_7_0_gej r2; + rustsecp256k1_v0_8_0_scalar scG; + rustsecp256k1_v0_8_0_scalar szero; + rustsecp256k1_v0_8_0_scalar *sc = (rustsecp256k1_v0_8_0_scalar *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_8_0_scalar) * n_points); + rustsecp256k1_v0_8_0_ge *pt = (rustsecp256k1_v0_8_0_ge *)checked_malloc(&ctx->error_callback, sizeof(rustsecp256k1_v0_8_0_ge) * n_points); + rustsecp256k1_v0_8_0_gej r; + rustsecp256k1_v0_8_0_gej r2; ecmult_multi_data data; int i; - rustsecp256k1_v0_7_0_scratch *scratch; + rustsecp256k1_v0_8_0_scratch *scratch; - rustsecp256k1_v0_7_0_gej_set_infinity(&r2); - rustsecp256k1_v0_7_0_scalar_set_int(&szero, 0); + rustsecp256k1_v0_8_0_gej_set_infinity(&r2); + rustsecp256k1_v0_8_0_scalar_set_int(&szero, 0); /* Get random scalars and group elements and compute result */ random_scalar_order(&scG); - rustsecp256k1_v0_7_0_ecmult(&r2, &r2, &szero, &scG); + rustsecp256k1_v0_8_0_ecmult(&r2, &r2, &szero, &scG); for(i = 0; i < n_points; i++) { - rustsecp256k1_v0_7_0_ge ptg; - rustsecp256k1_v0_7_0_gej ptgj; + rustsecp256k1_v0_8_0_ge ptg; + rustsecp256k1_v0_8_0_gej ptgj; random_group_element_test(&ptg); - rustsecp256k1_v0_7_0_gej_set_ge(&ptgj, &ptg); + rustsecp256k1_v0_8_0_gej_set_ge(&ptgj, &ptg); pt[i] = ptg; random_scalar_order(&sc[i]); - rustsecp256k1_v0_7_0_ecmult(&ptgj, &ptgj, &sc[i], NULL); - rustsecp256k1_v0_7_0_gej_add_var(&r2, &r2, &ptgj, NULL); + rustsecp256k1_v0_8_0_ecmult(&ptgj, &ptgj, &sc[i], NULL); + rustsecp256k1_v0_8_0_gej_add_var(&r2, &r2, &ptgj, NULL); } data.sc = sc; data.pt = pt; - rustsecp256k1_v0_7_0_gej_neg(&r2, &r2); + rustsecp256k1_v0_8_0_gej_neg(&r2, &r2); /* Test with empty scratch space. It should compute the correct result using * ecmult_multi_simple algorithm which doesn't require a scratch space. */ - scratch = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, 0); - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_var(&ctx->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, 0); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_var(&ctx->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_8_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); /* Test with space for 1 point in pippenger. That's not enough because * ecmult_multi selects strauss which requires more memory. It should * therefore select the simple algorithm.
*/ - scratch = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, rustsecp256k1_v0_7_0_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_var(&ctx->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, rustsecp256k1_v0_8_0_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_var(&ctx->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_8_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); for(i = 1; i <= n_points; i++) { if (i > ECMULT_PIPPENGER_THRESHOLD) { - int bucket_window = rustsecp256k1_v0_7_0_pippenger_bucket_window(i); - size_t scratch_size = rustsecp256k1_v0_7_0_pippenger_scratch_size(i, bucket_window); - scratch = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); + int bucket_window = rustsecp256k1_v0_8_0_pippenger_bucket_window(i); + size_t scratch_size = rustsecp256k1_v0_8_0_pippenger_scratch_size(i, bucket_window); + scratch = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); } else { - size_t scratch_size = rustsecp256k1_v0_7_0_strauss_scratch_size(i); - scratch = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); + size_t scratch_size = rustsecp256k1_v0_8_0_strauss_scratch_size(i); + scratch = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); } - CHECK(rustsecp256k1_v0_7_0_ecmult_multi_var(&ctx->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); - rustsecp256k1_v0_7_0_gej_add_var(&r, &r, &r2, NULL); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&r)); - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + CHECK(rustsecp256k1_v0_8_0_ecmult_multi_var(&ctx->error_callback, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + rustsecp256k1_v0_8_0_gej_add_var(&r, &r, &r2, NULL); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&r)); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); } free(sc); free(pt); } void run_ecmult_multi_tests(void) { - rustsecp256k1_v0_7_0_scratch *scratch; + rustsecp256k1_v0_8_0_scratch *scratch; int64_t todo = (int64_t)320 * count; - test_rustsecp256k1_v0_7_0_pippenger_bucket_window_inv(); + test_rustsecp256k1_v0_8_0_pippenger_bucket_window_inv(); test_ecmult_multi_pippenger_max_points(); - scratch = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, 819200); - test_ecmult_multi(scratch, rustsecp256k1_v0_7_0_ecmult_multi_var); - test_ecmult_multi(NULL, rustsecp256k1_v0_7_0_ecmult_multi_var); - test_ecmult_multi(scratch, rustsecp256k1_v0_7_0_ecmult_pippenger_batch_single); - test_ecmult_multi_batch_single(rustsecp256k1_v0_7_0_ecmult_pippenger_batch_single); - test_ecmult_multi(scratch, rustsecp256k1_v0_7_0_ecmult_strauss_batch_single); - test_ecmult_multi_batch_single(rustsecp256k1_v0_7_0_ecmult_strauss_batch_single); + scratch = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, 819200); + 
test_ecmult_multi(scratch, rustsecp256k1_v0_8_0_ecmult_multi_var); + test_ecmult_multi(NULL, rustsecp256k1_v0_8_0_ecmult_multi_var); + test_ecmult_multi(scratch, rustsecp256k1_v0_8_0_ecmult_pippenger_batch_single); + test_ecmult_multi_batch_single(rustsecp256k1_v0_8_0_ecmult_pippenger_batch_single); + test_ecmult_multi(scratch, rustsecp256k1_v0_8_0_ecmult_strauss_batch_single); + test_ecmult_multi_batch_single(rustsecp256k1_v0_8_0_ecmult_strauss_batch_single); while (todo > 0) { todo -= test_ecmult_multi_random(scratch); } - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); /* Run test_ecmult_multi with space for exactly one point */ - scratch = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, rustsecp256k1_v0_7_0_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); - test_ecmult_multi(scratch, rustsecp256k1_v0_7_0_ecmult_multi_var); - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + scratch = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, rustsecp256k1_v0_8_0_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); + test_ecmult_multi(scratch, rustsecp256k1_v0_8_0_ecmult_multi_var); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); test_ecmult_multi_batch_size_helper(); test_ecmult_multi_batching(); } -void test_wnaf(const rustsecp256k1_v0_7_0_scalar *number, int w) { - rustsecp256k1_v0_7_0_scalar x, two, t; +void test_wnaf(const rustsecp256k1_v0_8_0_scalar *number, int w) { + rustsecp256k1_v0_8_0_scalar x, two, t; int wnaf[256]; int zeroes = -1; int i; int bits; - rustsecp256k1_v0_7_0_scalar_set_int(&x, 0); - rustsecp256k1_v0_7_0_scalar_set_int(&two, 2); - bits = rustsecp256k1_v0_7_0_ecmult_wnaf(wnaf, 256, number, w); + rustsecp256k1_v0_8_0_scalar_set_int(&x, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&two, 2); + bits = rustsecp256k1_v0_8_0_ecmult_wnaf(wnaf, 256, number, w); CHECK(bits <= 256); for (i = bits-1; i >= 0; i--) { int v = wnaf[i]; - rustsecp256k1_v0_7_0_scalar_mul(&x, &x, &two); + rustsecp256k1_v0_8_0_scalar_mul(&x, &x, &two); if (v) { CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */ zeroes=0; @@ -4499,104 +4962,104 @@ void test_wnaf(const rustsecp256k1_v0_7_0_scalar *number, int w) { zeroes++; } if (v >= 0) { - rustsecp256k1_v0_7_0_scalar_set_int(&t, v); + rustsecp256k1_v0_8_0_scalar_set_int(&t, v); } else { - rustsecp256k1_v0_7_0_scalar_set_int(&t, -v); - rustsecp256k1_v0_7_0_scalar_negate(&t, &t); + rustsecp256k1_v0_8_0_scalar_set_int(&t, -v); + rustsecp256k1_v0_8_0_scalar_negate(&t, &t); } - rustsecp256k1_v0_7_0_scalar_add(&x, &x, &t); + rustsecp256k1_v0_8_0_scalar_add(&x, &x, &t); } - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&x, number)); /* check that wnaf represents number */ + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&x, number)); /* check that wnaf represents number */ } -void test_constant_wnaf_negate(const rustsecp256k1_v0_7_0_scalar *number) { - rustsecp256k1_v0_7_0_scalar neg1 = *number; - rustsecp256k1_v0_7_0_scalar neg2 = *number; +void test_constant_wnaf_negate(const rustsecp256k1_v0_8_0_scalar *number) { + rustsecp256k1_v0_8_0_scalar neg1 = *number; + rustsecp256k1_v0_8_0_scalar neg2 = *number; int sign1 = 1; int sign2 = 1; - if (!rustsecp256k1_v0_7_0_scalar_get_bits(&neg1, 0, 1)) { - rustsecp256k1_v0_7_0_scalar_negate(&neg1, &neg1); + if (!rustsecp256k1_v0_8_0_scalar_get_bits(&neg1, 0, 1)) { + rustsecp256k1_v0_8_0_scalar_negate(&neg1, &neg1); 
sign1 = -1; } - sign2 = rustsecp256k1_v0_7_0_scalar_cond_negate(&neg2, rustsecp256k1_v0_7_0_scalar_is_even(&neg2)); + sign2 = rustsecp256k1_v0_8_0_scalar_cond_negate(&neg2, rustsecp256k1_v0_8_0_scalar_is_even(&neg2)); CHECK(sign1 == sign2); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&neg1, &neg2)); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&neg1, &neg2)); } -void test_constant_wnaf(const rustsecp256k1_v0_7_0_scalar *number, int w) { - rustsecp256k1_v0_7_0_scalar x, shift; +void test_constant_wnaf(const rustsecp256k1_v0_8_0_scalar *number, int w) { + rustsecp256k1_v0_8_0_scalar x, shift; int wnaf[256] = {0}; int i; int skew; int bits = 256; - rustsecp256k1_v0_7_0_scalar num = *number; - rustsecp256k1_v0_7_0_scalar scalar_skew; + rustsecp256k1_v0_8_0_scalar num = *number; + rustsecp256k1_v0_8_0_scalar scalar_skew; - rustsecp256k1_v0_7_0_scalar_set_int(&x, 0); - rustsecp256k1_v0_7_0_scalar_set_int(&shift, 1 << w); + rustsecp256k1_v0_8_0_scalar_set_int(&x, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&shift, 1 << w); for (i = 0; i < 16; ++i) { - rustsecp256k1_v0_7_0_scalar_shr_int(&num, 8); + rustsecp256k1_v0_8_0_scalar_shr_int(&num, 8); } bits = 128; - skew = rustsecp256k1_v0_7_0_wnaf_const(wnaf, &num, w, bits); + skew = rustsecp256k1_v0_8_0_wnaf_const(wnaf, &num, w, bits); for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) { - rustsecp256k1_v0_7_0_scalar t; + rustsecp256k1_v0_8_0_scalar t; int v = wnaf[i]; CHECK(v != 0); /* check nonzero */ CHECK(v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ CHECK(v < (1 << w)); /* check range below */ - rustsecp256k1_v0_7_0_scalar_mul(&x, &x, &shift); + rustsecp256k1_v0_8_0_scalar_mul(&x, &x, &shift); if (v >= 0) { - rustsecp256k1_v0_7_0_scalar_set_int(&t, v); + rustsecp256k1_v0_8_0_scalar_set_int(&t, v); } else { - rustsecp256k1_v0_7_0_scalar_set_int(&t, -v); - rustsecp256k1_v0_7_0_scalar_negate(&t, &t); + rustsecp256k1_v0_8_0_scalar_set_int(&t, -v); + rustsecp256k1_v0_8_0_scalar_negate(&t, &t); } - rustsecp256k1_v0_7_0_scalar_add(&x, &x, &t); + rustsecp256k1_v0_8_0_scalar_add(&x, &x, &t); } /* Skew num because when encoding numbers as odd we use an offset */ - rustsecp256k1_v0_7_0_scalar_set_int(&scalar_skew, skew); - rustsecp256k1_v0_7_0_scalar_add(&num, &num, &scalar_skew); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&x, &num)); + rustsecp256k1_v0_8_0_scalar_set_int(&scalar_skew, skew); + rustsecp256k1_v0_8_0_scalar_add(&num, &num, &scalar_skew); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&x, &num)); } -void test_fixed_wnaf(const rustsecp256k1_v0_7_0_scalar *number, int w) { - rustsecp256k1_v0_7_0_scalar x, shift; +void test_fixed_wnaf(const rustsecp256k1_v0_8_0_scalar *number, int w) { + rustsecp256k1_v0_8_0_scalar x, shift; int wnaf[256] = {0}; int i; int skew; - rustsecp256k1_v0_7_0_scalar num = *number; + rustsecp256k1_v0_8_0_scalar num = *number; - rustsecp256k1_v0_7_0_scalar_set_int(&x, 0); - rustsecp256k1_v0_7_0_scalar_set_int(&shift, 1 << w); + rustsecp256k1_v0_8_0_scalar_set_int(&x, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&shift, 1 << w); for (i = 0; i < 16; ++i) { - rustsecp256k1_v0_7_0_scalar_shr_int(&num, 8); + rustsecp256k1_v0_8_0_scalar_shr_int(&num, 8); } - skew = rustsecp256k1_v0_7_0_wnaf_fixed(wnaf, &num, w); + skew = rustsecp256k1_v0_8_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { - rustsecp256k1_v0_7_0_scalar t; + rustsecp256k1_v0_8_0_scalar t; int v = wnaf[i]; CHECK(v == 0 || v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ CHECK(v < (1 << w)); /* check range below */ - 
rustsecp256k1_v0_7_0_scalar_mul(&x, &x, &shift); + rustsecp256k1_v0_8_0_scalar_mul(&x, &x, &shift); if (v >= 0) { - rustsecp256k1_v0_7_0_scalar_set_int(&t, v); + rustsecp256k1_v0_8_0_scalar_set_int(&t, v); } else { - rustsecp256k1_v0_7_0_scalar_set_int(&t, -v); - rustsecp256k1_v0_7_0_scalar_negate(&t, &t); + rustsecp256k1_v0_8_0_scalar_set_int(&t, -v); + rustsecp256k1_v0_8_0_scalar_negate(&t, &t); } - rustsecp256k1_v0_7_0_scalar_add(&x, &x, &t); + rustsecp256k1_v0_8_0_scalar_add(&x, &x, &t); } /* If skew is 1 then add 1 to num */ - rustsecp256k1_v0_7_0_scalar_cadd_bit(&num, 0, skew == 1); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&x, &num)); + rustsecp256k1_v0_8_0_scalar_cadd_bit(&num, 0, skew == 1); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&x, &num)); } /* Checks that the first 8 elements of wnaf are equal to wnaf_expected and the @@ -4616,18 +5079,18 @@ void test_fixed_wnaf_small(void) { int wnaf[256] = {0}; int i; int skew; - rustsecp256k1_v0_7_0_scalar num; + rustsecp256k1_v0_8_0_scalar num; - rustsecp256k1_v0_7_0_scalar_set_int(&num, 0); - skew = rustsecp256k1_v0_7_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_8_0_scalar_set_int(&num, 0); + skew = rustsecp256k1_v0_8_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { int v = wnaf[i]; CHECK(v == 0); } CHECK(skew == 0); - rustsecp256k1_v0_7_0_scalar_set_int(&num, 1); - skew = rustsecp256k1_v0_7_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_8_0_scalar_set_int(&num, 1); + skew = rustsecp256k1_v0_8_0_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 1; --i) { int v = wnaf[i]; CHECK(v == 0); @@ -4637,29 +5100,29 @@ void test_fixed_wnaf_small(void) { { int wnaf_expected[8] = { 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf }; - rustsecp256k1_v0_7_0_scalar_set_int(&num, 0xffffffff); - skew = rustsecp256k1_v0_7_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_8_0_scalar_set_int(&num, 0xffffffff); + skew = rustsecp256k1_v0_8_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -1, -1, -1, -1, -1, -1, -1, 0xf }; - rustsecp256k1_v0_7_0_scalar_set_int(&num, 0xeeeeeeee); - skew = rustsecp256k1_v0_7_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_8_0_scalar_set_int(&num, 0xeeeeeeee); + skew = rustsecp256k1_v0_8_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 1); } { int wnaf_expected[8] = { 1, 0, 1, 0, 1, 0, 1, 0 }; - rustsecp256k1_v0_7_0_scalar_set_int(&num, 0x01010101); - skew = rustsecp256k1_v0_7_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_8_0_scalar_set_int(&num, 0x01010101); + skew = rustsecp256k1_v0_8_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -0xf, 0, 0xf, -0xf, 0, 0xf, 1, 0 }; - rustsecp256k1_v0_7_0_scalar_set_int(&num, 0x01ef1ef1); - skew = rustsecp256k1_v0_7_0_wnaf_fixed(wnaf, &num, w); + rustsecp256k1_v0_8_0_scalar_set_int(&num, 0x01ef1ef1); + skew = rustsecp256k1_v0_8_0_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } @@ -4667,7 +5130,7 @@ void test_fixed_wnaf_small(void) { void run_wnaf(void) { int i; - rustsecp256k1_v0_7_0_scalar n = {{0}}; + rustsecp256k1_v0_8_0_scalar n = {{0}}; test_constant_wnaf(&n, 4); /* Sanity check: 1 and 2 are the smallest odd and even numbers and should @@ -4677,21 +5140,21 @@ void run_wnaf(void) { n.d[0] = 2; test_constant_wnaf(&n, 4); /* Test -1, because it's a special case in wnaf_const */ - n = rustsecp256k1_v0_7_0_scalar_one; 
- rustsecp256k1_v0_7_0_scalar_negate(&n, &n); + n = rustsecp256k1_v0_8_0_scalar_one; + rustsecp256k1_v0_8_0_scalar_negate(&n, &n); test_constant_wnaf(&n, 4); /* Test -2, which may not lead to overflows in wnaf_const */ - rustsecp256k1_v0_7_0_scalar_add(&n, &rustsecp256k1_v0_7_0_scalar_one, &rustsecp256k1_v0_7_0_scalar_one); - rustsecp256k1_v0_7_0_scalar_negate(&n, &n); + rustsecp256k1_v0_8_0_scalar_add(&n, &rustsecp256k1_v0_8_0_scalar_one, &rustsecp256k1_v0_8_0_scalar_one); + rustsecp256k1_v0_8_0_scalar_negate(&n, &n); test_constant_wnaf(&n, 4); /* Test (1/2) - 1 = 1/-2 and 1/2 = (1/-2) + 1 as corner cases of negation handling in wnaf_const */ - rustsecp256k1_v0_7_0_scalar_inverse(&n, &n); + rustsecp256k1_v0_8_0_scalar_inverse(&n, &n); test_constant_wnaf(&n, 4); - rustsecp256k1_v0_7_0_scalar_add(&n, &n, &rustsecp256k1_v0_7_0_scalar_one); + rustsecp256k1_v0_8_0_scalar_add(&n, &n, &rustsecp256k1_v0_8_0_scalar_one); test_constant_wnaf(&n, 4); /* Test 0 for fixed wnaf */ @@ -4704,56 +5167,56 @@ void run_wnaf(void) { test_constant_wnaf(&n, 4 + (i % 10)); test_fixed_wnaf(&n, 4 + (i % 10)); } - rustsecp256k1_v0_7_0_scalar_set_int(&n, 0); - CHECK(rustsecp256k1_v0_7_0_scalar_cond_negate(&n, 1) == -1); - CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(&n)); - CHECK(rustsecp256k1_v0_7_0_scalar_cond_negate(&n, 0) == 1); - CHECK(rustsecp256k1_v0_7_0_scalar_is_zero(&n)); + rustsecp256k1_v0_8_0_scalar_set_int(&n, 0); + CHECK(rustsecp256k1_v0_8_0_scalar_cond_negate(&n, 1) == -1); + CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(&n)); + CHECK(rustsecp256k1_v0_8_0_scalar_cond_negate(&n, 0) == 1); + CHECK(rustsecp256k1_v0_8_0_scalar_is_zero(&n)); } -static int test_ecmult_accumulate_cb(rustsecp256k1_v0_7_0_scalar* sc, rustsecp256k1_v0_7_0_ge* pt, size_t idx, void* data) { - const rustsecp256k1_v0_7_0_scalar* indata = (const rustsecp256k1_v0_7_0_scalar*)data; +static int test_ecmult_accumulate_cb(rustsecp256k1_v0_8_0_scalar* sc, rustsecp256k1_v0_8_0_ge* pt, size_t idx, void* data) { + const rustsecp256k1_v0_8_0_scalar* indata = (const rustsecp256k1_v0_8_0_scalar*)data; *sc = *indata; - *pt = rustsecp256k1_v0_7_0_ge_const_g; + *pt = rustsecp256k1_v0_8_0_ge_const_g; CHECK(idx == 0); return 1; } -void test_ecmult_accumulate(rustsecp256k1_v0_7_0_sha256* acc, const rustsecp256k1_v0_7_0_scalar* x, rustsecp256k1_v0_7_0_scratch* scratch) { +void test_ecmult_accumulate(rustsecp256k1_v0_8_0_sha256* acc, const rustsecp256k1_v0_8_0_scalar* x, rustsecp256k1_v0_8_0_scratch* scratch) { /* Compute x*G in 6 different ways, serialize it uncompressed, and feed it into acc. 
*/ - rustsecp256k1_v0_7_0_gej rj1, rj2, rj3, rj4, rj5, rj6, gj, infj; - rustsecp256k1_v0_7_0_ge r; - const rustsecp256k1_v0_7_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + rustsecp256k1_v0_8_0_gej rj1, rj2, rj3, rj4, rj5, rj6, gj, infj; + rustsecp256k1_v0_8_0_ge r; + const rustsecp256k1_v0_8_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); unsigned char bytes[65]; size_t size = 65; - rustsecp256k1_v0_7_0_gej_set_ge(&gj, &rustsecp256k1_v0_7_0_ge_const_g); - rustsecp256k1_v0_7_0_gej_set_infinity(&infj); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &rj1, x); - rustsecp256k1_v0_7_0_ecmult(&rj2, &gj, x, &zero); - rustsecp256k1_v0_7_0_ecmult(&rj3, &infj, &zero, x); - rustsecp256k1_v0_7_0_ecmult_multi_var(NULL, scratch, &rj4, x, NULL, NULL, 0); - rustsecp256k1_v0_7_0_ecmult_multi_var(NULL, scratch, &rj5, &zero, test_ecmult_accumulate_cb, (void*)x, 1); - rustsecp256k1_v0_7_0_ecmult_const(&rj6, &rustsecp256k1_v0_7_0_ge_const_g, x, 256); - rustsecp256k1_v0_7_0_ge_set_gej_var(&r, &rj1); + rustsecp256k1_v0_8_0_gej_set_ge(&gj, &rustsecp256k1_v0_8_0_ge_const_g); + rustsecp256k1_v0_8_0_gej_set_infinity(&infj); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &rj1, x); + rustsecp256k1_v0_8_0_ecmult(&rj2, &gj, x, &zero); + rustsecp256k1_v0_8_0_ecmult(&rj3, &infj, &zero, x); + rustsecp256k1_v0_8_0_ecmult_multi_var(NULL, scratch, &rj4, x, NULL, NULL, 0); + rustsecp256k1_v0_8_0_ecmult_multi_var(NULL, scratch, &rj5, &zero, test_ecmult_accumulate_cb, (void*)x, 1); + rustsecp256k1_v0_8_0_ecmult_const(&rj6, &rustsecp256k1_v0_8_0_ge_const_g, x, 256); + rustsecp256k1_v0_8_0_ge_set_gej_var(&r, &rj1); ge_equals_gej(&r, &rj2); ge_equals_gej(&r, &rj3); ge_equals_gej(&r, &rj4); ge_equals_gej(&r, &rj5); ge_equals_gej(&r, &rj6); - if (rustsecp256k1_v0_7_0_ge_is_infinity(&r)) { + if (rustsecp256k1_v0_8_0_ge_is_infinity(&r)) { /* Store infinity as 0x00 */ const unsigned char zerobyte[1] = {0}; - rustsecp256k1_v0_7_0_sha256_write(acc, zerobyte, 1); + rustsecp256k1_v0_8_0_sha256_write(acc, zerobyte, 1); } else { /* Store other points using their uncompressed serialization. */ - rustsecp256k1_v0_7_0_eckey_pubkey_serialize(&r, bytes, &size, 0); + rustsecp256k1_v0_8_0_eckey_pubkey_serialize(&r, bytes, &size, 0); CHECK(size == 65); - rustsecp256k1_v0_7_0_sha256_write(acc, bytes, size); + rustsecp256k1_v0_8_0_sha256_write(acc, bytes, size); } } -void test_ecmult_constants(void) { - /* Test ecmult_gen for: +void test_ecmult_constants_2bit(void) { + /* Using test_ecmult_accumulate, test ecmult for: * - For i in 0..36: * - Key i * - Key -i @@ -4761,11 +5224,11 @@ void test_ecmult_constants(void) { * - For j in 1..255 (only odd values): * - Key (j*2^i) mod order */ - rustsecp256k1_v0_7_0_scalar x; - rustsecp256k1_v0_7_0_sha256 acc; + rustsecp256k1_v0_8_0_scalar x; + rustsecp256k1_v0_8_0_sha256 acc; unsigned char b32[32]; int i, j; - rustsecp256k1_v0_7_0_scratch_space *scratch = rustsecp256k1_v0_7_0_scratch_space_create(ctx, 65536); + rustsecp256k1_v0_8_0_scratch_space *scratch = rustsecp256k1_v0_8_0_scratch_space_create(ctx, 65536); /* Expected hash of all the computed points; created with an independent * implementation. 
*/ @@ -4775,63 +5238,136 @@ void test_ecmult_constants(void) { 0x3a, 0x75, 0x87, 0x60, 0x1a, 0xf9, 0x63, 0x60, 0xd0, 0xcb, 0x1f, 0xaa, 0x85, 0x9a, 0xb7, 0xb4 }; - rustsecp256k1_v0_7_0_sha256_initialize(&acc); + rustsecp256k1_v0_8_0_sha256_initialize(&acc); for (i = 0; i <= 36; ++i) { - rustsecp256k1_v0_7_0_scalar_set_int(&x, i); + rustsecp256k1_v0_8_0_scalar_set_int(&x, i); test_ecmult_accumulate(&acc, &x, scratch); - rustsecp256k1_v0_7_0_scalar_negate(&x, &x); + rustsecp256k1_v0_8_0_scalar_negate(&x, &x); test_ecmult_accumulate(&acc, &x, scratch); }; for (i = 0; i < 256; ++i) { for (j = 1; j < 256; j += 2) { int k; - rustsecp256k1_v0_7_0_scalar_set_int(&x, j); - for (k = 0; k < i; ++k) rustsecp256k1_v0_7_0_scalar_add(&x, &x, &x); + rustsecp256k1_v0_8_0_scalar_set_int(&x, j); + for (k = 0; k < i; ++k) rustsecp256k1_v0_8_0_scalar_add(&x, &x, &x); test_ecmult_accumulate(&acc, &x, scratch); } } - rustsecp256k1_v0_7_0_sha256_finalize(&acc, b32); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(b32, expected32, 32) == 0); + rustsecp256k1_v0_8_0_sha256_finalize(&acc, b32); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(b32, expected32, 32) == 0); + + rustsecp256k1_v0_8_0_scratch_space_destroy(ctx, scratch); +} + +void test_ecmult_constants_sha(uint32_t prefix, size_t iter, const unsigned char* expected32) { + /* Using test_ecmult_accumulate, test ecmult for: + * - Key 0 + * - Key 1 + * - Key -1 + * - For i in range(iter): + * - Key SHA256(LE32(prefix) || LE16(i)) + */ + rustsecp256k1_v0_8_0_scalar x; + rustsecp256k1_v0_8_0_sha256 acc; + unsigned char b32[32]; + unsigned char inp[6]; + size_t i; + rustsecp256k1_v0_8_0_scratch_space *scratch = rustsecp256k1_v0_8_0_scratch_space_create(ctx, 65536); + + inp[0] = prefix & 0xFF; + inp[1] = (prefix >> 8) & 0xFF; + inp[2] = (prefix >> 16) & 0xFF; + inp[3] = (prefix >> 24) & 0xFF; + rustsecp256k1_v0_8_0_sha256_initialize(&acc); + rustsecp256k1_v0_8_0_scalar_set_int(&x, 0); + test_ecmult_accumulate(&acc, &x, scratch); + rustsecp256k1_v0_8_0_scalar_set_int(&x, 1); + test_ecmult_accumulate(&acc, &x, scratch); + rustsecp256k1_v0_8_0_scalar_negate(&x, &x); + test_ecmult_accumulate(&acc, &x, scratch); + + for (i = 0; i < iter; ++i) { + rustsecp256k1_v0_8_0_sha256 gen; + inp[4] = i & 0xff; + inp[5] = (i >> 8) & 0xff; + rustsecp256k1_v0_8_0_sha256_initialize(&gen); + rustsecp256k1_v0_8_0_sha256_write(&gen, inp, sizeof(inp)); + rustsecp256k1_v0_8_0_sha256_finalize(&gen, b32); + rustsecp256k1_v0_8_0_scalar_set_b32(&x, b32, NULL); + test_ecmult_accumulate(&acc, &x, scratch); + } + rustsecp256k1_v0_8_0_sha256_finalize(&acc, b32); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(b32, expected32, 32) == 0); - rustsecp256k1_v0_7_0_scratch_space_destroy(ctx, scratch); + rustsecp256k1_v0_8_0_scratch_space_destroy(ctx, scratch); } void run_ecmult_constants(void) { - test_ecmult_constants(); + /* Expected hashes of all points in the tests below. Computed using an + * independent implementation. 
*/ + static const unsigned char expected32_6bit20[32] = { + 0x68, 0xb6, 0xed, 0x6f, 0x28, 0xca, 0xc9, 0x7f, + 0x8e, 0x8b, 0xd6, 0xc0, 0x61, 0x79, 0x34, 0x6e, + 0x5a, 0x8f, 0x2b, 0xbc, 0x3e, 0x1f, 0xc5, 0x2e, + 0x2a, 0xd0, 0x45, 0x67, 0x7f, 0x95, 0x95, 0x8e + }; + static const unsigned char expected32_8bit8[32] = { + 0x8b, 0x65, 0x8e, 0xea, 0x86, 0xae, 0x3c, 0x95, + 0x90, 0xb6, 0x77, 0xa4, 0x8c, 0x76, 0xd9, 0xec, + 0xf5, 0xab, 0x8a, 0x2f, 0xfd, 0xdb, 0x19, 0x12, + 0x1a, 0xee, 0xe6, 0xb7, 0x6e, 0x05, 0x3f, 0xc6 + }; + /* For every combination of 6 bit positions out of 256, restricted to + * 20-bit windows (i.e., the first and last bit position are no more than + * 19 bits apart), all 64 bit patterns occur in the input scalars used in + * this test. */ + CONDITIONAL_TEST(1, "test_ecmult_constants_sha 1024") { + test_ecmult_constants_sha(4808378u, 1024, expected32_6bit20); + } + + /* For every combination of 8 consecutive bit positions, all 256 bit + * patterns occur in the input scalars used in this test. */ + CONDITIONAL_TEST(3, "test_ecmult_constants_sha 2048") { + test_ecmult_constants_sha(1607366309u, 2048, expected32_8bit8); + } + + CONDITIONAL_TEST(35, "test_ecmult_constants_2bit") { + test_ecmult_constants_2bit(); + } } void test_ecmult_gen_blind(void) { /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the z's don't match. */ - rustsecp256k1_v0_7_0_scalar key; - rustsecp256k1_v0_7_0_scalar b; + rustsecp256k1_v0_8_0_scalar key; + rustsecp256k1_v0_8_0_scalar b; unsigned char seed32[32]; - rustsecp256k1_v0_7_0_gej pgej; - rustsecp256k1_v0_7_0_gej pgej2; - rustsecp256k1_v0_7_0_gej i; - rustsecp256k1_v0_7_0_ge pge; + rustsecp256k1_v0_8_0_gej pgej; + rustsecp256k1_v0_8_0_gej pgej2; + rustsecp256k1_v0_8_0_gej i; + rustsecp256k1_v0_8_0_ge pge; random_scalar_order_test(&key); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); - rustsecp256k1_v0_7_0_testrand256(seed32); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); + rustsecp256k1_v0_8_0_testrand256(seed32); b = ctx->ecmult_gen_ctx.blind; i = ctx->ecmult_gen_ctx.initial; - rustsecp256k1_v0_7_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); - CHECK(!rustsecp256k1_v0_7_0_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key); + rustsecp256k1_v0_8_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + CHECK(!rustsecp256k1_v0_8_0_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key); CHECK(!gej_xyz_equals_gej(&pgej, &pgej2)); CHECK(!gej_xyz_equals_gej(&i, &ctx->ecmult_gen_ctx.initial)); - rustsecp256k1_v0_7_0_ge_set_gej(&pge, &pgej); + rustsecp256k1_v0_8_0_ge_set_gej(&pge, &pgej); ge_equals_gej(&pge, &pgej2); } void test_ecmult_gen_blind_reset(void) { /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. 
*/ - rustsecp256k1_v0_7_0_scalar b; - rustsecp256k1_v0_7_0_gej initial; - rustsecp256k1_v0_7_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); + rustsecp256k1_v0_8_0_scalar b; + rustsecp256k1_v0_8_0_gej initial; + rustsecp256k1_v0_8_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); b = ctx->ecmult_gen_ctx.blind; initial = ctx->ecmult_gen_ctx.initial; - rustsecp256k1_v0_7_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); + rustsecp256k1_v0_8_0_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); CHECK(gej_xyz_equals_gej(&initial, &ctx->ecmult_gen_ctx.initial)); } @@ -4844,46 +5380,46 @@ void run_ecmult_gen_blind(void) { } /***** ENDOMORPHISH TESTS *****/ -void test_scalar_split(const rustsecp256k1_v0_7_0_scalar* full) { - rustsecp256k1_v0_7_0_scalar s, s1, slam; +void test_scalar_split(const rustsecp256k1_v0_8_0_scalar* full) { + rustsecp256k1_v0_8_0_scalar s, s1, slam; const unsigned char zero[32] = {0}; unsigned char tmp[32]; - rustsecp256k1_v0_7_0_scalar_split_lambda(&s1, &slam, full); + rustsecp256k1_v0_8_0_scalar_split_lambda(&s1, &slam, full); /* check slam*lambda + s1 == full */ - rustsecp256k1_v0_7_0_scalar_mul(&s, &rustsecp256k1_v0_7_0_const_lambda, &slam); - rustsecp256k1_v0_7_0_scalar_add(&s, &s, &s1); - CHECK(rustsecp256k1_v0_7_0_scalar_eq(&s, full)); + rustsecp256k1_v0_8_0_scalar_mul(&s, &rustsecp256k1_v0_8_0_const_lambda, &slam); + rustsecp256k1_v0_8_0_scalar_add(&s, &s, &s1); + CHECK(rustsecp256k1_v0_8_0_scalar_eq(&s, full)); /* check that both are <= 128 bits in size */ - if (rustsecp256k1_v0_7_0_scalar_is_high(&s1)) { - rustsecp256k1_v0_7_0_scalar_negate(&s1, &s1); + if (rustsecp256k1_v0_8_0_scalar_is_high(&s1)) { + rustsecp256k1_v0_8_0_scalar_negate(&s1, &s1); } - if (rustsecp256k1_v0_7_0_scalar_is_high(&slam)) { - rustsecp256k1_v0_7_0_scalar_negate(&slam, &slam); + if (rustsecp256k1_v0_8_0_scalar_is_high(&slam)) { + rustsecp256k1_v0_8_0_scalar_negate(&slam, &slam); } - rustsecp256k1_v0_7_0_scalar_get_b32(tmp, &s1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zero, tmp, 16) == 0); - rustsecp256k1_v0_7_0_scalar_get_b32(tmp, &slam); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zero, tmp, 16) == 0); + rustsecp256k1_v0_8_0_scalar_get_b32(tmp, &s1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zero, tmp, 16) == 0); + rustsecp256k1_v0_8_0_scalar_get_b32(tmp, &slam); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zero, tmp, 16) == 0); } void run_endomorphism_tests(void) { unsigned i; - static rustsecp256k1_v0_7_0_scalar s; - test_scalar_split(&rustsecp256k1_v0_7_0_scalar_zero); - test_scalar_split(&rustsecp256k1_v0_7_0_scalar_one); - rustsecp256k1_v0_7_0_scalar_negate(&s,&rustsecp256k1_v0_7_0_scalar_one); + static rustsecp256k1_v0_8_0_scalar s; + test_scalar_split(&rustsecp256k1_v0_8_0_scalar_zero); + test_scalar_split(&rustsecp256k1_v0_8_0_scalar_one); + rustsecp256k1_v0_8_0_scalar_negate(&s,&rustsecp256k1_v0_8_0_scalar_one); test_scalar_split(&s); - test_scalar_split(&rustsecp256k1_v0_7_0_const_lambda); - rustsecp256k1_v0_7_0_scalar_add(&s, &rustsecp256k1_v0_7_0_const_lambda, &rustsecp256k1_v0_7_0_scalar_one); + test_scalar_split(&rustsecp256k1_v0_8_0_const_lambda); + rustsecp256k1_v0_8_0_scalar_add(&s, &rustsecp256k1_v0_8_0_const_lambda, &rustsecp256k1_v0_8_0_scalar_one); test_scalar_split(&s); for (i = 0; i < 100U * count; ++i) { - rustsecp256k1_v0_7_0_scalar full; + rustsecp256k1_v0_8_0_scalar full; random_scalar_order_test(&full); test_scalar_split(&full); } @@ -4894,12 
+5430,12 @@ void run_endomorphism_tests(void) { void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) { unsigned char pubkeyc[65]; - rustsecp256k1_v0_7_0_pubkey pubkey; - rustsecp256k1_v0_7_0_ge ge; + rustsecp256k1_v0_8_0_pubkey pubkey; + rustsecp256k1_v0_8_0_ge ge; size_t pubkeyclen; int32_t ecount; ecount = 0; - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); for (pubkeyclen = 3; pubkeyclen <= 65; pubkeyclen++) { /* Smaller sizes are tested exhaustively elsewhere. */ int32_t i; @@ -4925,30 +5461,30 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); outl = 65; VG_UNDEF(pubkeyo, 65); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 33); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0); CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0])); if (ypass) { /* This test isn't always done because we decode with alternative signs, so the y won't match. */ CHECK(pubkeyo[0] == ysign); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - rustsecp256k1_v0_7_0_pubkey_save(&pubkey, &ge); + rustsecp256k1_v0_8_0_pubkey_save(&pubkey, &ge); VG_CHECK(&pubkey, sizeof(pubkey)); outl = 65; VG_UNDEF(pubkeyo, 65); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 65); CHECK(pubkeyo[0] == 4); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkeyo[1], input, 64) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkeyo[1], input, 64) == 0); } CHECK(ecount == 0); } else { @@ -4956,15 +5492,15 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } } } - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, NULL, NULL); } void run_ec_pubkey_parse_test(void) { @@ -5148,8 +5684,8 @@ void run_ec_pubkey_parse_test(void) { }; unsigned char sout[65]; unsigned char shortkey[2]; - rustsecp256k1_v0_7_0_ge ge; - rustsecp256k1_v0_7_0_pubkey pubkey; + rustsecp256k1_v0_8_0_ge ge; + rustsecp256k1_v0_8_0_pubkey 
pubkey; size_t len; int32_t i; int32_t ecount; @@ -5157,16 +5693,16 @@ void run_ec_pubkey_parse_test(void) { ecount = 0; /* Nothing should be reading this far into pubkeyc. */ VG_UNDEF(&pubkeyc[65], 1); - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); /* Zero length claimed, fail, zeroize, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(shortkey, 2); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* Length one claimed, fail, zeroize, no illegal arg error. */ for (i = 0; i < 256 ; i++) { @@ -5175,10 +5711,10 @@ void run_ec_pubkey_parse_test(void) { shortkey[0] = i; VG_UNDEF(&shortkey[1], 1); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } /* Length two claimed, fail, zeroize, no illegal arg error. */ @@ -5188,101 +5724,101 @@ void run_ec_pubkey_parse_test(void) { shortkey[0] = i & 255; shortkey[1] = i >> 8; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); /* 33 bytes claimed on otherwise valid input starting with 0x04, fail, zeroize output, no illegal arg error. */ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* NULL pubkey, illegal arg error. Pubkey isn't rewritten before this step, since it's NULL into the parser. */ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0); CHECK(ecount == 2); /* NULL input string. Illegal arg and zeroize output. 
*/ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 2); /* 64 bytes claimed on input starting with 0x04, fail, zeroize output, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* 66 bytes claimed, fail, zeroize output, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* Valid parse. */ memset(&pubkey, 0, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(rustsecp256k1_v0_7_0_context_no_precomp, &pubkey, pubkeyc, 65) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(rustsecp256k1_v0_8_0_context_static, &pubkey, pubkeyc, 65) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); VG_UNDEF(&ge, sizeof(ge)); - CHECK(rustsecp256k1_v0_7_0_pubkey_load(ctx, &ge, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_pubkey_load(ctx, &ge, &pubkey) == 1); VG_CHECK(&ge.x, sizeof(ge.x)); VG_CHECK(&ge.y, sizeof(ge.y)); VG_CHECK(&ge.infinity, sizeof(ge.infinity)); - ge_equals_ge(&rustsecp256k1_v0_7_0_ge_const_g, &ge); + ge_equals_ge(&rustsecp256k1_v0_8_0_ge_const_g, &ge); CHECK(ecount == 0); - /* rustsecp256k1_v0_7_0_ec_pubkey_serialize illegal args. */ + /* rustsecp256k1_v0_8_0_ec_pubkey_serialize illegal args. 
*/ ecount = 0; len = 65; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); CHECK(ecount == 1); CHECK(len == 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); CHECK(ecount == 2); len = 65; VG_UNDEF(sout, 65); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); VG_CHECK(sout, 65); CHECK(ecount == 3); CHECK(len == 0); len = 65; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0); CHECK(ecount == 4); CHECK(len == 0); len = 65; VG_UNDEF(sout, 65); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); VG_CHECK(sout, 65); CHECK(ecount == 4); CHECK(len == 65); /* Multiple illegal args. Should still set arg error only once. */ ecount = 0; ecount2 = 11; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); CHECK(ecount == 1); /* Does the illegal arg callback actually change the behavior? */ - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); CHECK(ecount == 1); CHECK(ecount2 == 10); - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, NULL, NULL); /* Try a bunch of prefabbed points with all possible encodings. */ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NVALID; i++) { ec_pubkey_parse_pointtest(valid[i], 1, 1); @@ -5302,253 +5838,253 @@ void run_eckey_edge_case_test(void) { 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; - const unsigned char zeros[sizeof(rustsecp256k1_v0_7_0_pubkey)] = {0x00}; + const unsigned char zeros[sizeof(rustsecp256k1_v0_8_0_pubkey)] = {0x00}; unsigned char ctmp[33]; unsigned char ctmp2[33]; - rustsecp256k1_v0_7_0_pubkey pubkey; - rustsecp256k1_v0_7_0_pubkey pubkey2; - rustsecp256k1_v0_7_0_pubkey pubkey_one; - rustsecp256k1_v0_7_0_pubkey pubkey_negone; - const rustsecp256k1_v0_7_0_pubkey *pubkeys[3]; + rustsecp256k1_v0_8_0_pubkey pubkey; + rustsecp256k1_v0_8_0_pubkey pubkey2; + rustsecp256k1_v0_8_0_pubkey pubkey_one; + rustsecp256k1_v0_8_0_pubkey pubkey_negone; + const rustsecp256k1_v0_8_0_pubkey *pubkeys[3]; size_t len; int32_t ecount; /* Group order is too large, reject. 
*/ - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, orderc) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, orderc) == 0); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, orderc) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); /* Maximum value is too large, reject. */ memset(ctmp, 255, 32); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); /* Zero is too small, reject. */ memset(ctmp, 0, 32); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); /* One must be accepted. */ ctmp[31] = 0x01; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) > 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) > 0); pubkey_one = pubkey; /* Group order + 1 is too large, reject. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x42; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); /* -1 must be accepted. 
*/ ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) > 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) > 0); pubkey_negone = pubkey; /* Tweak of zero leaves the value unchanged. */ memset(ctmp2, 0, 32); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); memcpy(&pubkey2, &pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Multiply tweak of zero zeroizes the output. */ - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros, ctmp, 32) == 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing seckey, the seckey is zeroized. */ memcpy(ctmp, orderc, 32); memset(ctmp2, 0, 32); ctmp2[31] = 0x01; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, ctmp2) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, ctmp) == 0); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, ctmp2) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros, ctmp, 32) == 0); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing tweak, the seckey is zeroized. 
*/ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, ctmp, orderc) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, ctmp, orderc) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, ctmp, orderc) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros, ctmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, ctmp, orderc) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; /* If pubkey_tweak_add or pubkey_tweak_mul are called with an overflowing tweak, the pubkey is zeroized. */ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - /* If the resulting key in rustsecp256k1_v0_7_0_ec_seckey_tweak_add and - * rustsecp256k1_v0_7_0_ec_pubkey_tweak_add is 0 the functions fail and in the latter + /* If the resulting key in rustsecp256k1_v0_8_0_ec_seckey_tweak_add and + * rustsecp256k1_v0_8_0_ec_pubkey_tweak_add is 0 the functions fail and in the latter * case the pubkey is zeroized. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; memset(ctmp2, 0, 32); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(zeros, ctmp2, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(zeros, ctmp2, 32) == 0); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* Tweak computation wraps and results in a key of 1. */ ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Tweak mul * 2 = 1+1. 
*/ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 2; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Test argument errors. */ ecount = 0; - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); CHECK(ecount == 0); /* Zeroize pubkey on parse error. */ memset(&pubkey, 0, 32); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); memset(&pubkey2, 0, 32); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0); /* Plain argument errors. */ ecount = 0; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, ctmp) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, ctmp) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, NULL) == 0); CHECK(ecount == 1); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 4; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 4; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, ctmp, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, ctmp, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 1; - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, NULL, ctmp2) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, ctmp, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, ctmp, NULL) == 0); CHECK(ecount == 2); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, NULL, ctmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, NULL, 
ctmp) == 0); CHECK(ecount == 1); memset(&pubkey, 1, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); - /* rustsecp256k1_v0_7_0_ec_pubkey_combine tests. */ + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); + /* rustsecp256k1_v0_8_0_ec_pubkey_combine tests. */ ecount = 0; pubkeys[0] = &pubkey_one; - VG_UNDEF(&pubkeys[0], sizeof(rustsecp256k1_v0_7_0_pubkey *)); - VG_UNDEF(&pubkeys[1], sizeof(rustsecp256k1_v0_7_0_pubkey *)); - VG_UNDEF(&pubkeys[2], sizeof(rustsecp256k1_v0_7_0_pubkey *)); - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_7_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); + VG_UNDEF(&pubkeys[0], sizeof(rustsecp256k1_v0_8_0_pubkey *)); + VG_UNDEF(&pubkeys[1], sizeof(rustsecp256k1_v0_8_0_pubkey *)); + VG_UNDEF(&pubkeys[2], sizeof(rustsecp256k1_v0_8_0_pubkey *)); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_8_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); CHECK(ecount == 2); - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_7_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_8_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); CHECK(ecount == 3); pubkeys[0] = &pubkey_negone; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_7_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_8_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) > 0); CHECK(ecount 
== 3); len = 33; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(ctmp, ctmp2, 33) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(ctmp, ctmp2, 33) == 0); /* Result is infinity. */ pubkeys[0] = &pubkey_one; pubkeys[1] = &pubkey_negone; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_7_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) == 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_8_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) == 0); CHECK(ecount == 3); /* Passes through infinity but comes out one. */ pubkeys[2] = &pubkey_one; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_7_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_8_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) > 0); CHECK(ecount == 3); len = 33; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(ctmp, ctmp2, 33) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(ctmp, ctmp2, 33) == 0); /* Adds to two. 
*/ pubkeys[1] = &pubkey_one; - memset(&pubkey, 255, sizeof(rustsecp256k1_v0_7_0_pubkey)); - VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); - VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_7_0_pubkey)) > 0); + memset(&pubkey, 255, sizeof(rustsecp256k1_v0_8_0_pubkey)); + VG_UNDEF(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); + VG_CHECK(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, zeros, sizeof(rustsecp256k1_v0_8_0_pubkey)) > 0); CHECK(ecount == 3); - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, NULL, NULL); } void run_eckey_negate_test(void) { @@ -5559,22 +6095,22 @@ void run_eckey_negate_test(void) { memcpy(seckey_tmp, seckey, 32); /* Verify negation changes the key and changes it back */ - CHECK(rustsecp256k1_v0_7_0_ec_seckey_negate(ctx, seckey) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(seckey, seckey_tmp, 32) != 0); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_negate(ctx, seckey) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, seckey) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(seckey, seckey_tmp, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, seckey) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Check that privkey alias gives same result */ - CHECK(rustsecp256k1_v0_7_0_ec_seckey_negate(ctx, seckey) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_privkey_negate(ctx, seckey_tmp) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, seckey) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_privkey_negate(ctx, seckey_tmp) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating all 0s fails */ memset(seckey, 0, 32); memset(seckey_tmp, 0, 32); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_negate(ctx, seckey) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, seckey) == 0); /* Check that seckey is not modified */ - CHECK(rustsecp256k1_v0_7_0_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating an overflowing seckey fails and the seckey is zeroed. 
In this * test, the seckey has 16 random bytes to ensure that ec_seckey_negate @@ -5582,30 +6118,30 @@ void run_eckey_negate_test(void) { random_scalar_order_b32(seckey); memset(seckey, 0xFF, 16); memset(seckey_tmp, 0, 32); - CHECK(rustsecp256k1_v0_7_0_ec_seckey_negate(ctx, seckey) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(seckey, seckey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, seckey) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(seckey, seckey_tmp, 32) == 0); } -void random_sign(rustsecp256k1_v0_7_0_scalar *sigr, rustsecp256k1_v0_7_0_scalar *sigs, const rustsecp256k1_v0_7_0_scalar *key, const rustsecp256k1_v0_7_0_scalar *msg, int *recid) { - rustsecp256k1_v0_7_0_scalar nonce; +void random_sign(rustsecp256k1_v0_8_0_scalar *sigr, rustsecp256k1_v0_8_0_scalar *sigs, const rustsecp256k1_v0_8_0_scalar *key, const rustsecp256k1_v0_8_0_scalar *msg, int *recid) { + rustsecp256k1_v0_8_0_scalar nonce; do { random_scalar_order_test(&nonce); - } while(!rustsecp256k1_v0_7_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); + } while(!rustsecp256k1_v0_8_0_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); } void test_ecdsa_sign_verify(void) { - rustsecp256k1_v0_7_0_gej pubj; - rustsecp256k1_v0_7_0_ge pub; - rustsecp256k1_v0_7_0_scalar one; - rustsecp256k1_v0_7_0_scalar msg, key; - rustsecp256k1_v0_7_0_scalar sigr, sigs; + rustsecp256k1_v0_8_0_gej pubj; + rustsecp256k1_v0_8_0_ge pub; + rustsecp256k1_v0_8_0_scalar one; + rustsecp256k1_v0_8_0_scalar msg, key; + rustsecp256k1_v0_8_0_scalar sigr, sigs; int getrec; int recid; random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); - rustsecp256k1_v0_7_0_ge_set_gej(&pub, &pubj); - getrec = rustsecp256k1_v0_7_0_testrand_bits(1); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); + rustsecp256k1_v0_8_0_ge_set_gej(&pub, &pubj); + getrec = rustsecp256k1_v0_8_0_testrand_bits(1); /* The specific way in which this conditional is written sidesteps a potential bug in clang. See the commit messages of the commit that introduced this comment for details. 
*/ if (getrec) { @@ -5614,10 +6150,10 @@ void test_ecdsa_sign_verify(void) { } else { random_sign(&sigr, &sigs, &key, &msg, NULL); } - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); - rustsecp256k1_v0_7_0_scalar_set_int(&one, 1); - rustsecp256k1_v0_7_0_scalar_add(&msg, &msg, &one); - CHECK(!rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); + rustsecp256k1_v0_8_0_scalar_set_int(&one, 1); + rustsecp256k1_v0_8_0_scalar_add(&msg, &msg, &one); + CHECK(!rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sigr, &sigs, &pub, &msg)); } void run_ecdsa_sign_verify(void) { @@ -5674,9 +6210,9 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5); } -int is_empty_signature(const rustsecp256k1_v0_7_0_ecdsa_signature *sig) { - static const unsigned char res[sizeof(rustsecp256k1_v0_7_0_ecdsa_signature)] = {0}; - return rustsecp256k1_v0_7_0_memcmp_var(sig, res, sizeof(rustsecp256k1_v0_7_0_ecdsa_signature)) == 0; +int is_empty_signature(const rustsecp256k1_v0_8_0_ecdsa_signature *sig) { + static const unsigned char res[sizeof(rustsecp256k1_v0_8_0_ecdsa_signature)] = {0}; + return rustsecp256k1_v0_8_0_memcmp_var(sig, res, sizeof(rustsecp256k1_v0_8_0_ecdsa_signature)) == 0; } void test_ecdsa_end_to_end(void) { @@ -5684,191 +6220,191 @@ void test_ecdsa_end_to_end(void) { unsigned char privkey[32]; unsigned char message[32]; unsigned char privkey2[32]; - rustsecp256k1_v0_7_0_ecdsa_signature signature[6]; - rustsecp256k1_v0_7_0_scalar r, s; + rustsecp256k1_v0_8_0_ecdsa_signature signature[6]; + rustsecp256k1_v0_8_0_scalar r, s; unsigned char sig[74]; size_t siglen = 74; unsigned char pubkeyc[65]; size_t pubkeyclen = 65; - rustsecp256k1_v0_7_0_pubkey pubkey; - rustsecp256k1_v0_7_0_pubkey pubkey_tmp; + rustsecp256k1_v0_8_0_pubkey pubkey; + rustsecp256k1_v0_8_0_pubkey pubkey_tmp; unsigned char seckey[300]; size_t seckeylen = 300; /* Generate a random key and message. */ { - rustsecp256k1_v0_7_0_scalar msg, key; + rustsecp256k1_v0_8_0_scalar msg, key; random_scalar_order_test(&msg); random_scalar_order_test(&key); - rustsecp256k1_v0_7_0_scalar_get_b32(privkey, &key); - rustsecp256k1_v0_7_0_scalar_get_b32(message, &msg); + rustsecp256k1_v0_8_0_scalar_get_b32(privkey, &key); + rustsecp256k1_v0_8_0_scalar_get_b32(message, &msg); } /* Construct and verify corresponding public key. */ - CHECK(rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, privkey) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, privkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Verify exporting and importing public key. */ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, rustsecp256k1_v0_7_0_testrand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, rustsecp256k1_v0_8_0_testrand_bits(1) == 1 ? 
SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); memset(&pubkey, 0, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); /* Verify negation changes the key and changes it back */ memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey)); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); /* Verify private key import and export. */ - CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, rustsecp256k1_v0_7_0_testrand_bits(1) == 1)); + CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, rustsecp256k1_v0_8_0_testrand_bits(1) == 1)); CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(privkey, privkey2, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(privkey, privkey2, 32) == 0); /* Optionally tweak the keys using addition. */ - if (rustsecp256k1_v0_7_0_testrand_int(3) == 0) { + if (rustsecp256k1_v0_8_0_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; - rustsecp256k1_v0_7_0_pubkey pubkey2; - rustsecp256k1_v0_7_0_testrand256_test(rnd); + rustsecp256k1_v0_8_0_pubkey pubkey2; + rustsecp256k1_v0_8_0_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); - ret1 = rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, privkey, rnd); - ret2 = rustsecp256k1_v0_7_0_ec_pubkey_tweak_add(ctx, &pubkey, rnd); + ret1 = rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, privkey, rnd); + ret2 = rustsecp256k1_v0_8_0_ec_pubkey_tweak_add(ctx, &pubkey, rnd); /* Check that privkey alias gives same result */ - ret3 = rustsecp256k1_v0_7_0_ec_privkey_tweak_add(ctx, privkey_tmp, rnd); + ret3 = rustsecp256k1_v0_8_0_ec_privkey_tweak_add(ctx, privkey_tmp, rnd); CHECK(ret1 == ret2); CHECK(ret2 == ret3); if (ret1 == 0) { return; } - CHECK(rustsecp256k1_v0_7_0_memcmp_var(privkey, privkey_tmp, 32) == 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(privkey, privkey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Optionally tweak the keys using multiplication. 
*/ - if (rustsecp256k1_v0_7_0_testrand_int(3) == 0) { + if (rustsecp256k1_v0_8_0_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; - rustsecp256k1_v0_7_0_pubkey pubkey2; - rustsecp256k1_v0_7_0_testrand256_test(rnd); + rustsecp256k1_v0_8_0_pubkey pubkey2; + rustsecp256k1_v0_8_0_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); - ret1 = rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, privkey, rnd); - ret2 = rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); + ret1 = rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, privkey, rnd); + ret2 = rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); /* Check that privkey alias gives same result */ - ret3 = rustsecp256k1_v0_7_0_ec_privkey_tweak_mul(ctx, privkey_tmp, rnd); + ret3 = rustsecp256k1_v0_8_0_ec_privkey_tweak_mul(ctx, privkey_tmp, rnd); CHECK(ret1 == ret2); CHECK(ret2 == ret3); if (ret1 == 0) { return; } - CHECK(rustsecp256k1_v0_7_0_memcmp_var(privkey, privkey_tmp, 32) == 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(privkey, privkey_tmp, 32) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Sign. */ - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1); extra[31] = 1; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1); extra[31] = 0; extra[0] = 1; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0); + 
CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0); /* Verify. */ - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1); /* Test lower-S form, malleate, verify and fail, test again, malleate again */ - CHECK(!rustsecp256k1_v0_7_0_ecdsa_signature_normalize(ctx, NULL, &signature[0])); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, &signature[0]); - rustsecp256k1_v0_7_0_scalar_negate(&s, &s); - rustsecp256k1_v0_7_0_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); - CHECK(!rustsecp256k1_v0_7_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(!rustsecp256k1_v0_7_0_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); - rustsecp256k1_v0_7_0_scalar_negate(&s, &s); - rustsecp256k1_v0_7_0_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(!rustsecp256k1_v0_7_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&signature[5], &signature[0], 64) == 0); + CHECK(!rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, NULL, &signature[0])); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, &signature[0]); + rustsecp256k1_v0_8_0_scalar_negate(&s, &s); + rustsecp256k1_v0_8_0_ecdsa_signature_save(&signature[5], &r, &s); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); + CHECK(!rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(!rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); + rustsecp256k1_v0_8_0_scalar_negate(&s, &s); + rustsecp256k1_v0_8_0_ecdsa_signature_save(&signature[5], &r, &s); + CHECK(!rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, NULL, &signature[5])); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&signature[5], &signature[0], 64) == 0); /* Serialize/parse DER and verify again */ - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); + 
CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); memset(&signature[0], 0, sizeof(signature[0])); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); /* Serialize/destroy/parse DER and verify again. */ siglen = 74; - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); - sig[rustsecp256k1_v0_7_0_testrand_int(siglen)] += 1 + rustsecp256k1_v0_7_0_testrand_int(255); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || - rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); + sig[rustsecp256k1_v0_8_0_testrand_int(siglen)] += 1 + rustsecp256k1_v0_8_0_testrand_int(255); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || + rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); } void test_random_pubkeys(void) { - rustsecp256k1_v0_7_0_ge elem; - rustsecp256k1_v0_7_0_ge elem2; + rustsecp256k1_v0_8_0_ge elem; + rustsecp256k1_v0_8_0_ge elem2; unsigned char in[65]; /* Generate some randomly sized pubkeys. */ - size_t len = rustsecp256k1_v0_7_0_testrand_bits(2) == 0 ? 65 : 33; - if (rustsecp256k1_v0_7_0_testrand_bits(2) == 0) { - len = rustsecp256k1_v0_7_0_testrand_bits(6); + size_t len = rustsecp256k1_v0_8_0_testrand_bits(2) == 0 ? 65 : 33; + if (rustsecp256k1_v0_8_0_testrand_bits(2) == 0) { + len = rustsecp256k1_v0_8_0_testrand_bits(6); } if (len == 65) { - in[0] = rustsecp256k1_v0_7_0_testrand_bits(1) ? 4 : (rustsecp256k1_v0_7_0_testrand_bits(1) ? 6 : 7); + in[0] = rustsecp256k1_v0_8_0_testrand_bits(1) ? 4 : (rustsecp256k1_v0_8_0_testrand_bits(1) ? 6 : 7); } else { - in[0] = rustsecp256k1_v0_7_0_testrand_bits(1) ? 2 : 3; + in[0] = rustsecp256k1_v0_8_0_testrand_bits(1) ? 2 : 3; } - if (rustsecp256k1_v0_7_0_testrand_bits(3) == 0) { - in[0] = rustsecp256k1_v0_7_0_testrand_bits(8); + if (rustsecp256k1_v0_8_0_testrand_bits(3) == 0) { + in[0] = rustsecp256k1_v0_8_0_testrand_bits(8); } if (len > 1) { - rustsecp256k1_v0_7_0_testrand256(&in[1]); + rustsecp256k1_v0_8_0_testrand256(&in[1]); } if (len > 33) { - rustsecp256k1_v0_7_0_testrand256(&in[33]); + rustsecp256k1_v0_8_0_testrand256(&in[33]); } - if (rustsecp256k1_v0_7_0_eckey_pubkey_parse(&elem, in, len)) { + if (rustsecp256k1_v0_8_0_eckey_pubkey_parse(&elem, in, len)) { unsigned char out[65]; unsigned char firstb; int res; size_t size = len; firstb = in[0]; /* If the pubkey can be parsed, it should round-trip... */ - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_serialize(&elem, out, &size, len == 33)); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_serialize(&elem, out, &size, len == 33)); CHECK(size == len); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&in[1], &out[1], len-1) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&in[1], &out[1], len-1) == 0); /* ... except for the type of hybrid inputs. 
*/ if ((in[0] != 6) && (in[0] != 7)) { CHECK(in[0] == out[0]); } size = 65; - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_serialize(&elem, in, &size, 0)); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_serialize(&elem, in, &size, 0)); CHECK(size == 65); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_parse(&elem2, in, size)); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_parse(&elem2, in, size)); ge_equals_ge(&elem,&elem2); /* Check that the X9.62 hybrid type is checked. */ - in[0] = rustsecp256k1_v0_7_0_testrand_bits(1) ? 6 : 7; - res = rustsecp256k1_v0_7_0_eckey_pubkey_parse(&elem2, in, size); + in[0] = rustsecp256k1_v0_8_0_testrand_bits(1) ? 6 : 7; + res = rustsecp256k1_v0_8_0_eckey_pubkey_parse(&elem2, in, size); if (firstb == 2 || firstb == 3) { if (in[0] == firstb + 4) { CHECK(res); @@ -5878,8 +6414,8 @@ void test_random_pubkeys(void) { } if (res) { ge_equals_ge(&elem,&elem2); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_serialize(&elem, out, &size, 0)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&in[1], &out[1], 64) == 0); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_serialize(&elem, out, &size, 0)); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&in[1], &out[1], 64) == 0); } } } @@ -5895,42 +6431,42 @@ void run_pubkey_comparison(void) { 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c }; - rustsecp256k1_v0_7_0_pubkey pk1; - rustsecp256k1_v0_7_0_pubkey pk2; + rustsecp256k1_v0_8_0_pubkey pk1; + rustsecp256k1_v0_8_0_pubkey pk2; int32_t ecount = 0; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pk1, pk1_ser, sizeof(pk1_ser)) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pk2, pk2_ser, sizeof(pk2_ser)) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pk1, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pk2, pk2_ser, sizeof(pk2_ser)) == 1); - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, NULL, &pk2) < 0); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, NULL, &pk2) < 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk1, NULL) > 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk1, NULL) > 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk1, &pk1) == 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk2, &pk2) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk1, &pk1) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk2, &pk2) == 0); CHECK(ecount == 2); { - rustsecp256k1_v0_7_0_pubkey pk_tmp; + rustsecp256k1_v0_8_0_pubkey pk_tmp; memset(&pk_tmp, 0, sizeof(pk_tmp)); /* illegal pubkey */ - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk_tmp, &pk2) < 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk_tmp, &pk2) < 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk_tmp, &pk_tmp) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk_tmp, &pk_tmp) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk2, &pk_tmp) > 0); + 
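The comparison tests in `run_pubkey_comparison` rely on `ec_pubkey_cmp` ordering keys by their 33-byte compressed serialization, which is why flipping the leading prefix byte from 2 to 3 reverses the order that the uncompressed encodings would give. A sketch of that equivalence (hypothetical helper, assuming valid `ctx`, `a`, `b`):

```c
#include <string.h>

/* ec_pubkey_cmp(a, b) agrees in sign with memcmp over the compressed
 * serializations, so 0x02-prefixed keys sort before 0x03-prefixed ones
 * with the same X coordinate. */
int cmp_via_serialization(const rustsecp256k1_v0_8_0_context *ctx,
                          const rustsecp256k1_v0_8_0_pubkey *a,
                          const rustsecp256k1_v0_8_0_pubkey *b) {
    unsigned char sa[33], sb[33];
    size_t la = sizeof(sa), lb = sizeof(sb);
    CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, sa, &la, a, SECP256K1_EC_COMPRESSED));
    CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, sb, &lb, b, SECP256K1_EC_COMPRESSED));
    return memcmp(sa, sb, sizeof(sa));
}
```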
CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk2, &pk_tmp) > 0); CHECK(ecount == 6); } - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, NULL, NULL); /* Make pk2 the same as pk1 but with 3 rather than 2. Note that in * an uncompressed encoding, these would have the opposite ordering */ pk1_ser[0] = 3; - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_parse(ctx, &pk2, pk1_ser, sizeof(pk1_ser)) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_parse(ctx, &pk2, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); } void run_random_pubkeys(void) { @@ -5952,36 +6488,36 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ int ret = 0; - rustsecp256k1_v0_7_0_ecdsa_signature sig_der; + rustsecp256k1_v0_8_0_ecdsa_signature sig_der; unsigned char roundtrip_der[2048]; unsigned char compact_der[64]; size_t len_der = 2048; int parsed_der = 0, valid_der = 0, roundtrips_der = 0; - rustsecp256k1_v0_7_0_ecdsa_signature sig_der_lax; + rustsecp256k1_v0_8_0_ecdsa_signature sig_der_lax; unsigned char roundtrip_der_lax[2048]; unsigned char compact_der_lax[64]; size_t len_der_lax = 2048; int parsed_der_lax = 0, valid_der_lax = 0, roundtrips_der_lax = 0; - parsed_der = rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); + parsed_der = rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); if (parsed_der) { - ret |= (!rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; - valid_der = (rustsecp256k1_v0_7_0_memcmp_var(compact_der, zeroes, 32) != 0) && (rustsecp256k1_v0_7_0_memcmp_var(compact_der + 32, zeroes, 32) != 0); + ret |= (!rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; + valid_der = (rustsecp256k1_v0_8_0_memcmp_var(compact_der, zeroes, 32) != 0) && (rustsecp256k1_v0_8_0_memcmp_var(compact_der + 32, zeroes, 32) != 0); } if (valid_der) { - ret |= (!rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; - roundtrips_der = (len_der == siglen) && rustsecp256k1_v0_7_0_memcmp_var(roundtrip_der, sig, siglen) == 0; + ret |= (!rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; + roundtrips_der = (len_der == siglen) && rustsecp256k1_v0_8_0_memcmp_var(roundtrip_der, sig, siglen) == 0; } - parsed_der_lax = rustsecp256k1_v0_7_0_ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); + parsed_der_lax = rustsecp256k1_v0_8_0_ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); if (parsed_der_lax) { - ret |= (!rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; - valid_der_lax = (rustsecp256k1_v0_7_0_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (rustsecp256k1_v0_7_0_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0); + ret |= (!rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; + valid_der_lax = (rustsecp256k1_v0_8_0_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (rustsecp256k1_v0_8_0_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0); } if (valid_der_lax) { - ret |= (!rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, 
&sig_der_lax)) << 11; - roundtrips_der_lax = (len_der_lax == siglen) && rustsecp256k1_v0_7_0_memcmp_var(roundtrip_der_lax, sig, siglen) == 0; + ret |= (!rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; + roundtrips_der_lax = (len_der_lax == siglen) && rustsecp256k1_v0_8_0_memcmp_var(roundtrip_der_lax, sig, siglen) == 0; } if (certainly_der) { @@ -5997,7 +6533,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ if (valid_der) { ret |= (!roundtrips_der_lax) << 12; ret |= (len_der != len_der_lax) << 13; - ret |= ((len_der != len_der_lax) || (rustsecp256k1_v0_7_0_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; + ret |= ((len_der != len_der_lax) || (rustsecp256k1_v0_8_0_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; } ret |= (roundtrips_der != roundtrips_der_lax) << 15; if (parsed_der) { @@ -6021,27 +6557,27 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) { static void damage_array(unsigned char *sig, size_t *len) { int pos; - int action = rustsecp256k1_v0_7_0_testrand_bits(3); + int action = rustsecp256k1_v0_8_0_testrand_bits(3); if (action < 1 && *len > 3) { /* Delete a byte. */ - pos = rustsecp256k1_v0_7_0_testrand_int(*len); + pos = rustsecp256k1_v0_8_0_testrand_int(*len); memmove(sig + pos, sig + pos + 1, *len - pos - 1); (*len)--; return; } else if (action < 2 && *len < 2048) { /* Insert a byte. */ - pos = rustsecp256k1_v0_7_0_testrand_int(1 + *len); + pos = rustsecp256k1_v0_8_0_testrand_int(1 + *len); memmove(sig + pos + 1, sig + pos, *len - pos); - sig[pos] = rustsecp256k1_v0_7_0_testrand_bits(8); + sig[pos] = rustsecp256k1_v0_8_0_testrand_bits(8); (*len)++; return; } else if (action < 4) { /* Modify a byte. */ - sig[rustsecp256k1_v0_7_0_testrand_int(*len)] += 1 + rustsecp256k1_v0_7_0_testrand_int(255); + sig[rustsecp256k1_v0_8_0_testrand_int(*len)] += 1 + rustsecp256k1_v0_8_0_testrand_int(255); return; } else { /* action < 8 */ /* Modify a bit. */ - sig[rustsecp256k1_v0_7_0_testrand_int(*len)] ^= 1 << rustsecp256k1_v0_7_0_testrand_bits(3); + sig[rustsecp256k1_v0_8_0_testrand_int(*len)] ^= 1 << rustsecp256k1_v0_8_0_testrand_bits(3); return; } } @@ -6054,23 +6590,23 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly int n; *len = 0; - der = rustsecp256k1_v0_7_0_testrand_bits(2) == 0; + der = rustsecp256k1_v0_8_0_testrand_bits(2) == 0; *certainly_der = der; *certainly_not_der = 0; - indet = der ? 0 : rustsecp256k1_v0_7_0_testrand_int(10) == 0; + indet = der ? 0 : rustsecp256k1_v0_8_0_testrand_int(10) == 0; for (n = 0; n < 2; n++) { /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */ - nlow[n] = der ? 1 : (rustsecp256k1_v0_7_0_testrand_bits(3) != 0); + nlow[n] = der ? 1 : (rustsecp256k1_v0_8_0_testrand_bits(3) != 0); /* The length of the number in bytes (the first byte of which will always be nonzero) */ - nlen[n] = nlow[n] ? rustsecp256k1_v0_7_0_testrand_int(33) : 32 + rustsecp256k1_v0_7_0_testrand_int(200) * rustsecp256k1_v0_7_0_testrand_bits(3) / 8; + nlen[n] = nlow[n] ? rustsecp256k1_v0_8_0_testrand_int(33) : 32 + rustsecp256k1_v0_8_0_testrand_int(200) * rustsecp256k1_v0_8_0_testrand_bits(3) / 8; CHECK(nlen[n] <= 232); /* The top bit of the number. */ - nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 
0 : rustsecp256k1_v0_7_0_testrand_bits(1)); + nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : rustsecp256k1_v0_8_0_testrand_bits(1)); /* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */ - nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + rustsecp256k1_v0_7_0_testrand_bits(7) : 1 + rustsecp256k1_v0_7_0_testrand_int(127)); + nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + rustsecp256k1_v0_8_0_testrand_bits(7) : 1 + rustsecp256k1_v0_8_0_testrand_int(127)); /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */ - nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? rustsecp256k1_v0_7_0_testrand_int(3) : rustsecp256k1_v0_7_0_testrand_int(300 - nlen[n]) * rustsecp256k1_v0_7_0_testrand_bits(3) / 8); + nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? rustsecp256k1_v0_8_0_testrand_int(3) : rustsecp256k1_v0_8_0_testrand_int(300 - nlen[n]) * rustsecp256k1_v0_8_0_testrand_bits(3) / 8); if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) { *certainly_not_der = 1; } @@ -6079,7 +6615,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2); if (!der) { /* nlenlen[n] max 127 bytes */ - int add = rustsecp256k1_v0_7_0_testrand_int(127 - nlenlen[n]) * rustsecp256k1_v0_7_0_testrand_bits(4) * rustsecp256k1_v0_7_0_testrand_bits(4) / 256; + int add = rustsecp256k1_v0_8_0_testrand_int(127 - nlenlen[n]) * rustsecp256k1_v0_8_0_testrand_bits(4) * rustsecp256k1_v0_8_0_testrand_bits(4) / 256; nlenlen[n] += add; if (add != 0) { *certainly_not_der = 1; @@ -6093,7 +6629,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 856); /* The length of the garbage inside the tuple. */ - elen = (der || indet) ? 0 : rustsecp256k1_v0_7_0_testrand_int(980 - tlen) * rustsecp256k1_v0_7_0_testrand_bits(3) / 8; + elen = (der || indet) ? 0 : rustsecp256k1_v0_8_0_testrand_int(980 - tlen) * rustsecp256k1_v0_8_0_testrand_bits(3) / 8; if (elen != 0) { *certainly_not_der = 1; } @@ -6101,7 +6637,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 980); /* The length of the garbage after the end of the tuple. */ - glen = der ? 0 : rustsecp256k1_v0_7_0_testrand_int(990 - tlen) * rustsecp256k1_v0_7_0_testrand_bits(3) / 8; + glen = der ? 0 : rustsecp256k1_v0_8_0_testrand_int(990 - tlen) * rustsecp256k1_v0_8_0_testrand_bits(3) / 8; if (glen != 0) { *certainly_not_der = 1; } @@ -6116,7 +6652,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly } else { int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2); if (!der) { - int add = rustsecp256k1_v0_7_0_testrand_int(127 - tlenlen) * rustsecp256k1_v0_7_0_testrand_bits(4) * rustsecp256k1_v0_7_0_testrand_bits(4) / 256; + int add = rustsecp256k1_v0_8_0_testrand_int(127 - tlenlen) * rustsecp256k1_v0_8_0_testrand_bits(4) * rustsecp256k1_v0_8_0_testrand_bits(4) / 256; tlenlen += add; if (add != 0) { *certainly_not_der = 1; @@ -6167,13 +6703,13 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlen[n]--; } /* Generate remaining random bytes of number */ - rustsecp256k1_v0_7_0_testrand_bytes_test(sig + *len, nlen[n]); + rustsecp256k1_v0_8_0_testrand_bytes_test(sig + *len, nlen[n]); *len += nlen[n]; nlen[n] = 0; } /* Generate random garbage inside tuple. 
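The `random_ber_signature` generator is driven by the DER minimality rule for INTEGERs: the shortest big-endian encoding, with a single 0x00 pad only when the top bit of the first byte is set; any extra leading zeros or oversized length fields make the blob BER-but-not-DER, which is what `certainly_not_der` tracks. A standalone sketch of the minimal-form body (hypothetical helper, not part of the diff):

```c
/* Minimal DER INTEGER body for a 32-byte big-endian value; returns its length.
 * Zero encodes as a single 0x00; a set top bit needs one 0x00 sign pad so the
 * value does not parse as negative. */
size_t der_int_body(unsigned char *out, const unsigned char val32[32]) {
    size_t i = 0, n = 0;
    while (i < 32 && val32[i] == 0) i++;               /* strip leading zeros */
    if (i == 32 || (val32[i] & 0x80)) out[n++] = 0x00; /* zero / sign pad */
    while (i < 32) out[n++] = val32[i++];
    return n;
}
```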
*/ - rustsecp256k1_v0_7_0_testrand_bytes_test(sig + *len, elen); + rustsecp256k1_v0_8_0_testrand_bytes_test(sig + *len, elen); *len += elen; /* Generate end-of-contents bytes. */ @@ -6185,7 +6721,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen + glen <= 1121); /* Generate random garbage outside tuple. */ - rustsecp256k1_v0_7_0_testrand_bytes_test(sig + *len, glen); + rustsecp256k1_v0_8_0_testrand_bytes_test(sig + *len, glen); *len += glen; tlen += glen; CHECK(tlen <= 1121); @@ -6226,22 +6762,22 @@ void run_ecdsa_der_parse(void) { /* Tests several edge cases. */ void test_ecdsa_edge_cases(void) { int t; - rustsecp256k1_v0_7_0_ecdsa_signature sig; + rustsecp256k1_v0_8_0_ecdsa_signature sig; /* Test the case where ECDSA recomputes a point that is infinity. */ { - rustsecp256k1_v0_7_0_gej keyj; - rustsecp256k1_v0_7_0_ge key; - rustsecp256k1_v0_7_0_scalar msg; - rustsecp256k1_v0_7_0_scalar sr, ss; - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 1); - rustsecp256k1_v0_7_0_scalar_negate(&ss, &ss); - rustsecp256k1_v0_7_0_scalar_inverse(&ss, &ss); - rustsecp256k1_v0_7_0_scalar_set_int(&sr, 1); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr); - rustsecp256k1_v0_7_0_ge_set_gej(&key, &keyj); + rustsecp256k1_v0_8_0_gej keyj; + rustsecp256k1_v0_8_0_ge key; + rustsecp256k1_v0_8_0_scalar msg; + rustsecp256k1_v0_8_0_scalar sr, ss; + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_8_0_scalar_negate(&ss, &ss); + rustsecp256k1_v0_8_0_scalar_inverse(&ss, &ss); + rustsecp256k1_v0_8_0_scalar_set_int(&sr, 1); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr); + rustsecp256k1_v0_8_0_ge_set_gej(&key, &keyj); msg = ss; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); } /* Verify signature with r of zero fails. */ @@ -6253,14 +6789,14 @@ void test_ecdsa_edge_cases(void) { 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; - rustsecp256k1_v0_7_0_ge key; - rustsecp256k1_v0_7_0_scalar msg; - rustsecp256k1_v0_7_0_scalar sr, ss; - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 1); - rustsecp256k1_v0_7_0_scalar_set_int(&msg, 0); - rustsecp256k1_v0_7_0_scalar_set_int(&sr, 0); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify( &sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_8_0_ge key; + rustsecp256k1_v0_8_0_scalar msg; + rustsecp256k1_v0_8_0_scalar sr, ss; + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_8_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&sr, 0); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify( &sr, &ss, &key, &msg) == 0); } /* Verify signature with s of zero fails. 
*/ @@ -6272,14 +6808,14 @@ void test_ecdsa_edge_cases(void) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }; - rustsecp256k1_v0_7_0_ge key; - rustsecp256k1_v0_7_0_scalar msg; - rustsecp256k1_v0_7_0_scalar sr, ss; - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 0); - rustsecp256k1_v0_7_0_scalar_set_int(&msg, 0); - rustsecp256k1_v0_7_0_scalar_set_int(&sr, 1); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_8_0_ge key; + rustsecp256k1_v0_8_0_scalar msg; + rustsecp256k1_v0_8_0_scalar sr, ss; + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&sr, 1); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); } /* Verify signature with message 0 passes. */ @@ -6298,23 +6834,23 @@ void test_ecdsa_edge_cases(void) { 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x43 }; - rustsecp256k1_v0_7_0_ge key; - rustsecp256k1_v0_7_0_ge key2; - rustsecp256k1_v0_7_0_scalar msg; - rustsecp256k1_v0_7_0_scalar sr, ss; - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 2); - rustsecp256k1_v0_7_0_scalar_set_int(&msg, 0); - rustsecp256k1_v0_7_0_scalar_set_int(&sr, 2); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_7_0_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 0); + rustsecp256k1_v0_8_0_ge key; + rustsecp256k1_v0_8_0_ge key2; + rustsecp256k1_v0_8_0_scalar msg; + rustsecp256k1_v0_8_0_scalar sr, ss; + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 2); + rustsecp256k1_v0_8_0_scalar_set_int(&msg, 0); + rustsecp256k1_v0_8_0_scalar_set_int(&sr, 2); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_parse(&key2, pubkey2, 33)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_8_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 0); } /* Verify signature with message 1 passes. 
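The zero-`r` and zero-`s` edge cases above must fail because ECDSA verification computes `R = (m/s)*G + (r/s)*Q` and accepts only when `R.x mod n == r` with `1 <= r, s <= n-1`: `s` has no modular inverse at zero, and `r == 0` is excluded by definition. A caller-side guard over a compact signature, as a sketch (hypothetical helper):

```c
#include <string.h>

/* Reject 64-byte compact signatures with a zero r or s scalar up front. */
int sig64_nonzero_scalars(const unsigned char sig64[64]) {
    static const unsigned char zero32[32] = {0};
    return memcmp(sig64, zero32, 32) != 0 && memcmp(sig64 + 32, zero32, 32) != 0;
}
```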
*/ @@ -6339,24 +6875,24 @@ void test_ecdsa_edge_cases(void) { 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xeb }; - rustsecp256k1_v0_7_0_ge key; - rustsecp256k1_v0_7_0_ge key2; - rustsecp256k1_v0_7_0_scalar msg; - rustsecp256k1_v0_7_0_scalar sr, ss; - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 1); - rustsecp256k1_v0_7_0_scalar_set_int(&msg, 1); - rustsecp256k1_v0_7_0_scalar_set_b32(&sr, csr, NULL); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_7_0_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 2); - rustsecp256k1_v0_7_0_scalar_inverse_var(&ss, &ss); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 0); + rustsecp256k1_v0_8_0_ge key; + rustsecp256k1_v0_8_0_ge key2; + rustsecp256k1_v0_8_0_scalar msg; + rustsecp256k1_v0_8_0_scalar sr, ss; + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_8_0_scalar_set_int(&msg, 1); + rustsecp256k1_v0_8_0_scalar_set_b32(&sr, csr, NULL); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_parse(&key2, pubkey2, 33)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_8_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 1); + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 2); + rustsecp256k1_v0_8_0_scalar_inverse_var(&ss, &ss); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key2, &msg) == 0); } /* Verify signature with message -1 passes. 
*/ @@ -6374,25 +6910,25 @@ void test_ecdsa_edge_cases(void) { 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xee }; - rustsecp256k1_v0_7_0_ge key; - rustsecp256k1_v0_7_0_scalar msg; - rustsecp256k1_v0_7_0_scalar sr, ss; - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 1); - rustsecp256k1_v0_7_0_scalar_set_int(&msg, 1); - rustsecp256k1_v0_7_0_scalar_negate(&msg, &msg); - rustsecp256k1_v0_7_0_scalar_set_b32(&sr, csr, NULL); - CHECK(rustsecp256k1_v0_7_0_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - rustsecp256k1_v0_7_0_scalar_negate(&ss, &ss); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); - rustsecp256k1_v0_7_0_scalar_set_int(&ss, 3); - rustsecp256k1_v0_7_0_scalar_inverse_var(&ss, &ss); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); + rustsecp256k1_v0_8_0_ge key; + rustsecp256k1_v0_8_0_scalar msg; + rustsecp256k1_v0_8_0_scalar sr, ss; + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 1); + rustsecp256k1_v0_8_0_scalar_set_int(&msg, 1); + rustsecp256k1_v0_8_0_scalar_negate(&msg, &msg); + rustsecp256k1_v0_8_0_scalar_set_b32(&sr, csr, NULL); + CHECK(rustsecp256k1_v0_8_0_eckey_pubkey_parse(&key, pubkey, 33)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + rustsecp256k1_v0_8_0_scalar_negate(&ss, &ss); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 1); + rustsecp256k1_v0_8_0_scalar_set_int(&ss, 3); + rustsecp256k1_v0_8_0_scalar_inverse_var(&ss, &ss); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sig_verify(&sr, &ss, &key, &msg) == 0); } /* Signature where s would be zero. */ { - rustsecp256k1_v0_7_0_pubkey pubkey; + rustsecp256k1_v0_8_0_pubkey pubkey; size_t siglen; int32_t ecount; unsigned char signature[72]; @@ -6421,71 +6957,71 @@ void test_ecdsa_edge_cases(void) { 0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9, }; ecount = 0; - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); msg[31] = 0xaa; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1); CHECK(ecount == 0); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, key, 
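The `precomputed_nonce_function` checks here exercise the nonce-callback contract: the callback fills `nonce32` and returns 1 to propose a nonce, or 0 to abort signing; `attempt` increments whenever the previous proposal produced an invalid signature (zero `r` or `s`). A sketch matching the shape of the library's nonce-function type (`data` is assumed to point at a caller-supplied 32-byte buffer):

```c
#include <string.h>

/* A nonce callback compatible with ecdsa_sign: offer one fixed nonce, then
 * refuse retries so signing fails cleanly if that nonce yields s == 0. */
static int fixed_nonce_fn(unsigned char *nonce32, const unsigned char *msg32,
                          const unsigned char *key32, const unsigned char *algo16,
                          void *data, unsigned int attempt) {
    (void)msg32; (void)key32; (void)algo16;
    if (attempt > 0) return 0;   /* no alternative nonce to offer */
    memcpy(nonce32, data, 32);   /* data assumed to hold 32 bytes */
    return 1;
}
```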
precomputed_nonce_function, nonce2) == 1); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, key) == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, key) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg, NULL) == 0); CHECK(ecount == 6); - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1); CHECK(ecount == 6); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 7); /* That pubkeyload fails via an ARGCHECK is a little odd but makes sense because pubkeys are an opaque data type. */ - CHECK(rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0); CHECK(ecount == 8); siglen = 72; - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0); CHECK(ecount == 9); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0); CHECK(ecount == 10); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0); CHECK(ecount == 11); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1); CHECK(ecount == 11); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0); CHECK(ecount == 12); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0); CHECK(ecount == 13); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1); CHECK(ecount == 13); siglen = 10; /* Too little room for a signature does not fail via ARGCHECK. 
*/ - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0); CHECK(ecount == 13); ecount = 0; - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_normalize(ctx, NULL, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_normalize(ctx, NULL, NULL) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1); CHECK(ecount == 3); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1); CHECK(ecount == 5); memset(signature, 255, 64); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0); CHECK(ecount == 5); - rustsecp256k1_v0_7_0_context_set_illegal_callback(ctx, NULL, NULL); + rustsecp256k1_v0_8_0_context_set_illegal_callback(ctx, NULL, NULL); } /* Nonce function corner cases. */ @@ -6494,43 +7030,43 @@ void test_ecdsa_edge_cases(void) { int i; unsigned char key[32]; unsigned char msg[32]; - rustsecp256k1_v0_7_0_ecdsa_signature sig2; - rustsecp256k1_v0_7_0_scalar sr[512], ss; + rustsecp256k1_v0_8_0_ecdsa_signature sig2; + rustsecp256k1_v0_8_0_scalar sr[512], ss; const unsigned char *extra; extra = t == 0 ? NULL : zero; memset(msg, 0, 32); msg[31] = 1; /* High key results in signature failure. */ memset(key, 0xFF, 32); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Zero key results in signature failure. */ memset(key, 0, 32); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Nonce function failure results in signature failure. */ key[31] = 1; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0); CHECK(is_empty_signature(&sig)); /* The retry loop successfully makes its way to the first good value. 
*/ - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1); CHECK(!is_empty_signature(&sig)); - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); CHECK(!is_empty_signature(&sig2)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function is deterministic. */ - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function changes output with different messages. */ for(i = 0; i < 256; i++) { int j; msg[0] = i; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { - CHECK(!rustsecp256k1_v0_7_0_scalar_eq(&sr[i], &sr[j])); + CHECK(!rustsecp256k1_v0_8_0_scalar_eq(&sr[i], &sr[j])); } } msg[0] = 0; @@ -6539,11 +7075,11 @@ void test_ecdsa_edge_cases(void) { for(i = 256; i < 512; i++) { int j; key[0] = i - 256; - CHECK(rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); + CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { - CHECK(!rustsecp256k1_v0_7_0_scalar_eq(&sr[i], &sr[j])); + CHECK(!rustsecp256k1_v0_8_0_scalar_eq(&sr[i], &sr[j])); } } key[0] = 0; @@ -6568,12 +7104,12 @@ void test_ecdsa_edge_cases(void) { VG_CHECK(nonce3,32); CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1); VG_CHECK(nonce4,32); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce, nonce2, 32) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce, nonce3, 32) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce, nonce4, 32) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce2, nonce3, 32) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce2, nonce4, 32) != 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(nonce3, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce, nonce2, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce, nonce3, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce2, nonce3, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce2, nonce4, 32) != 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(nonce3, nonce4, 32) != 0); } @@ -6613,166 +7149,179 @@ void run_ecdsa_edge_cases(void) { # include "modules/schnorrsig/tests_impl.h" #endif -void run_rustsecp256k1_v0_7_0_memczero_test(void) { +void run_rustsecp256k1_v0_8_0_memczero_test(void) { unsigned char buf1[6] = {1, 2, 3, 4, 5, 6}; unsigned char buf2[sizeof(buf1)]; - /* 
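The determinism checks above hold because the default nonce function is RFC 6979: the same key, message, and extra data always derive the same nonce, and therefore a byte-identical signature. Sketched against the public API (assuming valid `ctx`, `msg32`, `seckey`; comparing the opaque structs with `memcmp_var` follows the idiom the tests themselves use):

```c
rustsecp256k1_v0_8_0_ecdsa_signature first, second;
CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &first, msg32, seckey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &second, msg32, seckey, NULL, NULL) == 1);
CHECK(rustsecp256k1_v0_8_0_memcmp_var(&first, &second, sizeof(first)) == 0);
```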
rustsecp256k1_v0_7_0_memczero(..., ..., 0) is a noop. */ + /* rustsecp256k1_v0_8_0_memczero(..., ..., 0) is a noop. */ memcpy(buf2, buf1, sizeof(buf1)); - rustsecp256k1_v0_7_0_memczero(buf1, sizeof(buf1), 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); + rustsecp256k1_v0_8_0_memczero(buf1, sizeof(buf1), 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); - /* rustsecp256k1_v0_7_0_memczero(..., ..., 1) zeros the buffer. */ + /* rustsecp256k1_v0_8_0_memczero(..., ..., 1) zeros the buffer. */ memset(buf2, 0, sizeof(buf2)); - rustsecp256k1_v0_7_0_memczero(buf1, sizeof(buf1) , 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); + rustsecp256k1_v0_8_0_memczero(buf1, sizeof(buf1) , 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); +} + +void run_rustsecp256k1_v0_8_0_byteorder_tests(void) { + const uint32_t x = 0xFF03AB45; + const unsigned char x_be[4] = {0xFF, 0x03, 0xAB, 0x45}; + unsigned char buf[4]; + uint32_t x_; + + rustsecp256k1_v0_8_0_write_be32(buf, x); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(buf, x_be, sizeof(buf)) == 0); + + x_ = rustsecp256k1_v0_8_0_read_be32(buf); + CHECK(x == x_); } void int_cmov_test(void) { int r = INT_MAX; int a = 0; - rustsecp256k1_v0_7_0_int_cmov(&r, &a, 0); + rustsecp256k1_v0_8_0_int_cmov(&r, &a, 0); CHECK(r == INT_MAX); r = 0; a = INT_MAX; - rustsecp256k1_v0_7_0_int_cmov(&r, &a, 1); + rustsecp256k1_v0_8_0_int_cmov(&r, &a, 1); CHECK(r == INT_MAX); a = 0; - rustsecp256k1_v0_7_0_int_cmov(&r, &a, 1); + rustsecp256k1_v0_8_0_int_cmov(&r, &a, 1); CHECK(r == 0); a = 1; - rustsecp256k1_v0_7_0_int_cmov(&r, &a, 1); + rustsecp256k1_v0_8_0_int_cmov(&r, &a, 1); CHECK(r == 1); r = 1; a = 0; - rustsecp256k1_v0_7_0_int_cmov(&r, &a, 0); + rustsecp256k1_v0_8_0_int_cmov(&r, &a, 0); CHECK(r == 1); } void fe_cmov_test(void) { - static const rustsecp256k1_v0_7_0_fe zero = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_7_0_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_7_0_fe max = SECP256K1_FE_CONST( + static const rustsecp256k1_v0_8_0_fe zero = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_8_0_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_8_0_fe max = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_7_0_fe r = max; - rustsecp256k1_v0_7_0_fe a = zero; + rustsecp256k1_v0_8_0_fe r = max; + rustsecp256k1_v0_8_0_fe a = zero; - rustsecp256k1_v0_7_0_fe_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_7_0_fe_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_7_0_fe_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_7_0_fe_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = 
zero; - rustsecp256k1_v0_7_0_fe_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &one, sizeof(r)) == 0); } void fe_storage_cmov_test(void) { - static const rustsecp256k1_v0_7_0_fe_storage zero = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_7_0_fe_storage one = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_7_0_fe_storage max = SECP256K1_FE_STORAGE_CONST( + static const rustsecp256k1_v0_8_0_fe_storage zero = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_8_0_fe_storage one = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_8_0_fe_storage max = SECP256K1_FE_STORAGE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_7_0_fe_storage r = max; - rustsecp256k1_v0_7_0_fe_storage a = zero; + rustsecp256k1_v0_8_0_fe_storage r = max; + rustsecp256k1_v0_8_0_fe_storage a = zero; - rustsecp256k1_v0_7_0_fe_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_7_0_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_7_0_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_7_0_fe_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_7_0_fe_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_fe_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &one, sizeof(r)) == 0); } void scalar_cmov_test(void) { - static const rustsecp256k1_v0_7_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_7_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_7_0_scalar max = SECP256K1_SCALAR_CONST( + static const rustsecp256k1_v0_8_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_8_0_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_8_0_scalar max = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_7_0_scalar r = max; - rustsecp256k1_v0_7_0_scalar a = zero; + rustsecp256k1_v0_8_0_scalar r = max; + rustsecp256k1_v0_8_0_scalar a = zero; - rustsecp256k1_v0_7_0_scalar_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_scalar_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_7_0_scalar_cmov(&r, &a, 1); - 
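All of the `*_cmov` helpers tested in this block share one branch-free pattern: build an all-ones or all-zeros mask from the flag and blend the two operands, so the conditional copy happens without a data-dependent branch. The integer case, sketched (this mirrors the shape of the vendored helpers rather than copying them):

```c
/* Copy *a into *r iff flag == 1, without branching on flag. */
static void int_cmov_sketch(int *r, const int *a, int flag) {
    unsigned int mask0 = (unsigned int)flag + ~0u; /* 1 -> 0x0..0, 0 -> 0xF..F */
    unsigned int mask1 = ~mask0;
    *r = (int)(((unsigned int)*r & mask0) | ((unsigned int)*a & mask1));
}
```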
CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_7_0_scalar_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_7_0_scalar_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_scalar_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_7_0_scalar_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_scalar_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &one, sizeof(r)) == 0); } void ge_storage_cmov_test(void) { - static const rustsecp256k1_v0_7_0_ge_storage zero = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - static const rustsecp256k1_v0_7_0_ge_storage one = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1); - static const rustsecp256k1_v0_7_0_ge_storage max = SECP256K1_GE_STORAGE_CONST( + static const rustsecp256k1_v0_8_0_ge_storage zero = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + static const rustsecp256k1_v0_8_0_ge_storage one = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1); + static const rustsecp256k1_v0_8_0_ge_storage max = SECP256K1_GE_STORAGE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); - rustsecp256k1_v0_7_0_ge_storage r = max; - rustsecp256k1_v0_7_0_ge_storage a = zero; + rustsecp256k1_v0_8_0_ge_storage r = max; + rustsecp256k1_v0_8_0_ge_storage a = zero; - rustsecp256k1_v0_7_0_ge_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_ge_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; - rustsecp256k1_v0_7_0_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &max, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; - rustsecp256k1_v0_7_0_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &zero, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; - rustsecp256k1_v0_7_0_ge_storage_cmov(&r, &a, 1); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_ge_storage_cmov(&r, &a, 1); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; - rustsecp256k1_v0_7_0_ge_storage_cmov(&r, &a, 0); - CHECK(rustsecp256k1_v0_7_0_memcmp_var(&r, &one, sizeof(r)) == 0); + rustsecp256k1_v0_8_0_ge_storage_cmov(&r, &a, 0); + CHECK(rustsecp256k1_v0_8_0_memcmp_var(&r, &one, sizeof(r)) == 0); } void run_cmov_tests(void) { @@ -6808,27 +7357,36 @@ int main(int argc, char **argv) { printf("test count = %i\n", count); /* find random seed */ - rustsecp256k1_v0_7_0_testrand_init(argc > 2 ? argv[2] : NULL); + rustsecp256k1_v0_8_0_testrand_init(argc > 2 ? 
argv[2] : NULL); /* initialize */ + run_selftest_tests(); run_context_tests(0); run_context_tests(1); run_scratch_tests(); - ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - if (rustsecp256k1_v0_7_0_testrand_bits(1)) { + + ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); + /* Randomize the context only with probability 15/16 + to make sure we test without context randomization from time to time. + TODO Reconsider this when recalibrating the tests. */ + if (rustsecp256k1_v0_8_0_testrand_bits(4)) { unsigned char rand32[32]; - rustsecp256k1_v0_7_0_testrand256(rand32); - CHECK(rustsecp256k1_v0_7_0_context_randomize(ctx, rustsecp256k1_v0_7_0_testrand_bits(1) ? rand32 : NULL)); + rustsecp256k1_v0_8_0_testrand256(rand32); + CHECK(rustsecp256k1_v0_8_0_context_randomize(ctx, rand32)); } run_rand_bits(); run_rand_int(); +#ifdef SECP256K1_WIDEMUL_INT128 + run_int128_tests(); +#endif run_ctz_tests(); run_modinv_tests(); run_inverse_tests(); - run_sha256_tests(); + run_sha256_known_output_tests(); + run_sha256_counter_tests(); run_hmac_sha256_tests(); run_rfc6979_hmac_sha256_tests(); run_tagged_sha256_tests(); @@ -6837,6 +7395,7 @@ int main(int argc, char **argv) { run_scalar_tests(); /* field tests */ + run_field_half(); run_field_misc(); run_field_convert(); run_fe_mul(); @@ -6899,14 +7458,15 @@ int main(int argc, char **argv) { #endif /* util tests */ - run_rustsecp256k1_v0_7_0_memczero_test(); + run_rustsecp256k1_v0_8_0_memczero_test(); + run_rustsecp256k1_v0_8_0_byteorder_tests(); run_cmov_tests(); - rustsecp256k1_v0_7_0_testrand_finish(); + rustsecp256k1_v0_8_0_testrand_finish(); /* shutdown */ - rustsecp256k1_v0_7_0_context_destroy(ctx); + rustsecp256k1_v0_8_0_context_destroy(ctx); printf("no problems found\n"); return 0; diff --git a/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c b/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c index 7e81649e3..f3ca62286 100644 --- a/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c +++ b/secp256k1-sys/depend/secp256k1/src/tests_exhaustive.c @@ -28,37 +28,37 @@ static int count = 2; /** stolen from tests.c */ -void ge_equals_ge(const rustsecp256k1_v0_7_0_ge *a, const rustsecp256k1_v0_7_0_ge *b) { +void ge_equals_ge(const rustsecp256k1_v0_8_0_ge *a, const rustsecp256k1_v0_8_0_ge *b) { CHECK(a->infinity == b->infinity); if (a->infinity) { return; } - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&a->x, &b->x)); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&a->y, &b->y)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&a->x, &b->x)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&a->y, &b->y)); } -void ge_equals_gej(const rustsecp256k1_v0_7_0_ge *a, const rustsecp256k1_v0_7_0_gej *b) { - rustsecp256k1_v0_7_0_fe z2s; - rustsecp256k1_v0_7_0_fe u1, u2, s1, s2; +void ge_equals_gej(const rustsecp256k1_v0_8_0_ge *a, const rustsecp256k1_v0_8_0_gej *b) { + rustsecp256k1_v0_8_0_fe z2s; + rustsecp256k1_v0_8_0_fe u1, u2, s1, s2; CHECK(a->infinity == b->infinity); if (a->infinity) { return; } /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. 
*/ - rustsecp256k1_v0_7_0_fe_sqr(&z2s, &b->z); - rustsecp256k1_v0_7_0_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; rustsecp256k1_v0_7_0_fe_normalize_weak(&u2); - rustsecp256k1_v0_7_0_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_7_0_fe_mul(&s1, &s1, &b->z); - s2 = b->y; rustsecp256k1_v0_7_0_fe_normalize_weak(&s2); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&u1, &u2)); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&s1, &s2)); + rustsecp256k1_v0_8_0_fe_sqr(&z2s, &b->z); + rustsecp256k1_v0_8_0_fe_mul(&u1, &a->x, &z2s); + u2 = b->x; rustsecp256k1_v0_8_0_fe_normalize_weak(&u2); + rustsecp256k1_v0_8_0_fe_mul(&s1, &a->y, &z2s); rustsecp256k1_v0_8_0_fe_mul(&s1, &s1, &b->z); + s2 = b->y; rustsecp256k1_v0_8_0_fe_normalize_weak(&s2); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&u1, &u2)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&s1, &s2)); } -void random_fe(rustsecp256k1_v0_7_0_fe *x) { +void random_fe(rustsecp256k1_v0_8_0_fe *x) { unsigned char bin[32]; do { - rustsecp256k1_v0_7_0_testrand256(bin); - if (rustsecp256k1_v0_7_0_fe_set_b32(x, bin)) { + rustsecp256k1_v0_8_0_testrand256(bin); + if (rustsecp256k1_v0_8_0_fe_set_b32(x, bin)) { return; } } while(1); @@ -74,10 +74,10 @@ SECP256K1_INLINE static int skip_section(uint64_t* iter) { return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core; } -int rustsecp256k1_v0_7_0_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, +int rustsecp256k1_v0_8_0_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int attempt) { - rustsecp256k1_v0_7_0_scalar s; + rustsecp256k1_v0_8_0_scalar s; int *idata = data; (void)msg32; (void)key32; @@ -89,97 +89,97 @@ int rustsecp256k1_v0_7_0_nonce_function_smallint(unsigned char *nonce32, const u if (attempt > 0) { *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER; } - rustsecp256k1_v0_7_0_scalar_set_int(&s, *idata); - rustsecp256k1_v0_7_0_scalar_get_b32(nonce32, &s); + rustsecp256k1_v0_8_0_scalar_set_int(&s, *idata); + rustsecp256k1_v0_8_0_scalar_get_b32(nonce32, &s); return 1; } -void test_exhaustive_endomorphism(const rustsecp256k1_v0_7_0_ge *group) { +void test_exhaustive_endomorphism(const rustsecp256k1_v0_8_0_ge *group) { int i; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_7_0_ge res; - rustsecp256k1_v0_7_0_ge_mul_lambda(&res, &group[i]); + rustsecp256k1_v0_8_0_ge res; + rustsecp256k1_v0_8_0_ge_mul_lambda(&res, &group[i]); ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); } } -void test_exhaustive_addition(const rustsecp256k1_v0_7_0_ge *group, const rustsecp256k1_v0_7_0_gej *groupj) { +void test_exhaustive_addition(const rustsecp256k1_v0_8_0_ge *group, const rustsecp256k1_v0_8_0_gej *groupj) { int i, j; uint64_t iter = 0; /* Sanity-check (and check infinity functions) */ - CHECK(rustsecp256k1_v0_7_0_ge_is_infinity(&group[0])); - CHECK(rustsecp256k1_v0_7_0_gej_is_infinity(&groupj[0])); + CHECK(rustsecp256k1_v0_8_0_ge_is_infinity(&group[0])); + CHECK(rustsecp256k1_v0_8_0_gej_is_infinity(&groupj[0])); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - CHECK(!rustsecp256k1_v0_7_0_ge_is_infinity(&group[i])); - CHECK(!rustsecp256k1_v0_7_0_gej_is_infinity(&groupj[i])); + CHECK(!rustsecp256k1_v0_8_0_ge_is_infinity(&group[i])); + CHECK(!rustsecp256k1_v0_8_0_gej_is_infinity(&groupj[i])); } /* Check all addition formulae */ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { - rustsecp256k1_v0_7_0_fe fe_inv; + rustsecp256k1_v0_8_0_fe fe_inv; if (skip_section(&iter)) 
continue; - rustsecp256k1_v0_7_0_fe_inv(&fe_inv, &groupj[j].z); + rustsecp256k1_v0_8_0_fe_inv(&fe_inv, &groupj[j].z); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_7_0_ge zless_gej; - rustsecp256k1_v0_7_0_gej tmp; + rustsecp256k1_v0_8_0_ge zless_gej; + rustsecp256k1_v0_8_0_gej tmp; /* add_var */ - rustsecp256k1_v0_7_0_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); + rustsecp256k1_v0_8_0_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); /* add_ge */ if (j > 0) { - rustsecp256k1_v0_7_0_gej_add_ge(&tmp, &groupj[i], &group[j]); + rustsecp256k1_v0_8_0_gej_add_ge(&tmp, &groupj[i], &group[j]); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); } /* add_ge_var */ - rustsecp256k1_v0_7_0_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); + rustsecp256k1_v0_8_0_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); /* add_zinv_var */ zless_gej.infinity = groupj[j].infinity; zless_gej.x = groupj[j].x; zless_gej.y = groupj[j].y; - rustsecp256k1_v0_7_0_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); + rustsecp256k1_v0_8_0_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); } } /* Check doubling */ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_7_0_gej tmp; - rustsecp256k1_v0_7_0_gej_double(&tmp, &groupj[i]); + rustsecp256k1_v0_8_0_gej tmp; + rustsecp256k1_v0_8_0_gej_double(&tmp, &groupj[i]); ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); - rustsecp256k1_v0_7_0_gej_double_var(&tmp, &groupj[i], NULL); + rustsecp256k1_v0_8_0_gej_double_var(&tmp, &groupj[i], NULL); ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); } /* Check negation */ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_7_0_ge tmp; - rustsecp256k1_v0_7_0_gej tmpj; - rustsecp256k1_v0_7_0_ge_neg(&tmp, &group[i]); + rustsecp256k1_v0_8_0_ge tmp; + rustsecp256k1_v0_8_0_gej tmpj; + rustsecp256k1_v0_8_0_ge_neg(&tmp, &group[i]); ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp); - rustsecp256k1_v0_7_0_gej_neg(&tmpj, &groupj[i]); + rustsecp256k1_v0_8_0_gej_neg(&tmpj, &groupj[i]); ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj); } } -void test_exhaustive_ecmult(const rustsecp256k1_v0_7_0_ge *group, const rustsecp256k1_v0_7_0_gej *groupj) { +void test_exhaustive_ecmult(const rustsecp256k1_v0_8_0_ge *group, const rustsecp256k1_v0_8_0_gej *groupj) { int i, j, r_log; uint64_t iter = 0; for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) { for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { if (skip_section(&iter)) continue; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_7_0_gej tmp; - rustsecp256k1_v0_7_0_scalar na, ng; - rustsecp256k1_v0_7_0_scalar_set_int(&na, i); - rustsecp256k1_v0_7_0_scalar_set_int(&ng, j); + rustsecp256k1_v0_8_0_gej tmp; + rustsecp256k1_v0_8_0_scalar na, ng; + rustsecp256k1_v0_8_0_scalar_set_int(&na, i); + rustsecp256k1_v0_8_0_scalar_set_int(&ng, j); - rustsecp256k1_v0_7_0_ecmult(&tmp, &groupj[r_log], &na, &ng); + rustsecp256k1_v0_8_0_ecmult(&tmp, &groupj[r_log], &na, &ng); ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp); if (i > 0) { - rustsecp256k1_v0_7_0_ecmult_const(&tmp, &group[i], &ng, 256); + rustsecp256k1_v0_8_0_ecmult_const(&tmp, &group[i], &ng, 256); ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &tmp); } } @@ -188,111 +188,111 @@ void test_exhaustive_ecmult(const 
rustsecp256k1_v0_7_0_ge *group, const rustsecp } typedef struct { - rustsecp256k1_v0_7_0_scalar sc[2]; - rustsecp256k1_v0_7_0_ge pt[2]; + rustsecp256k1_v0_8_0_scalar sc[2]; + rustsecp256k1_v0_8_0_ge pt[2]; } ecmult_multi_data; -static int ecmult_multi_callback(rustsecp256k1_v0_7_0_scalar *sc, rustsecp256k1_v0_7_0_ge *pt, size_t idx, void *cbdata) { +static int ecmult_multi_callback(rustsecp256k1_v0_8_0_scalar *sc, rustsecp256k1_v0_8_0_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } -void test_exhaustive_ecmult_multi(const rustsecp256k1_v0_7_0_context *ctx, const rustsecp256k1_v0_7_0_ge *group) { +void test_exhaustive_ecmult_multi(const rustsecp256k1_v0_8_0_context *ctx, const rustsecp256k1_v0_8_0_ge *group) { int i, j, k, x, y; uint64_t iter = 0; - rustsecp256k1_v0_7_0_scratch *scratch = rustsecp256k1_v0_7_0_scratch_create(&ctx->error_callback, 4096); + rustsecp256k1_v0_8_0_scratch *scratch = rustsecp256k1_v0_8_0_scratch_create(&ctx->error_callback, 4096); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) { if (skip_section(&iter)) continue; for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) { - rustsecp256k1_v0_7_0_gej tmp; - rustsecp256k1_v0_7_0_scalar g_sc; + rustsecp256k1_v0_8_0_gej tmp; + rustsecp256k1_v0_8_0_scalar g_sc; ecmult_multi_data data; - rustsecp256k1_v0_7_0_scalar_set_int(&data.sc[0], i); - rustsecp256k1_v0_7_0_scalar_set_int(&data.sc[1], j); - rustsecp256k1_v0_7_0_scalar_set_int(&g_sc, k); + rustsecp256k1_v0_8_0_scalar_set_int(&data.sc[0], i); + rustsecp256k1_v0_8_0_scalar_set_int(&data.sc[1], j); + rustsecp256k1_v0_8_0_scalar_set_int(&g_sc, k); data.pt[0] = group[x]; data.pt[1] = group[y]; - rustsecp256k1_v0_7_0_ecmult_multi_var(&ctx->error_callback, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); + rustsecp256k1_v0_8_0_ecmult_multi_var(&ctx->error_callback, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp); } } } } } - rustsecp256k1_v0_7_0_scratch_destroy(&ctx->error_callback, scratch); + rustsecp256k1_v0_8_0_scratch_destroy(&ctx->error_callback, scratch); } -void r_from_k(rustsecp256k1_v0_7_0_scalar *r, const rustsecp256k1_v0_7_0_ge *group, int k, int* overflow) { - rustsecp256k1_v0_7_0_fe x; +void r_from_k(rustsecp256k1_v0_8_0_scalar *r, const rustsecp256k1_v0_8_0_ge *group, int k, int* overflow) { + rustsecp256k1_v0_8_0_fe x; unsigned char x_bin[32]; k %= EXHAUSTIVE_TEST_ORDER; x = group[k].x; - rustsecp256k1_v0_7_0_fe_normalize(&x); - rustsecp256k1_v0_7_0_fe_get_b32(x_bin, &x); - rustsecp256k1_v0_7_0_scalar_set_b32(r, x_bin, overflow); + rustsecp256k1_v0_8_0_fe_normalize(&x); + rustsecp256k1_v0_8_0_fe_get_b32(x_bin, &x); + rustsecp256k1_v0_8_0_scalar_set_b32(r, x_bin, overflow); } -void test_exhaustive_verify(const rustsecp256k1_v0_7_0_context *ctx, const rustsecp256k1_v0_7_0_ge *group) { +void test_exhaustive_verify(const rustsecp256k1_v0_8_0_context *ctx, const rustsecp256k1_v0_8_0_ge *group) { int s, r, msg, key; uint64_t iter = 0; for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) { for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) { for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) { for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) { - rustsecp256k1_v0_7_0_ge nonconst_ge; - rustsecp256k1_v0_7_0_ecdsa_signature sig; - rustsecp256k1_v0_7_0_pubkey pk; - 
rustsecp256k1_v0_7_0_scalar sk_s, msg_s, r_s, s_s; - rustsecp256k1_v0_7_0_scalar s_times_k_s, msg_plus_r_times_sk_s; + rustsecp256k1_v0_8_0_ge nonconst_ge; + rustsecp256k1_v0_8_0_ecdsa_signature sig; + rustsecp256k1_v0_8_0_pubkey pk; + rustsecp256k1_v0_8_0_scalar sk_s, msg_s, r_s, s_s; + rustsecp256k1_v0_8_0_scalar s_times_k_s, msg_plus_r_times_sk_s; int k, should_verify; unsigned char msg32[32]; if (skip_section(&iter)) continue; - rustsecp256k1_v0_7_0_scalar_set_int(&s_s, s); - rustsecp256k1_v0_7_0_scalar_set_int(&r_s, r); - rustsecp256k1_v0_7_0_scalar_set_int(&msg_s, msg); - rustsecp256k1_v0_7_0_scalar_set_int(&sk_s, key); + rustsecp256k1_v0_8_0_scalar_set_int(&s_s, s); + rustsecp256k1_v0_8_0_scalar_set_int(&r_s, r); + rustsecp256k1_v0_8_0_scalar_set_int(&msg_s, msg); + rustsecp256k1_v0_8_0_scalar_set_int(&sk_s, key); /* Verify by hand */ /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird. */ should_verify = 0; for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { - rustsecp256k1_v0_7_0_scalar check_x_s; + rustsecp256k1_v0_8_0_scalar check_x_s; r_from_k(&check_x_s, group, k, NULL); if (r_s == check_x_s) { - rustsecp256k1_v0_7_0_scalar_set_int(&s_times_k_s, k); - rustsecp256k1_v0_7_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - rustsecp256k1_v0_7_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - rustsecp256k1_v0_7_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= rustsecp256k1_v0_7_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + rustsecp256k1_v0_8_0_scalar_set_int(&s_times_k_s, k); + rustsecp256k1_v0_8_0_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + rustsecp256k1_v0_8_0_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + rustsecp256k1_v0_8_0_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= rustsecp256k1_v0_8_0_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); } } /* nb we have a "high s" rule */ - should_verify &= !rustsecp256k1_v0_7_0_scalar_is_high(&s_s); + should_verify &= !rustsecp256k1_v0_8_0_scalar_is_high(&s_s); /* Verify by calling verify */ - rustsecp256k1_v0_7_0_ecdsa_signature_save(&sig, &r_s, &s_s); + rustsecp256k1_v0_8_0_ecdsa_signature_save(&sig, &r_s, &s_s); memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - rustsecp256k1_v0_7_0_pubkey_save(&pk, &nonconst_ge); - rustsecp256k1_v0_7_0_scalar_get_b32(msg32, &msg_s); + rustsecp256k1_v0_8_0_pubkey_save(&pk, &nonconst_ge); + rustsecp256k1_v0_8_0_scalar_get_b32(msg32, &msg_s); CHECK(should_verify == - rustsecp256k1_v0_7_0_ecdsa_verify(ctx, &sig, msg32, &pk)); + rustsecp256k1_v0_8_0_ecdsa_verify(ctx, &sig, msg32, &pk)); } } } } } -void test_exhaustive_sign(const rustsecp256k1_v0_7_0_context *ctx, const rustsecp256k1_v0_7_0_ge *group) { +void test_exhaustive_sign(const rustsecp256k1_v0_8_0_context *ctx, const rustsecp256k1_v0_8_0_ge *group) { int i, j, k; uint64_t iter = 0; @@ -303,18 +303,18 @@ void test_exhaustive_sign(const rustsecp256k1_v0_7_0_context *ctx, const rustsec for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ const int starting_k = k; int ret; - rustsecp256k1_v0_7_0_ecdsa_signature sig; - rustsecp256k1_v0_7_0_scalar sk, msg, r, s, expected_r; + rustsecp256k1_v0_8_0_ecdsa_signature sig; + rustsecp256k1_v0_8_0_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; - rustsecp256k1_v0_7_0_scalar_set_int(&msg, i); - rustsecp256k1_v0_7_0_scalar_set_int(&sk, j); - rustsecp256k1_v0_7_0_scalar_get_b32(sk32, &sk); - 
rustsecp256k1_v0_7_0_scalar_get_b32(msg32, &msg); + rustsecp256k1_v0_8_0_scalar_set_int(&msg, i); + rustsecp256k1_v0_8_0_scalar_set_int(&sk, j); + rustsecp256k1_v0_8_0_scalar_get_b32(sk32, &sk); + rustsecp256k1_v0_8_0_scalar_get_b32(msg32, &msg); - ret = rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1_v0_7_0_nonce_function_smallint, &k); + ret = rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1_v0_8_0_nonce_function_smallint, &k); CHECK(ret == 1); - rustsecp256k1_v0_7_0_ecdsa_signature_load(ctx, &r, &s, &sig); + rustsecp256k1_v0_8_0_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important * because our nonce-computing function might change k during * signing. */ @@ -342,23 +342,23 @@ void test_exhaustive_sign(const rustsecp256k1_v0_7_0_context *ctx, const rustsec } #ifdef ENABLE_MODULE_RECOVERY -#include "src/modules/recovery/tests_exhaustive_impl.h" +#include "modules/recovery/tests_exhaustive_impl.h" #endif #ifdef ENABLE_MODULE_EXTRAKEYS -#include "src/modules/extrakeys/tests_exhaustive_impl.h" +#include "modules/extrakeys/tests_exhaustive_impl.h" #endif #ifdef ENABLE_MODULE_SCHNORRSIG -#include "src/modules/schnorrsig/tests_exhaustive_impl.h" +#include "modules/schnorrsig/tests_exhaustive_impl.h" #endif int main(int argc, char** argv) { int i; - rustsecp256k1_v0_7_0_gej groupj[EXHAUSTIVE_TEST_ORDER]; - rustsecp256k1_v0_7_0_ge group[EXHAUSTIVE_TEST_ORDER]; + rustsecp256k1_v0_8_0_gej groupj[EXHAUSTIVE_TEST_ORDER]; + rustsecp256k1_v0_8_0_ge group[EXHAUSTIVE_TEST_ORDER]; unsigned char rand32[32]; - rustsecp256k1_v0_7_0_context *ctx; + rustsecp256k1_v0_8_0_context *ctx; /* Disable buffering for stdout to improve reliability of getting * diagnostic information. Happens right at the start of main because @@ -377,7 +377,7 @@ int main(int argc, char** argv) { printf("test count = %i\n", count); /* find random seed */ - rustsecp256k1_v0_7_0_testrand_init(argc > 2 ? 
argv[2] : NULL); /* set up split processing */ if (argc > 4) { @@ -391,43 +391,43 @@ int main(int argc, char** argv) { } /* Recreate the ecmult{,_gen} tables using the right generator (as selected via EXHAUSTIVE_TEST_ORDER) */ - rustsecp256k1_v0_7_0_ecmult_gen_compute_table(&rustsecp256k1_v0_7_0_ecmult_gen_prec_table[0][0], &rustsecp256k1_v0_7_0_ge_const_g, ECMULT_GEN_PREC_BITS); - rustsecp256k1_v0_7_0_ecmult_compute_two_tables(rustsecp256k1_v0_7_0_pre_g, rustsecp256k1_v0_7_0_pre_g_128, WINDOW_G, &rustsecp256k1_v0_7_0_ge_const_g); + rustsecp256k1_v0_8_0_ecmult_gen_compute_table(&rustsecp256k1_v0_8_0_ecmult_gen_prec_table[0][0], &rustsecp256k1_v0_8_0_ge_const_g, ECMULT_GEN_PREC_BITS); + rustsecp256k1_v0_8_0_ecmult_compute_two_tables(rustsecp256k1_v0_8_0_pre_g, rustsecp256k1_v0_8_0_pre_g_128, WINDOW_G, &rustsecp256k1_v0_8_0_ge_const_g); while (count--) { /* Build context */ - ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - rustsecp256k1_v0_7_0_testrand256(rand32); - CHECK(rustsecp256k1_v0_7_0_context_randomize(ctx, rand32)); + ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_NONE); + rustsecp256k1_v0_8_0_testrand256(rand32); + CHECK(rustsecp256k1_v0_8_0_context_randomize(ctx, rand32)); /* Generate the entire group */ - rustsecp256k1_v0_7_0_gej_set_infinity(&groupj[0]); - rustsecp256k1_v0_7_0_ge_set_gej(&group[0], &groupj[0]); + rustsecp256k1_v0_8_0_gej_set_infinity(&groupj[0]); + rustsecp256k1_v0_8_0_ge_set_gej(&group[0], &groupj[0]); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - rustsecp256k1_v0_7_0_gej_add_ge(&groupj[i], &groupj[i - 1], &rustsecp256k1_v0_7_0_ge_const_g); - rustsecp256k1_v0_7_0_ge_set_gej(&group[i], &groupj[i]); + rustsecp256k1_v0_8_0_gej_add_ge(&groupj[i], &groupj[i - 1], &rustsecp256k1_v0_8_0_ge_const_g); + rustsecp256k1_v0_8_0_ge_set_gej(&group[i], &groupj[i]); if (count != 0) { /* Set a different random z-value for each Jacobian point, except z=1 is used in the last iteration. 
*/ - rustsecp256k1_v0_7_0_fe z; + rustsecp256k1_v0_8_0_fe z; random_fe(&z); - rustsecp256k1_v0_7_0_gej_rescale(&groupj[i], &z); + rustsecp256k1_v0_8_0_gej_rescale(&groupj[i], &z); } /* Verify against ecmult_gen */ { - rustsecp256k1_v0_7_0_scalar scalar_i; - rustsecp256k1_v0_7_0_gej generatedj; - rustsecp256k1_v0_7_0_ge generated; + rustsecp256k1_v0_8_0_scalar scalar_i; + rustsecp256k1_v0_8_0_gej generatedj; + rustsecp256k1_v0_8_0_ge generated; - rustsecp256k1_v0_7_0_scalar_set_int(&scalar_i, i); - rustsecp256k1_v0_7_0_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); - rustsecp256k1_v0_7_0_ge_set_gej(&generated, &generatedj); + rustsecp256k1_v0_8_0_scalar_set_int(&scalar_i, i); + rustsecp256k1_v0_8_0_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); + rustsecp256k1_v0_8_0_ge_set_gej(&generated, &generatedj); CHECK(group[i].infinity == 0); CHECK(generated.infinity == 0); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&generated.x, &group[i].x)); - CHECK(rustsecp256k1_v0_7_0_fe_equal_var(&generated.y, &group[i].y)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&generated.x, &group[i].x)); + CHECK(rustsecp256k1_v0_8_0_fe_equal_var(&generated.y, &group[i].y)); } } @@ -449,10 +449,10 @@ int main(int argc, char** argv) { test_exhaustive_schnorrsig(ctx); #endif - rustsecp256k1_v0_7_0_context_destroy(ctx); + rustsecp256k1_v0_8_0_context_destroy(ctx); } - rustsecp256k1_v0_7_0_testrand_finish(); + rustsecp256k1_v0_8_0_testrand_finish(); printf("no problems found\n"); return 0; diff --git a/secp256k1-sys/depend/secp256k1/src/util.h b/secp256k1-sys/depend/secp256k1/src/util.h index 018e81795..e6dac67a6 100644 --- a/secp256k1-sys/depend/secp256k1/src/util.h +++ b/secp256k1-sys/depend/secp256k1/src/util.h @@ -16,38 +16,43 @@ #include #include +#define STR_(x) #x +#define STR(x) STR_(x) +#define DEBUG_CONFIG_MSG(x) "DEBUG_CONFIG: " x +#define DEBUG_CONFIG_DEF(x) DEBUG_CONFIG_MSG(#x "=" STR(x)) + typedef struct { void (*fn)(const char *text, void* data); const void* data; -} rustsecp256k1_v0_7_0_callback; +} rustsecp256k1_v0_8_0_callback; -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_callback_call(const rustsecp256k1_v0_7_0_callback * const cb, const char * const text) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_callback_call(const rustsecp256k1_v0_8_0_callback * const cb, const char * const text) { cb->fn(text, (void*)cb->data); } #ifndef USE_EXTERNAL_DEFAULT_CALLBACKS -static void rustsecp256k1_v0_7_0_default_illegal_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_8_0_default_illegal_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); abort(); } -static void rustsecp256k1_v0_7_0_default_error_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_8_0_default_error_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); abort(); } #else -void rustsecp256k1_v0_7_0_default_illegal_callback_fn(const char* str, void* data); -void rustsecp256k1_v0_7_0_default_error_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_8_0_default_illegal_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_8_0_default_error_callback_fn(const char* str, void* data); #endif -static const rustsecp256k1_v0_7_0_callback default_illegal_callback = { - rustsecp256k1_v0_7_0_default_illegal_callback_fn, +static const rustsecp256k1_v0_8_0_callback default_illegal_callback = { + 
rustsecp256k1_v0_8_0_default_illegal_callback_fn, NULL }; -static const rustsecp256k1_v0_7_0_callback default_error_callback = { - rustsecp256k1_v0_7_0_default_error_callback_fn, +static const rustsecp256k1_v0_8_0_callback default_error_callback = { + rustsecp256k1_v0_8_0_default_error_callback_fn, NULL }; @@ -157,33 +162,8 @@ static const rustsecp256k1_v0_7_0_callback default_error_callback = { # define SECP256K1_GNUC_EXT #endif -/* If SECP256K1_{LITTLE,BIG}_ENDIAN is not explicitly provided, infer from various other system macros. */ -#if !defined(SECP256K1_LITTLE_ENDIAN) && !defined(SECP256K1_BIG_ENDIAN) -/* Inspired by https://github.com/rofl0r/endianness.h/blob/9853923246b065a3b52d2c43835f3819a62c7199/endianness.h#L52L73 */ -# if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \ - defined(_X86_) || defined(__x86_64__) || defined(__i386__) || \ - defined(__i486__) || defined(__i586__) || defined(__i686__) || \ - defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) || \ - defined(__ARMEL__) || defined(__AARCH64EL__) || \ - (defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__ == 1) || \ - (defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN == 1) || \ - defined(_M_IX86) || defined(_M_AMD64) || defined(_M_ARM) /* MSVC */ -# define SECP256K1_LITTLE_ENDIAN -# endif -# if (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \ - defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) || \ - defined(__MICROBLAZEEB__) || defined(__ARMEB__) || defined(__AARCH64EB__) || \ - (defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ == 1) || \ - (defined(_BIG_ENDIAN) && _BIG_ENDIAN == 1) -# define SECP256K1_BIG_ENDIAN -# endif -#endif -#if defined(SECP256K1_LITTLE_ENDIAN) == defined(SECP256K1_BIG_ENDIAN) -# error Please make sure that either SECP256K1_LITTLE_ENDIAN or SECP256K1_BIG_ENDIAN is set, see src/util.h. -#endif - /* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. */ -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_memczero(void *s, size_t len, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_memczero(void *s, size_t len, int flag) { unsigned char *p = (unsigned char *)s; /* Access flag with a volatile-qualified lvalue. This prevents clang from figuring out (after inlining) that flag can @@ -202,7 +182,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_memczero(void *s, size_t len, * We use this to avoid possible compiler bugs with memcmp, e.g. * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189 */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_memcmp_var(const void *s1, const void *s2, size_t n) { +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_memcmp_var(const void *s1, const void *s2, size_t n) { const unsigned char *p1 = s1, *p2 = s2; size_t i; @@ -216,7 +196,7 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_memcmp_var(const void *s1, cons } /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/ -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_int_cmov(int *r, const int *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_int_cmov(int *r, const int *a, int flag) { unsigned int mask0, mask1, r_masked, a_masked; /* Access flag with a volatile-qualified lvalue. 
This prevents clang from figuring out (after inlining) that flag can @@ -234,28 +214,36 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_int_cmov(int *r, const int *a, *r = (int)(r_masked | a_masked); } -/* If USE_FORCE_WIDEMUL_{INT128,INT64} is set, use that wide multiplication implementation. - * Otherwise use the presence of __SIZEOF_INT128__ to decide. - */ -#if defined(USE_FORCE_WIDEMUL_INT128) +#if defined(USE_FORCE_WIDEMUL_INT128_STRUCT) +/* If USE_FORCE_WIDEMUL_INT128_STRUCT is set, use int128_struct. */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 +#elif defined(USE_FORCE_WIDEMUL_INT128) +/* If USE_FORCE_WIDEMUL_INT128 is set, use int128. */ # define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_NATIVE 1 #elif defined(USE_FORCE_WIDEMUL_INT64) +/* If USE_FORCE_WIDEMUL_INT64 is set, use int64. */ # define SECP256K1_WIDEMUL_INT64 1 #elif defined(UINT128_MAX) || defined(__SIZEOF_INT128__) +/* If a native 128-bit integer type exists, use int128. */ # define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_NATIVE 1 +#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) +/* On 64-bit MSVC targets (x86_64 and arm64), use int128_struct + * (which has special logic to implement using intrinsics on those systems). */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 +#elif SIZE_MAX > 0xffffffff +/* Systems with 64-bit pointers (and thus registers) very likely benefit from + * using 64-bit based arithmetic (even if we need to fall back to 32x32->64 based + * multiplication logic). */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 #else +/* Lastly, fall back to int64 based arithmetic. */ # define SECP256K1_WIDEMUL_INT64 1 #endif -#if defined(SECP256K1_WIDEMUL_INT128) -# if !defined(UINT128_MAX) && defined(__SIZEOF_INT128__) -SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t; -SECP256K1_GNUC_EXT typedef __int128 int128_t; -#define UINT128_MAX ((uint128_t)(-1)) -#define INT128_MAX ((int128_t)(UINT128_MAX >> 1)) -#define INT128_MIN (-INT128_MAX - 1) -/* No (U)INT128_C macros because compilers providing __int128 do not support 128-bit literals. */ -# endif -#endif #ifndef __has_builtin #define __has_builtin(x) 0 @@ -263,8 +251,8 @@ SECP256K1_GNUC_EXT typedef __int128 int128_t; /* Determine the number of trailing zero bits in a (non-zero) 32-bit x. * This function is only intended to be used as fallback for - * rustsecp256k1_v0_7_0_ctz32_var, but permits it to be tested separately. */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz32_var_debruijn(uint32_t x) { + * rustsecp256k1_v0_8_0_ctz32_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_ctz32_var_debruijn(uint32_t x) { static const uint8_t debruijn[32] = { 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, @@ -275,8 +263,8 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz32_var_debruijn(uint32_t x) /* Determine the number of trailing zero bits in a (non-zero) 64-bit x. * This function is only intended to be used as fallback for - * rustsecp256k1_v0_7_0_ctz64_var, but permits it to be tested separately. */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz64_var_debruijn(uint64_t x) { + * rustsecp256k1_v0_8_0_ctz64_var, but permits it to be tested separately. 
*/ +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_ctz64_var_debruijn(uint64_t x) { static const uint8_t debruijn[64] = { 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, @@ -287,7 +275,7 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz64_var_debruijn(uint64_t x) } /* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz32_var(uint32_t x) { +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_ctz32_var(uint32_t x) { VERIFY_CHECK(x != 0); #if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4)) /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. */ @@ -300,12 +288,12 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz32_var(uint32_t x) { return __builtin_ctzl(x); #else /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ - return rustsecp256k1_v0_7_0_ctz32_var_debruijn(x); + return rustsecp256k1_v0_8_0_ctz32_var_debruijn(x); #endif } /* Determine the number of trailing zero bits in a (non-zero) 64-bit x. */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz64_var(uint64_t x) { +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_ctz64_var(uint64_t x) { VERIFY_CHECK(x != 0); #if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */ @@ -318,8 +306,24 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz64_var(uint64_t x) { return __builtin_ctzll(x); #else /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ - return rustsecp256k1_v0_7_0_ctz64_var_debruijn(x); + return rustsecp256k1_v0_8_0_ctz64_var_debruijn(x); #endif } +/* Read a uint32_t in big endian */ +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_8_0_read_be32(const unsigned char* p) { + return (uint32_t)p[0] << 24 | + (uint32_t)p[1] << 16 | + (uint32_t)p[2] << 8 | + (uint32_t)p[3]; +} + +/* Write a uint32_t in big endian */ +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_write_be32(unsigned char* p, uint32_t x) { + p[3] = x; + p[2] = x >> 8; + p[1] = x >> 16; + p[0] = x >> 24; +} + #endif /* SECP256K1_UTIL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/util.h.orig b/secp256k1-sys/depend/secp256k1/src/util.h.orig index 931259f20..70b0bd99c 100644 --- a/secp256k1-sys/depend/secp256k1/src/util.h.orig +++ b/secp256k1-sys/depend/secp256k1/src/util.h.orig @@ -16,38 +16,43 @@ #include #include +#define STR_(x) #x +#define STR(x) STR_(x) +#define DEBUG_CONFIG_MSG(x) "DEBUG_CONFIG: " x +#define DEBUG_CONFIG_DEF(x) DEBUG_CONFIG_MSG(#x "=" STR(x)) + typedef struct { void (*fn)(const char *text, void* data); const void* data; -} rustsecp256k1_v0_7_0_callback; +} rustsecp256k1_v0_8_0_callback; -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_callback_call(const rustsecp256k1_v0_7_0_callback * const cb, const char * const text) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_callback_call(const rustsecp256k1_v0_8_0_callback * const cb, const char * const text) { cb->fn(text, (void*)cb->data); } #ifndef USE_EXTERNAL_DEFAULT_CALLBACKS -static void rustsecp256k1_v0_7_0_default_illegal_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_8_0_default_illegal_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); abort(); } -static void 
rustsecp256k1_v0_7_0_default_error_callback_fn(const char* str, void* data) { +static void rustsecp256k1_v0_8_0_default_error_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); abort(); } #else -void rustsecp256k1_v0_7_0_default_illegal_callback_fn(const char* str, void* data); -void rustsecp256k1_v0_7_0_default_error_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_8_0_default_illegal_callback_fn(const char* str, void* data); +void rustsecp256k1_v0_8_0_default_error_callback_fn(const char* str, void* data); #endif -static const rustsecp256k1_v0_7_0_callback default_illegal_callback = { - rustsecp256k1_v0_7_0_default_illegal_callback_fn, +static const rustsecp256k1_v0_8_0_callback default_illegal_callback = { + rustsecp256k1_v0_8_0_default_illegal_callback_fn, NULL }; -static const rustsecp256k1_v0_7_0_callback default_error_callback = { - rustsecp256k1_v0_7_0_default_error_callback_fn, +static const rustsecp256k1_v0_8_0_callback default_error_callback = { + rustsecp256k1_v0_8_0_default_error_callback_fn, NULL }; @@ -115,18 +120,18 @@ static const rustsecp256k1_v0_7_0_callback default_error_callback = { #define VG_CHECK_VERIFY(x,y) #endif -static SECP256K1_INLINE void *checked_malloc(const rustsecp256k1_v0_7_0_callback* cb, size_t size) { +static SECP256K1_INLINE void *checked_malloc(const rustsecp256k1_v0_8_0_callback* cb, size_t size) { void *ret = malloc(size); if (ret == NULL) { - rustsecp256k1_v0_7_0_callback_call(cb, "Out of memory"); + rustsecp256k1_v0_8_0_callback_call(cb, "Out of memory"); } return ret; } -static SECP256K1_INLINE void *checked_realloc(const rustsecp256k1_v0_7_0_callback* cb, void *ptr, size_t size) { +static SECP256K1_INLINE void *checked_realloc(const rustsecp256k1_v0_8_0_callback* cb, void *ptr, size_t size) { void *ret = realloc(ptr, size); if (ret == NULL) { - rustsecp256k1_v0_7_0_callback_call(cb, "Out of memory"); + rustsecp256k1_v0_8_0_callback_call(cb, "Out of memory"); } return ret; } @@ -173,33 +178,8 @@ static SECP256K1_INLINE void *checked_realloc(const rustsecp256k1_v0_7_0_callbac # define SECP256K1_GNUC_EXT #endif -/* If SECP256K1_{LITTLE,BIG}_ENDIAN is not explicitly provided, infer from various other system macros. 
*/ -#if !defined(SECP256K1_LITTLE_ENDIAN) && !defined(SECP256K1_BIG_ENDIAN) -/* Inspired by https://github.com/rofl0r/endianness.h/blob/9853923246b065a3b52d2c43835f3819a62c7199/endianness.h#L52L73 */ -# if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \ - defined(_X86_) || defined(__x86_64__) || defined(__i386__) || \ - defined(__i486__) || defined(__i586__) || defined(__i686__) || \ - defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) || \ - defined(__ARMEL__) || defined(__AARCH64EL__) || \ - (defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__ == 1) || \ - (defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN == 1) || \ - defined(_M_IX86) || defined(_M_AMD64) || defined(_M_ARM) /* MSVC */ -# define SECP256K1_LITTLE_ENDIAN -# endif -# if (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \ - defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) || \ - defined(__MICROBLAZEEB__) || defined(__ARMEB__) || defined(__AARCH64EB__) || \ - (defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ == 1) || \ - (defined(_BIG_ENDIAN) && _BIG_ENDIAN == 1) -# define SECP256K1_BIG_ENDIAN -# endif -#endif -#if defined(SECP256K1_LITTLE_ENDIAN) == defined(SECP256K1_BIG_ENDIAN) -# error Please make sure that either SECP256K1_LITTLE_ENDIAN or SECP256K1_BIG_ENDIAN is set, see src/util.h. -#endif - /* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. */ -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_memczero(void *s, size_t len, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_memczero(void *s, size_t len, int flag) { unsigned char *p = (unsigned char *)s; /* Access flag with a volatile-qualified lvalue. This prevents clang from figuring out (after inlining) that flag can @@ -218,7 +198,7 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_memczero(void *s, size_t len, * We use this to avoid possible compiler bugs with memcmp, e.g. * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189 */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_memcmp_var(const void *s1, const void *s2, size_t n) { +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_memcmp_var(const void *s1, const void *s2, size_t n) { const unsigned char *p1 = s1, *p2 = s2; size_t i; @@ -232,7 +212,7 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_memcmp_var(const void *s1, cons } /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/ -static SECP256K1_INLINE void rustsecp256k1_v0_7_0_int_cmov(int *r, const int *a, int flag) { +static SECP256K1_INLINE void rustsecp256k1_v0_8_0_int_cmov(int *r, const int *a, int flag) { unsigned int mask0, mask1, r_masked, a_masked; /* Access flag with a volatile-qualified lvalue. This prevents clang from figuring out (after inlining) that flag can @@ -250,28 +230,36 @@ static SECP256K1_INLINE void rustsecp256k1_v0_7_0_int_cmov(int *r, const int *a, *r = (int)(r_masked | a_masked); } -/* If USE_FORCE_WIDEMUL_{INT128,INT64} is set, use that wide multiplication implementation. - * Otherwise use the presence of __SIZEOF_INT128__ to decide. - */ -#if defined(USE_FORCE_WIDEMUL_INT128) +#if defined(USE_FORCE_WIDEMUL_INT128_STRUCT) +/* If USE_FORCE_WIDEMUL_INT128_STRUCT is set, use int128_struct. */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 +#elif defined(USE_FORCE_WIDEMUL_INT128) +/* If USE_FORCE_WIDEMUL_INT128 is set, use int128. 
*/ # define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_NATIVE 1 #elif defined(USE_FORCE_WIDEMUL_INT64) +/* If USE_FORCE_WIDEMUL_INT64 is set, use int64. */ # define SECP256K1_WIDEMUL_INT64 1 #elif defined(UINT128_MAX) || defined(__SIZEOF_INT128__) +/* If a native 128-bit integer type exists, use int128. */ # define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_NATIVE 1 +#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) +/* On 64-bit MSVC targets (x86_64 and arm64), use int128_struct + * (which has special logic to implement using intrinsics on those systems). */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 +#elif SIZE_MAX > 0xffffffff +/* Systems with 64-bit pointers (and thus registers) very likely benefit from + * using 64-bit based arithmetic (even if we need to fall back to 32x32->64 based + * multiplication logic). */ +# define SECP256K1_WIDEMUL_INT128 1 +# define SECP256K1_INT128_STRUCT 1 #else +/* Lastly, fall back to int64 based arithmetic. */ # define SECP256K1_WIDEMUL_INT64 1 #endif -#if defined(SECP256K1_WIDEMUL_INT128) -# if !defined(UINT128_MAX) && defined(__SIZEOF_INT128__) -SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t; -SECP256K1_GNUC_EXT typedef __int128 int128_t; -#define UINT128_MAX ((uint128_t)(-1)) -#define INT128_MAX ((int128_t)(UINT128_MAX >> 1)) -#define INT128_MIN (-INT128_MAX - 1) -/* No (U)INT128_C macros because compilers providing __int128 do not support 128-bit literals. */ -# endif -#endif #ifndef __has_builtin #define __has_builtin(x) 0 @@ -279,8 +267,8 @@ SECP256K1_GNUC_EXT typedef __int128 int128_t; /* Determine the number of trailing zero bits in a (non-zero) 32-bit x. * This function is only intended to be used as fallback for - * rustsecp256k1_v0_7_0_ctz32_var, but permits it to be tested separately. */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz32_var_debruijn(uint32_t x) { + * rustsecp256k1_v0_8_0_ctz32_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_ctz32_var_debruijn(uint32_t x) { static const uint8_t debruijn[32] = { 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, @@ -291,8 +279,8 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz32_var_debruijn(uint32_t x) /* Determine the number of trailing zero bits in a (non-zero) 64-bit x. * This function is only intended to be used as fallback for - * rustsecp256k1_v0_7_0_ctz64_var, but permits it to be tested separately. */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz64_var_debruijn(uint64_t x) { + * rustsecp256k1_v0_8_0_ctz64_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_ctz64_var_debruijn(uint64_t x) { static const uint8_t debruijn[64] = { 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, @@ -303,7 +291,7 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz64_var_debruijn(uint64_t x) } /* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz32_var(uint32_t x) { +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_ctz32_var(uint32_t x) { VERIFY_CHECK(x != 0); #if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4)) /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. 
*/ @@ -316,12 +304,12 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz32_var(uint32_t x) { return __builtin_ctzl(x); #else /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ - return rustsecp256k1_v0_7_0_ctz32_var_debruijn(x); + return rustsecp256k1_v0_8_0_ctz32_var_debruijn(x); #endif } /* Determine the number of trailing zero bits in a (non-zero) 64-bit x. */ -static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz64_var(uint64_t x) { +static SECP256K1_INLINE int rustsecp256k1_v0_8_0_ctz64_var(uint64_t x) { VERIFY_CHECK(x != 0); #if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */ @@ -334,8 +322,24 @@ static SECP256K1_INLINE int rustsecp256k1_v0_7_0_ctz64_var(uint64_t x) { return __builtin_ctzll(x); #else /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ - return rustsecp256k1_v0_7_0_ctz64_var_debruijn(x); + return rustsecp256k1_v0_8_0_ctz64_var_debruijn(x); #endif } +/* Read a uint32_t in big endian */ +SECP256K1_INLINE static uint32_t rustsecp256k1_v0_8_0_read_be32(const unsigned char* p) { + return (uint32_t)p[0] << 24 | + (uint32_t)p[1] << 16 | + (uint32_t)p[2] << 8 | + (uint32_t)p[3]; +} + +/* Write a uint32_t in big endian */ +SECP256K1_INLINE static void rustsecp256k1_v0_8_0_write_be32(unsigned char* p, uint32_t x) { + p[3] = x; + p[2] = x >> 8; + p[1] = x >> 16; + p[0] = x >> 24; +} + #endif /* SECP256K1_UTIL_H */ diff --git a/secp256k1-sys/depend/secp256k1/src/valgrind_ctime_test.c b/secp256k1-sys/depend/secp256k1/src/valgrind_ctime_test.c index 91e7e46f5..8e62b3b2e 100644 --- a/secp256k1-sys/depend/secp256k1/src/valgrind_ctime_test.c +++ b/secp256k1-sys/depend/secp256k1/src/valgrind_ctime_test.c @@ -12,25 +12,25 @@ #include "util.h" #ifdef ENABLE_MODULE_ECDH -# include "../include/rustsecp256k1_v0_7_0_ecdh.h" +# include "../include/rustsecp256k1_v0_8_0_ecdh.h" #endif #ifdef ENABLE_MODULE_RECOVERY -# include "../include/rustsecp256k1_v0_7_0_recovery.h" +# include "../include/rustsecp256k1_v0_8_0_recovery.h" #endif #ifdef ENABLE_MODULE_EXTRAKEYS -# include "../include/rustsecp256k1_v0_7_0_extrakeys.h" +# include "../include/rustsecp256k1_v0_8_0_extrakeys.h" #endif #ifdef ENABLE_MODULE_SCHNORRSIG #include "../include/secp256k1_schnorrsig.h" #endif -void run_tests(rustsecp256k1_v0_7_0_context *ctx, unsigned char *key); +void run_tests(rustsecp256k1_v0_8_0_context *ctx, unsigned char *key); int main(void) { - rustsecp256k1_v0_7_0_context* ctx; + rustsecp256k1_v0_8_0_context* ctx; unsigned char key[32]; int ret, i; @@ -39,9 +39,7 @@ int main(void) { fprintf(stderr, "Usage: libtool --mode=execute valgrind ./valgrind_ctime_test\n"); return 1; } - ctx = rustsecp256k1_v0_7_0_context_create(SECP256K1_CONTEXT_SIGN - | SECP256K1_CONTEXT_VERIFY - | SECP256K1_CONTEXT_DECLASSIFY); + ctx = rustsecp256k1_v0_8_0_context_create(SECP256K1_CONTEXT_DECLASSIFY); /** In theory, testing with a single secret input should be sufficient: * If control flow depended on secrets the tool would generate an error. */ @@ -54,17 +52,17 @@ int main(void) { /* Test context randomisation. Do this last because it leaves the context * tainted. 
*/ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_context_randomize(ctx, key); + ret = rustsecp256k1_v0_8_0_context_randomize(ctx, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret); - rustsecp256k1_v0_7_0_context_destroy(ctx); + rustsecp256k1_v0_8_0_context_destroy(ctx); return 0; } -void run_tests(rustsecp256k1_v0_7_0_context *ctx, unsigned char *key) { - rustsecp256k1_v0_7_0_ecdsa_signature signature; - rustsecp256k1_v0_7_0_pubkey pubkey; +void run_tests(rustsecp256k1_v0_8_0_context *ctx, unsigned char *key) { + rustsecp256k1_v0_8_0_ecdsa_signature signature; + rustsecp256k1_v0_8_0_pubkey pubkey; size_t siglen = 74; size_t outputlen = 33; int i; @@ -73,11 +71,11 @@ void run_tests(rustsecp256k1_v0_7_0_context *ctx, unsigned char *key) { unsigned char sig[74]; unsigned char spubkey[33]; #ifdef ENABLE_MODULE_RECOVERY - rustsecp256k1_v0_7_0_ecdsa_recoverable_signature recoverable_signature; + rustsecp256k1_v0_8_0_ecdsa_recoverable_signature recoverable_signature; int recid; #endif #ifdef ENABLE_MODULE_EXTRAKEYS - rustsecp256k1_v0_7_0_keypair keypair; + rustsecp256k1_v0_8_0_keypair keypair; #endif for (i = 0; i < 32; i++) { @@ -86,24 +84,24 @@ void run_tests(rustsecp256k1_v0_7_0_context *ctx, unsigned char *key) { /* Test keygen. */ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_ec_pubkey_create(ctx, &pubkey, key); - VALGRIND_MAKE_MEM_DEFINED(&pubkey, sizeof(rustsecp256k1_v0_7_0_pubkey)); + ret = rustsecp256k1_v0_8_0_ec_pubkey_create(ctx, &pubkey, key); + VALGRIND_MAKE_MEM_DEFINED(&pubkey, sizeof(rustsecp256k1_v0_8_0_pubkey)); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_7_0_ec_pubkey_serialize(ctx, spubkey, &outputlen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); + CHECK(rustsecp256k1_v0_8_0_ec_pubkey_serialize(ctx, spubkey, &outputlen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); /* Test signing. */ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_ecdsa_sign(ctx, &signature, msg, key, NULL, NULL); - VALGRIND_MAKE_MEM_DEFINED(&signature, sizeof(rustsecp256k1_v0_7_0_ecdsa_signature)); + ret = rustsecp256k1_v0_8_0_ecdsa_sign(ctx, &signature, msg, key, NULL, NULL); + VALGRIND_MAKE_MEM_DEFINED(&signature, sizeof(rustsecp256k1_v0_8_0_ecdsa_signature)); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature)); #ifdef ENABLE_MODULE_ECDH /* Test ECDH. */ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_ecdh(ctx, msg, &pubkey, key, NULL, NULL); + ret = rustsecp256k1_v0_8_0_ecdh(ctx, msg, &pubkey, key, NULL, NULL); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); #endif @@ -111,62 +109,62 @@ void run_tests(rustsecp256k1_v0_7_0_context *ctx, unsigned char *key) { #ifdef ENABLE_MODULE_RECOVERY /* Test signing a recoverable signature. 
*/ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL); + ret = rustsecp256k1_v0_8_0_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL); VALGRIND_MAKE_MEM_DEFINED(&recoverable_signature, sizeof(recoverable_signature)); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret); - CHECK(rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &recoverable_signature)); + CHECK(rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &recoverable_signature)); CHECK(recid >= 0 && recid <= 3); #endif VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_ec_seckey_verify(ctx, key); + ret = rustsecp256k1_v0_8_0_ec_seckey_verify(ctx, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_ec_seckey_negate(ctx, key); + ret = rustsecp256k1_v0_8_0_ec_seckey_negate(ctx, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); VALGRIND_MAKE_MEM_UNDEFINED(key, 32); VALGRIND_MAKE_MEM_UNDEFINED(msg, 32); - ret = rustsecp256k1_v0_7_0_ec_seckey_tweak_add(ctx, key, msg); + ret = rustsecp256k1_v0_8_0_ec_seckey_tweak_add(ctx, key, msg); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); VALGRIND_MAKE_MEM_UNDEFINED(key, 32); VALGRIND_MAKE_MEM_UNDEFINED(msg, 32); - ret = rustsecp256k1_v0_7_0_ec_seckey_tweak_mul(ctx, key, msg); + ret = rustsecp256k1_v0_8_0_ec_seckey_tweak_mul(ctx, key, msg); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); /* Test keypair_create and keypair_xonly_tweak_add. */ #ifdef ENABLE_MODULE_EXTRAKEYS VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, key); + ret = rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); /* The tweak is not treated as a secret in keypair_tweak_add */ VALGRIND_MAKE_MEM_DEFINED(msg, 32); - ret = rustsecp256k1_v0_7_0_keypair_xonly_tweak_add(ctx, &keypair, msg); + ret = rustsecp256k1_v0_8_0_keypair_xonly_tweak_add(ctx, &keypair, msg); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); VALGRIND_MAKE_MEM_UNDEFINED(key, 32); VALGRIND_MAKE_MEM_UNDEFINED(&keypair, sizeof(keypair)); - ret = rustsecp256k1_v0_7_0_keypair_sec(ctx, key, &keypair); + ret = rustsecp256k1_v0_8_0_keypair_sec(ctx, key, &keypair); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); #endif #ifdef ENABLE_MODULE_SCHNORRSIG VALGRIND_MAKE_MEM_UNDEFINED(key, 32); - ret = rustsecp256k1_v0_7_0_keypair_create(ctx, &keypair, key); + ret = rustsecp256k1_v0_8_0_keypair_create(ctx, &keypair, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); - ret = rustsecp256k1_v0_7_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL); + ret = rustsecp256k1_v0_8_0_schnorrsig_sign32(ctx, sig, msg, &keypair, NULL); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); #endif diff --git a/secp256k1-sys/src/lib.rs b/secp256k1-sys/src/lib.rs index 65558e742..7b28cc6ee 100644 --- a/secp256k1-sys/src/lib.rs +++ b/secp256k1-sys/src/lib.rs @@ -496,83 +496,83 @@ impl core::hash::Hash for KeyPair { extern "C" { /// Default ECDH hash function - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdh_hash_function_default")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdh_hash_function_default")] pub static 
secp256k1_ecdh_hash_function_default: EcdhHashFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_nonce_function_rfc6979")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_nonce_function_rfc6979")] pub static secp256k1_nonce_function_rfc6979: NonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_nonce_function_default")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_nonce_function_default")] pub static secp256k1_nonce_function_default: NonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_nonce_function_bip340")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_nonce_function_bip340")] pub static secp256k1_nonce_function_bip340: SchnorrNonceFn; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_context_no_precomp")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_context_no_precomp")] pub static secp256k1_context_no_precomp: *const Context; // Contexts - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_context_preallocated_destroy")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_context_preallocated_destroy")] pub fn secp256k1_context_preallocated_destroy(cx: NonNull); // Signatures - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_signature_parse_der")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_signature_parse_der")] pub fn secp256k1_ecdsa_signature_parse_der(cx: *const Context, sig: *mut Signature, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_signature_parse_compact")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_signature_parse_compact")] pub fn secp256k1_ecdsa_signature_parse_compact(cx: *const Context, sig: *mut Signature, input64: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_signature_parse_der_lax")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_signature_parse_der_lax")] pub fn ecdsa_signature_parse_der_lax(cx: *const Context, sig: *mut Signature, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_signature_serialize_der")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_signature_serialize_der")] pub fn secp256k1_ecdsa_signature_serialize_der(cx: *const Context, output: *mut c_uchar, out_len: *mut size_t, sig: *const Signature) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_signature_serialize_compact")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_signature_serialize_compact")] pub fn secp256k1_ecdsa_signature_serialize_compact(cx: *const Context, output64: *mut c_uchar, sig: *const Signature) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_signature_normalize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_signature_normalize")] pub fn secp256k1_ecdsa_signature_normalize(cx: *const Context, out_sig: *mut 
Signature, in_sig: *const Signature) -> c_int; // Secret Keys - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_seckey_verify")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_seckey_verify")] pub fn secp256k1_ec_seckey_verify(cx: *const Context, sk: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_seckey_negate")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_seckey_negate")] pub fn secp256k1_ec_seckey_negate(cx: *const Context, sk: *mut c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_seckey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_seckey_tweak_add")] pub fn secp256k1_ec_seckey_tweak_add(cx: *const Context, sk: *mut c_uchar, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_seckey_tweak_mul")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_seckey_tweak_mul")] pub fn secp256k1_ec_seckey_tweak_mul(cx: *const Context, sk: *mut c_uchar, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_keypair_sec")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_keypair_sec")] pub fn secp256k1_keypair_sec(cx: *const Context, output_seckey: *mut c_uchar, keypair: *const KeyPair) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_keypair_pub")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_keypair_pub")] pub fn secp256k1_keypair_pub(cx: *const Context, output_pubkey: *mut PublicKey, keypair: *const KeyPair) @@ -582,71 +582,71 @@ extern "C" { #[cfg(not(fuzzing))] extern "C" { // Contexts - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_context_preallocated_size")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_context_preallocated_size")] pub fn secp256k1_context_preallocated_size(flags: c_uint) -> size_t; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_context_preallocated_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_context_preallocated_create")] pub fn secp256k1_context_preallocated_create(prealloc: NonNull, flags: c_uint) -> NonNull; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_context_preallocated_clone_size")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_context_preallocated_clone_size")] pub fn secp256k1_context_preallocated_clone_size(cx: *const Context) -> size_t; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_context_preallocated_clone")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_context_preallocated_clone")] pub fn secp256k1_context_preallocated_clone(cx: *const Context, prealloc: NonNull) -> NonNull; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_context_randomize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_context_randomize")] pub fn secp256k1_context_randomize(cx: NonNull, seed32: *const c_uchar) -> c_int; // Pubkeys - #[cfg_attr(not(rust_secp_no_symbol_renaming), 
link_name = "rustsecp256k1_v0_7_0_ec_pubkey_parse")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_pubkey_parse")] pub fn secp256k1_ec_pubkey_parse(cx: *const Context, pk: *mut PublicKey, input: *const c_uchar, in_len: size_t) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_pubkey_serialize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_pubkey_serialize")] pub fn secp256k1_ec_pubkey_serialize(cx: *const Context, output: *mut c_uchar, out_len: *mut size_t, pk: *const PublicKey, compressed: c_uint) -> c_int; // EC - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_pubkey_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_pubkey_create")] pub fn secp256k1_ec_pubkey_create(cx: *const Context, pk: *mut PublicKey, sk: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_pubkey_negate")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_pubkey_negate")] pub fn secp256k1_ec_pubkey_negate(cx: *const Context, pk: *mut PublicKey) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_pubkey_cmp")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_pubkey_cmp")] pub fn secp256k1_ec_pubkey_cmp(cx: *const Context, pubkey1: *const PublicKey, pubkey2: *const PublicKey) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_pubkey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_pubkey_tweak_add")] pub fn secp256k1_ec_pubkey_tweak_add(cx: *const Context, pk: *mut PublicKey, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_pubkey_tweak_mul")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_pubkey_tweak_mul")] pub fn secp256k1_ec_pubkey_tweak_mul(cx: *const Context, pk: *mut PublicKey, tweak: *const c_uchar) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ec_pubkey_combine")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ec_pubkey_combine")] pub fn secp256k1_ec_pubkey_combine(cx: *const Context, out: *mut PublicKey, ins: *const *const PublicKey, n: c_int) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdh")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdh")] pub fn secp256k1_ecdh( cx: *const Context, output: *mut c_uchar, @@ -657,14 +657,14 @@ extern "C" { ) -> c_int; // ECDSA - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_verify")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_verify")] pub fn secp256k1_ecdsa_verify(cx: *const Context, sig: *const Signature, msg32: *const c_uchar, pk: *const PublicKey) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_sign")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_sign")] pub fn secp256k1_ecdsa_sign(cx: *const Context, sig: *mut Signature, msg32: *const c_uchar, @@ -674,7 +674,7 @@ extern "C" { -> c_int; // Schnorr Signatures - #[cfg_attr(not(rust_secp_no_symbol_renaming), 
link_name = "rustsecp256k1_v0_7_0_schnorrsig_sign")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_schnorrsig_sign")] pub fn secp256k1_schnorrsig_sign( cx: *const Context, sig: *mut c_uchar, @@ -684,7 +684,7 @@ extern "C" { ) -> c_int; // Schnorr Signatures with extra parameters (see [`SchnorrSigExtraParams`]) - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_schnorrsig_sign_custom")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_schnorrsig_sign_custom")] pub fn secp256k1_schnorrsig_sign_custom( cx: *const Context, sig: *mut c_uchar, @@ -694,7 +694,7 @@ extern "C" { extra_params: *const SchnorrSigExtraParams, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_schnorrsig_verify")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_schnorrsig_verify")] pub fn secp256k1_schnorrsig_verify( cx: *const Context, sig64: *const c_uchar, @@ -704,28 +704,28 @@ extern "C" { ) -> c_int; // Extra keys - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_keypair_create")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_keypair_create")] pub fn secp256k1_keypair_create( cx: *const Context, keypair: *mut KeyPair, seckey: *const c_uchar, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_xonly_pubkey_parse")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_xonly_pubkey_parse")] pub fn secp256k1_xonly_pubkey_parse( cx: *const Context, pubkey: *mut XOnlyPublicKey, input32: *const c_uchar, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_xonly_pubkey_serialize")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_xonly_pubkey_serialize")] pub fn secp256k1_xonly_pubkey_serialize( cx: *const Context, output32: *mut c_uchar, pubkey: *const XOnlyPublicKey, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_xonly_pubkey_from_pubkey")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_xonly_pubkey_from_pubkey")] pub fn secp256k1_xonly_pubkey_from_pubkey( cx: *const Context, xonly_pubkey: *mut XOnlyPublicKey, @@ -733,14 +733,14 @@ extern "C" { pubkey: *const PublicKey, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_xonly_pubkey_cmp")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_xonly_pubkey_cmp")] pub fn secp256k1_xonly_pubkey_cmp( cx: *const Context, pubkey1: *const XOnlyPublicKey, pubkey2: *const XOnlyPublicKey ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add")] pub fn secp256k1_xonly_pubkey_tweak_add( cx: *const Context, output_pubkey: *mut PublicKey, @@ -748,7 +748,7 @@ extern "C" { tweak32: *const c_uchar, ) -> c_int; - #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_keypair_xonly_pub")] + #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_keypair_xonly_pub")] pub fn secp256k1_keypair_xonly_pub( cx: *const Context, pubkey: *mut XOnlyPublicKey, @@ -756,14 +756,14 @@ extern "C" { keypair: *const KeyPair ) -> c_int; - 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_keypair_xonly_tweak_add")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_keypair_xonly_tweak_add")]
     pub fn secp256k1_keypair_xonly_tweak_add(
         cx: *const Context,
         keypair: *mut KeyPair,
         tweak32: *const c_uchar,
     ) -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_xonly_pubkey_tweak_add_check")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_xonly_pubkey_tweak_add_check")]
     pub fn secp256k1_xonly_pubkey_tweak_add_check(
         cx: *const Context,
         tweaked_pubkey32: *const c_uchar,
@@ -791,7 +791,7 @@ extern "C" {
 #[cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))]
 #[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))))]
 pub unsafe fn secp256k1_context_create(flags: c_uint) -> NonNull<Context> {
-    rustsecp256k1_v0_7_0_context_create(flags)
+    rustsecp256k1_v0_8_0_context_create(flags)
 }
 
 /// A reimplementation of the C function `secp256k1_context_create` in rust.
@@ -801,7 +801,7 @@ pub unsafe fn secp256k1_context_create(flags: c_uint) -> NonNull<Context> {
 #[allow(clippy::missing_safety_doc)] // Documented above.
 #[cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))]
 #[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))))]
-pub unsafe extern "C" fn rustsecp256k1_v0_7_0_context_create(flags: c_uint) -> NonNull<Context> {
+pub unsafe extern "C" fn rustsecp256k1_v0_8_0_context_create(flags: c_uint) -> NonNull<Context> {
     use core::mem;
     use crate::alloc::alloc;
     assert!(ALIGN_TO >= mem::align_of::<usize>());
@@ -835,14 +835,14 @@ pub unsafe extern "C" fn rustsecp256k1_v0_7_0_context_create(flags: c_uint) -> N
 #[cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))]
 #[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))))]
 pub unsafe fn secp256k1_context_destroy(ctx: NonNull<Context>) {
-    rustsecp256k1_v0_7_0_context_destroy(ctx)
+    rustsecp256k1_v0_8_0_context_destroy(ctx)
 }
 
 #[no_mangle]
 #[allow(clippy::missing_safety_doc)] // Documented above.
 #[cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))]
 #[cfg_attr(docsrs, doc(cfg(all(feature = "alloc", not(rust_secp_no_symbol_renaming)))))]
-pub unsafe extern "C" fn rustsecp256k1_v0_7_0_context_destroy(mut ctx: NonNull<Context>) {
+pub unsafe extern "C" fn rustsecp256k1_v0_8_0_context_destroy(mut ctx: NonNull<Context>) {
     use crate::alloc::alloc;
     secp256k1_context_preallocated_destroy(ctx);
     let ctx: *mut Context = ctx.as_mut();
@@ -878,7 +878,7 @@ pub unsafe extern "C" fn rustsecp256k1_v0_7_0_context_destroy(mut ctx: NonNull<
 
     extern "C" {
-        fn rustsecp256k1_v0_7_0_context_preallocated_size(flags: c_uint) -> size_t;
-        fn rustsecp256k1_v0_7_0_context_preallocated_create(prealloc: NonNull<c_void>, flags: c_uint) -> NonNull<Context>;
-        fn rustsecp256k1_v0_7_0_context_preallocated_clone(cx: *const Context, prealloc: NonNull<c_void>) -> NonNull<Context>;
+        fn rustsecp256k1_v0_8_0_context_preallocated_size(flags: c_uint) -> size_t;
+        fn rustsecp256k1_v0_8_0_context_preallocated_create(prealloc: NonNull<c_void>, flags: c_uint) -> NonNull<Context>;
+        fn rustsecp256k1_v0_8_0_context_preallocated_clone(cx: *const Context, prealloc: NonNull<c_void>) -> NonNull<Context>;
     }
 
     #[cfg(feature = "lowmemory")]
@@ -979,7 +979,7 @@ mod fuzz_dummy {
     const CTX_SIZE: usize = 1024 * (1024 + 128);
 
     // Contexts
     pub unsafe fn secp256k1_context_preallocated_size(flags: c_uint) -> size_t {
-        assert!(rustsecp256k1_v0_7_0_context_preallocated_size(flags) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
+        assert!(rustsecp256k1_v0_8_0_context_preallocated_size(flags) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
         CTX_SIZE
     }
 
@@ -999,8 +999,8 @@ mod fuzz_dummy {
             if have_ctx == HAVE_CONTEXT_NONE {
                 have_ctx = HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_WORKING, Ordering::AcqRel);
                 if have_ctx == HAVE_CONTEXT_NONE {
-                    assert!(rustsecp256k1_v0_7_0_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
-                    assert_eq!(rustsecp256k1_v0_7_0_context_preallocated_create(
+                    assert!(rustsecp256k1_v0_8_0_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
+                    assert_eq!(rustsecp256k1_v0_8_0_context_preallocated_create(
                         NonNull::new_unchecked(PREALLOCATED_CONTEXT[..].as_mut_ptr() as *mut c_void),
                         SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
                         NonNull::new_unchecked(PREALLOCATED_CONTEXT[..].as_mut_ptr() as *mut Context));
@@ -1029,7 +1029,7 @@ mod fuzz_dummy {
         let new_ptr = (prealloc.as_ptr() as *mut u8).add(CTX_SIZE).sub(std::mem::size_of::<c_uint>());
         let flags = (orig_ptr as *mut c_uint).read();
         (new_ptr as *mut c_uint).write(flags);
-        rustsecp256k1_v0_7_0_context_preallocated_clone(cx, prealloc)
+        rustsecp256k1_v0_8_0_context_preallocated_clone(cx, prealloc)
     }
 
     pub unsafe fn secp256k1_context_randomize(cx: NonNull<Context>,
diff --git a/secp256k1-sys/src/recovery.rs b/secp256k1-sys/src/recovery.rs
index 9e30372a4..b3c8f568c 100644
--- a/secp256k1-sys/src/recovery.rs
+++ b/secp256k1-sys/src/recovery.rs
@@ -112,17 +112,17 @@ impl core::hash::Hash for RecoverableSignature {
 }
 
 extern "C" {
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_parse_compact")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_parse_compact")]
     pub fn secp256k1_ecdsa_recoverable_signature_parse_compact(cx: *const Context, sig: *mut RecoverableSignature,
                                                                input64: *const c_uchar, recid: c_int)
                                                                -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_serialize_compact")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_serialize_compact")]
     pub fn secp256k1_ecdsa_recoverable_signature_serialize_compact(cx: *const Context, output64: *mut c_uchar,
                                                                    recid: *mut c_int, sig: *const RecoverableSignature)
                                                                    -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_recoverable_signature_convert")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_recoverable_signature_convert")]
     pub fn secp256k1_ecdsa_recoverable_signature_convert(cx: *const Context, sig: *mut Signature,
                                                          input: *const RecoverableSignature)
                                                          -> c_int;
@@ -130,7 +130,7 @@ extern "C" {
 
 #[cfg(not(fuzzing))]
 extern "C" {
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_sign_recoverable")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_sign_recoverable")]
     pub fn secp256k1_ecdsa_sign_recoverable(cx: *const Context,
                                             sig: *mut RecoverableSignature,
                                             msg32: *const c_uchar,
@@ -139,7 +139,7 @@ extern "C" {
                                             noncedata: *const c_void)
                                             -> c_int;
 
-    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_7_0_ecdsa_recover")]
+    #[cfg_attr(not(rust_secp_no_symbol_renaming), link_name = "rustsecp256k1_v0_8_0_ecdsa_recover")]
     pub fn secp256k1_ecdsa_recover(cx: *const Context,
                                    pk: *mut PublicKey,
                                    sig: *const RecoverableSignature,