From f460ae149b0000695205cc78f560d74a2d3918eb Mon Sep 17 00:00:00 2001 From: Michael Rosenberg Date: Tue, 28 Mar 2023 18:12:24 -0400 Subject: [PATCH] Make scalars always reduced (#519) * Removed Scalar::{from_bits, from_bytes_clamped}; all constructible scalars are now reduced mod l * Made Scalar::reduce() not pub; fixed test warning * Added benches for scalar add/sub/mul * Docs * Added EdwardsPoint::mul_base_clamped and gated Scalar::from_bits behind legacy_compatibility * Added unit test for Mul impl on unreduced Scalars * Added Montgomery::mul_base_clamped * Added BasepointTable::mul_base_clamped * Removed invalid scalar arithmetic test; this functionality is no longer supported * Made clamp_integer() const * Updated readme and changelog * Added BasepointTable::mul_base_clamped to tests * Added proper deprecation notice to Scalar::from_bits; added legacy_compatibility to Makefile and docsrs flags --- CHANGELOG.md | 3 + Cargo.toml | 3 +- Makefile | 2 +- README.md | 3 + benches/dalek_benchmarks.rs | 27 +- .../serial/scalar_mul/variable_base.rs | 1 + src/backend/serial/u32/scalar.rs | 3 +- src/edwards.rs | 106 ++++- src/montgomery.rs | 78 +++- src/scalar.rs | 439 ++++++++---------- src/traits.rs | 14 +- 11 files changed, 402 insertions(+), 277 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5c9bff0a..4e24730ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,9 +24,12 @@ major series. 
whenever using `EdwardsBasepointTable` or `RistrettoBasepointTable` * `Scalar::from_canonical_bytes` now returns `CtOption` * `Scalar::is_canonical` now returns `Choice` +* Remove `Scalar::from_bytes_clamped` and `Scalar::reduce` +* Deprecate and feature-gate `Scalar::from_bits` behind `legacy_compatibility` #### Other changes +* Add `EdwardsPoint::{mul_base, mul_base_clamped}`, `MontgomeryPoint::{mul_base, mul_base_clamped}`, and `BasepointTable::mul_base_clamped` * Add `precomputed-tables` feature * Update Maintenance Policies for SemVer * Migrate documentation to docs.rs hosted diff --git a/Cargo.toml b/Cargo.toml index 9b4626c3c..dde08ecd3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ rustdoc-args = [ "--cfg", "docsrs", ] rustc-args = ["--cfg", "curve25519_dalek_backend=\"simd\""] -features = ["serde", "rand_core", "digest"] +features = ["serde", "rand_core", "digest", "legacy_compatibility"] [dev-dependencies] sha2 = { version = "0.10", default-features = false } @@ -66,6 +66,7 @@ packed_simd = { version = "0.3.8", package = "packed_simd_2", features = ["into_ default = ["alloc", "precomputed-tables", "zeroize"] alloc = ["zeroize?/alloc"] precomputed-tables = [] +legacy_compatibility = [] [profile.dev] opt-level = 2 diff --git a/Makefile b/Makefile index b263cf753..3b41b1756 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -FEATURES := serde rand_core digest +FEATURES := serde rand_core digest legacy_compatibility export RUSTFLAGS := --cfg=curve25519_dalek_backend="simd" export RUSTDOCFLAGS := \ diff --git a/README.md b/README.md index ecd00d6b5..574e6f37d 100644 --- a/README.md +++ b/README.md @@ -56,6 +56,7 @@ curve25519-dalek = "4.0.0-rc.2" | `rand_core` | | Enables `Scalar::random` and `RistrettoPoint::random`. This is an optional dependency whose version is not subject to SemVer. See [below](#public-api-semver-exemptions) for more details. 
| | `digest` | | Enables `RistrettoPoint::{from_hash, hash_from_bytes}` and `Scalar::{from_hash, hash_from_bytes}`. This is an optional dependency whose version is not subject to SemVer. See [below](#public-api-semver-exemptions) for more details. | | `serde` | | Enables `serde` serialization/deserialization for all the point and scalar types. | +| `legacy_compatibility`| | Enables `Scalar::from_bits`, which allows the user to build unreduced scalars whose arithmetic is broken. Do not use this unless you know what you're doing. | To disable the default features when using `curve25519-dalek` as a dependency, add `default-features = false` to the dependency in your `Cargo.toml`. To @@ -77,6 +78,8 @@ latest breaking changes in high level are below: * Replace methods `Scalar::{zero, one}` with constants `Scalar::{ZERO, ONE}` * `Scalar::from_canonical_bytes` now returns `CtOption` * `Scalar::is_canonical` now returns `Choice` +* Remove `Scalar::from_bytes_clamped` and `Scalar::reduce` +* Deprecate and feature-gate `Scalar::from_bits` behind `legacy_compatibility` * Deprecate `EdwardsPoint::hash_from_bytes` and rename it `EdwardsPoint::nonspec_map_to_curve` * Require including a new trait, `use curve25519_dalek::traits::BasepointTable` diff --git a/benches/dalek_benchmarks.rs b/benches/dalek_benchmarks.rs index aa16d0f09..9148faafe 100644 --- a/benches/dalek_benchmarks.rs +++ b/benches/dalek_benchmarks.rs @@ -300,11 +300,34 @@ mod montgomery_benches { mod scalar_benches { use super::*; - fn scalar_inversion(c: &mut BenchmarkGroup) { + fn scalar_arith(c: &mut BenchmarkGroup) { + let mut rng = thread_rng(); + c.bench_function("Scalar inversion", |b| { let s = Scalar::from(897987897u64).invert(); b.iter(|| s.invert()); }); + c.bench_function("Scalar addition", |b| { + b.iter_batched( + || (Scalar::random(&mut rng), Scalar::random(&mut rng)), + |(a, b)| a + b, + BatchSize::SmallInput, + ); + }); + c.bench_function("Scalar subtraction", |b| { + b.iter_batched( + || 
(Scalar::random(&mut rng), Scalar::random(&mut rng)), + |(a, b)| a - b, + BatchSize::SmallInput, + ); + }); + c.bench_function("Scalar multiplication", |b| { + b.iter_batched( + || (Scalar::random(&mut rng), Scalar::random(&mut rng)), + |(a, b)| a * b, + BatchSize::SmallInput, + ); + }); } fn batch_scalar_inversion(c: &mut BenchmarkGroup) { @@ -329,7 +352,7 @@ mod scalar_benches { let mut c = Criterion::default(); let mut g = c.benchmark_group("scalar benches"); - scalar_inversion(&mut g); + scalar_arith(&mut g); batch_scalar_inversion(&mut g); } } diff --git a/src/backend/serial/scalar_mul/variable_base.rs b/src/backend/serial/scalar_mul/variable_base.rs index 513904137..1de84bc4d 100644 --- a/src/backend/serial/scalar_mul/variable_base.rs +++ b/src/backend/serial/scalar_mul/variable_base.rs @@ -16,6 +16,7 @@ pub(crate) fn mul(point: &EdwardsPoint, scalar: &Scalar) -> EdwardsPoint { // s = s_0 + s_1*16^1 + ... + s_63*16^63, // // with `-8 ≤ s_i < 8` for `0 ≤ i < 63` and `-8 ≤ s_63 ≤ 8`. + // This decomposition requires s < 2^255, which is guaranteed by Scalar invariant #1. let scalar_digits = scalar.as_radix_16(); // Compute s*P as // diff --git a/src/backend/serial/u32/scalar.rs b/src/backend/serial/u32/scalar.rs index 2703078a1..8ae126b1e 100644 --- a/src/backend/serial/u32/scalar.rs +++ b/src/backend/serial/u32/scalar.rs @@ -18,7 +18,8 @@ use zeroize::Zeroize; use crate::constants; -/// The `Scalar29` struct represents an element in ℤ/lℤ as 9 29-bit limbs +/// The `Scalar29` struct represents an element in \\(\mathbb{Z} / \ell\mathbb{Z}\\) as 9 29-bit +/// limbs #[derive(Copy, Clone)] pub struct Scalar29(pub [u32; 9]); diff --git a/src/edwards.rs b/src/edwards.rs index 29c936126..fae296f66 100644 --- a/src/edwards.rs +++ b/src/edwards.rs @@ -44,8 +44,8 @@ //! //! ## Scalars //! -//! Scalars are represented by the [`Scalar`] struct. To construct a scalar with a specific bit -//! pattern, see [`Scalar::from_bits`]. +//! 
Scalars are represented by the [`Scalar`] struct. To construct a scalar, see +//! [`Scalar::from_canonical_bytes`] or [`Scalar::from_bytes_mod_order_wide`]. //! //! ## Scalar Multiplication //! @@ -118,7 +118,7 @@ use zeroize::Zeroize; use crate::constants; use crate::field::FieldElement; -use crate::scalar::Scalar; +use crate::scalar::{clamp_integer, Scalar}; use crate::montgomery::MontgomeryPoint; @@ -728,6 +728,34 @@ impl EdwardsPoint { scalar * constants::ED25519_BASEPOINT_TABLE } } + + /// Multiply this point by `clamp_integer(bytes)`. For a description of clamping, see + /// [`clamp_integer`]. + pub fn mul_clamped(self, bytes: [u8; 32]) -> Self { + // We have to construct a Scalar that is not reduced mod l, which breaks scalar invariant + // #2. But #2 is not necessary for correctness of variable-base multiplication. All that + // needs to hold is invariant #1, i.e., the scalar is less than 2^255. This is guaranteed + // by clamping. + // Further, we don't do any reduction or arithmetic with this clamped value, so there's no + // issues arising from the fact that the curve point is not necessarily in the prime-order + // subgroup. + let s = Scalar { + bytes: clamp_integer(bytes), + }; + s * self + } + + /// Multiply the basepoint by `clamp_integer(bytes)`. For a description of clamping, see + /// [`clamp_integer`]. + pub fn mul_base_clamped(bytes: [u8; 32]) -> Self { + // See reasoning in Self::mul_clamped why it is OK to make an unreduced Scalar here. We + // note that fixed-base multiplication is also defined for all values of `bytes` less than + // 2^255. + let s = Scalar { + bytes: clamp_integer(bytes), + }; + Self::mul_base(&s) + } } // ------------------------------------------------------------------------ @@ -875,7 +903,7 @@ macro_rules! impl_basepoint_table { /// /// Normally, the radix-256 tables would allow for only 32 additions per scalar /// multiplication. 
However, due to the fact that standardised definitions of - /// legacy protocols—such as x25519—require allowing unreduced 255-bit scalar + /// legacy protocols—such as x25519—require allowing unreduced 255-bit scalars /// invariants, when converting such an unreduced scalar's representation to /// radix-\\(2^{8}\\), we cannot guarantee the carry bit will fit in the last /// coefficient (the coefficients are `i8`s). When, \\(w\\), the power-of-2 of @@ -1224,8 +1252,7 @@ impl Debug for EdwardsPoint { #[cfg(test)] mod test { use super::*; - use crate::field::FieldElement; - use crate::scalar::Scalar; + use crate::{field::FieldElement, scalar::Scalar}; use subtle::ConditionallySelectable; #[cfg(feature = "alloc")] @@ -1234,6 +1261,8 @@ mod test { #[cfg(feature = "precomputed-tables")] use crate::constants::ED25519_BASEPOINT_TABLE; + use rand_core::RngCore; + /// X coordinate of the basepoint. /// = 15112221349535400772501151409588531511454012693041857206046113283949847762202 static BASE_X_COORD_BYTES: [u8; 32] = [ @@ -1465,16 +1494,13 @@ mod test { assert_eq!(aP128, aP256); } - /// Check a unreduced scalar multiplication by the basepoint tables. + /// Check unreduced scalar multiplication by the basepoint tables is the same no matter what + /// radix the table is. 
#[cfg(feature = "precomputed-tables")] #[test] fn basepoint_tables_unreduced_scalar() { let P = &constants::ED25519_BASEPOINT_POINT; - let a = Scalar::from_bits([ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, - ]); + let a = crate::scalar::test::LARGEST_UNREDUCED_SCALAR; let table_radix16 = EdwardsBasepointTableRadix16::create(P); let table_radix32 = EdwardsBasepointTableRadix32::create(P); @@ -1515,6 +1541,55 @@ mod test { assert_eq!(bp16.compress(), BASE16_CMPRSSD); } + /// Check that mul_base_clamped and mul_clamped agree + #[test] + fn mul_base_clamped() { + let mut csprng = rand_core::OsRng; + + // Make a random curve point in the curve. Give it torsion to make things interesting. + #[cfg(feature = "precomputed-tables")] + let random_point = { + let mut b = [0u8; 32]; + csprng.fill_bytes(&mut b); + EdwardsPoint::mul_base_clamped(b) + constants::EIGHT_TORSION[1] + }; + // Make a basepoint table from the random point. We'll use this with mul_base_clamped + #[cfg(feature = "precomputed-tables")] + let random_table = EdwardsBasepointTableRadix256::create(&random_point); + + // Now test scalar mult. agreement on the default basepoint as well as random_point + + // Test that mul_base_clamped and mul_clamped agree on a large integer. Even after + // clamping, this integer is not reduced mod l. 
+ let a_bytes = [0xff; 32]; + assert_eq!( + EdwardsPoint::mul_base_clamped(a_bytes), + constants::ED25519_BASEPOINT_POINT.mul_clamped(a_bytes) + ); + #[cfg(feature = "precomputed-tables")] + assert_eq!( + random_table.mul_base_clamped(a_bytes), + random_point.mul_clamped(a_bytes) + ); + + // Test agreement on random integers + for _ in 0..100 { + // This will be reduced mod l with probability l / 2^256 ≈ 6.25% + let mut a_bytes = [0u8; 32]; + csprng.fill_bytes(&mut a_bytes); + + assert_eq!( + EdwardsPoint::mul_base_clamped(a_bytes), + constants::ED25519_BASEPOINT_POINT.mul_clamped(a_bytes) + ); + #[cfg(feature = "precomputed-tables")] + assert_eq!( + random_table.mul_base_clamped(a_bytes), + random_point.mul_clamped(a_bytes) + ); + } + } + #[test] #[cfg(feature = "alloc")] fn impl_sum() { @@ -1617,16 +1692,11 @@ mod test { // A single iteration of a consistency check for MSM. #[cfg(feature = "alloc")] fn multiscalar_consistency_iter(n: usize) { - use core::iter; let mut rng = rand::thread_rng(); // Construct random coefficients x0, ..., x_{n-1}, // followed by some extra hardcoded ones. 
- let xs = (0..n) - .map(|_| Scalar::random(&mut rng)) - // The largest scalar allowed by the type system, 2^255-1 - .chain(iter::once(Scalar::from_bits([0xff; 32]))) - .collect::>(); + let xs = (0..n).map(|_| Scalar::random(&mut rng)).collect::>(); let check = xs.iter().map(|xi| xi * xi).sum::(); // Construct points G_i = x_i * B diff --git a/src/montgomery.rs b/src/montgomery.rs index 7bb4a294f..5f4033487 100644 --- a/src/montgomery.rs +++ b/src/montgomery.rs @@ -57,7 +57,7 @@ use core::{ use crate::constants::{APLUS2_OVER_FOUR, MONTGOMERY_A, MONTGOMERY_A_NEG}; use crate::edwards::{CompressedEdwardsY, EdwardsPoint}; use crate::field::FieldElement; -use crate::scalar::Scalar; +use crate::scalar::{clamp_integer, Scalar}; use crate::traits::Identity; @@ -123,6 +123,34 @@ impl MontgomeryPoint { EdwardsPoint::mul_base(scalar).to_montgomery() } + /// Multiply this point by `clamp_integer(bytes)`. For a description of clamping, see + /// [`clamp_integer`]. + pub fn mul_clamped(self, bytes: [u8; 32]) -> Self { + // We have to construct a Scalar that is not reduced mod l, which breaks scalar invariant + // #2. But #2 is not necessary for correctness of variable-base multiplication. All that + // needs to hold is invariant #1, i.e., the scalar is less than 2^255. This is guaranteed + // by clamping. + // Further, we don't do any reduction or arithmetic with this clamped value, so there's no + // issues arising from the fact that the curve point is not necessarily in the prime-order + // subgroup. + let s = Scalar { + bytes: clamp_integer(bytes), + }; + s * self + } + + /// Multiply the basepoint by `clamp_integer(bytes)`. For a description of clamping, see + /// [`clamp_integer`]. + pub fn mul_base_clamped(bytes: [u8; 32]) -> Self { + // See reasoning in Self::mul_clamped why it is OK to make an unreduced Scalar here. We + // note that fixed-base multiplication is also defined for all values of `bytes` less than + // 2^255. 
+ let s = Scalar { + bytes: clamp_integer(bytes), + }; + Self::mul_base(&s) + } + /// View this `MontgomeryPoint` as an array of bytes. pub const fn as_bytes(&self) -> &[u8; 32] { &self.0 @@ -342,6 +370,9 @@ impl<'a, 'b> Mul<&'b Scalar> for &'a MontgomeryPoint { W: FieldElement::ONE, }; + // NOTE: The below swap-double-add routine skips the first iteration, i.e., it assumes the + // MSB of `scalar` is 0. This is allowed, since it follows from Scalar invariant #1. + // Go through the bits from most to least significant, using a sliding window of 2 let mut bits = scalar.bits_le().rev(); let mut prev_bit = bits.next().unwrap(); @@ -391,8 +422,7 @@ mod test { #[cfg(feature = "alloc")] use alloc::vec::Vec; - #[cfg(feature = "rand_core")] - use rand_core::OsRng; + use rand_core::RngCore; #[test] fn identity_in_different_coordinates() { @@ -476,18 +506,44 @@ mod test { } #[test] - #[cfg(feature = "rand_core")] fn montgomery_ladder_matches_edwards_scalarmult() { - let mut csprng: OsRng = OsRng; + let mut csprng = rand_core::OsRng; - let s: Scalar = Scalar::random(&mut csprng); - let p_edwards = EdwardsPoint::mul_base(&s); - let p_montgomery: MontgomeryPoint = p_edwards.to_montgomery(); + for _ in 0..100 { + let s: Scalar = Scalar::random(&mut csprng); + let p_edwards = EdwardsPoint::mul_base(&s); + let p_montgomery: MontgomeryPoint = p_edwards.to_montgomery(); - let expected = s * p_edwards; - let result = s * p_montgomery; + let expected = s * p_edwards; + let result = s * p_montgomery; - assert_eq!(result, expected.to_montgomery()) + assert_eq!(result, expected.to_montgomery()) + } + } + + /// Check that mul_base_clamped and mul_clamped agree + #[test] + fn mul_base_clamped() { + let mut csprng = rand_core::OsRng; + + // Test agreement on a large integer. Even after clamping, this is not reduced mod l. 
+ let a_bytes = [0xff; 32]; + assert_eq!( + MontgomeryPoint::mul_base_clamped(a_bytes), + constants::X25519_BASEPOINT.mul_clamped(a_bytes) + ); + + // Test agreement on random integers + for _ in 0..100 { + // This will be reduced mod l with probability l / 2^256 ≈ 6.25% + let mut a_bytes = [0u8; 32]; + csprng.fill_bytes(&mut a_bytes); + + assert_eq!( + MontgomeryPoint::mul_base_clamped(a_bytes), + constants::X25519_BASEPOINT.mul_clamped(a_bytes) + ); + } } #[cfg(feature = "alloc")] diff --git a/src/scalar.rs b/src/scalar.rs index f76d97962..829f56021 100644 --- a/src/scalar.rs +++ b/src/scalar.rs @@ -110,34 +110,6 @@ //! See also `Scalar::hash_from_bytes` and `Scalar::from_hash` that //! reduces a \\(512\\)-bit integer, if the optional `digest` feature //! has been enabled. -//! -//! Finally, to create a `Scalar` with a specific bit-pattern -//! (e.g., for compatibility with X/Ed25519 -//! ["clamping"](https://github.com/isislovecruft/ed25519-dalek/blob/f790bd2ce/src/ed25519.rs#L349)), -//! use [`Scalar::from_bits`]. This constructs a scalar with exactly -//! the bit pattern given, without any assurances as to reduction -//! modulo the group order: -//! -//! ``` -//! use curve25519_dalek::scalar::Scalar; -//! -//! let l_plus_two_bytes: [u8; 32] = [ -//! 0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, -//! 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, -//! 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -//! 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, -//! ]; -//! let a: Scalar = Scalar::from_bits(l_plus_two_bytes); -//! -//! let two: Scalar = Scalar::ONE + Scalar::ONE; -//! -//! assert!(a != two); // the scalar is not reduced (mod l)… -//! assert!(! bool::from(a.is_canonical())); // …and therefore is not canonical. -//! assert!(a.reduce() == two); // if we were to reduce it manually, it would be. -//! ``` -//! -//! The resulting `Scalar` has exactly the specified bit pattern, -//! **except for the highest bit, which will be set to 0**. 
use core::borrow::Borrow; use core::cmp::{Eq, PartialEq}; @@ -211,24 +183,45 @@ cfg_if! { } } -/// The `Scalar` struct holds an integer \\(s < 2\^{255} \\) which -/// represents an element of \\(\mathbb Z / \ell\\). +/// The `Scalar` struct holds an element of \\(\mathbb Z / \ell\mathbb Z \\). #[allow(clippy::derive_hash_xor_eq)] #[derive(Copy, Clone, Hash)] pub struct Scalar { /// `bytes` is a little-endian byte encoding of an integer representing a scalar modulo the /// group order. /// - /// # Invariant + /// # Invariant #1 /// - /// The integer representing this scalar must be bounded above by \\(2\^{255}\\), or - /// equivalently the high bit of `bytes[31]` must be zero. + /// The integer representing this scalar is less than \\(2\^{255}\\). That is, the most + /// significant bit of `bytes[31]` is 0. + /// + /// This is required for `EdwardsPoint` variable- and fixed-base multiplication, because most + /// integers above 2^255 are unrepresentable in our radix-16 NAF (see [`Self::as_radix_16`]). + /// The invariant is also required because our `MontgomeryPoint` multiplication assumes the MSB + /// is 0 (see `MontgomeryPoint::mul`). + /// + /// # Invariant #2 (weak) + /// + /// The integer representing this scalar is less than \\(2\^{255} - 19 \\), i.e., it represents + /// a canonical representative of an element of \\( \mathbb Z / \ell\mathbb Z \\). This is + /// stronger than invariant #1. It also sometimes has to be broken. + /// + /// This invariant is deliberately broken in the implementation of `EdwardsPoint::{mul_clamped, + /// mul_base_clamped}`, `MontgomeryPoint::{mul_clamped, mul_base_clamped}`, and + /// `BasepointTable::mul_base_clamped`. This is not an issue though. As mentioned above, + /// scalar-point multiplication is defined for any choice of `bytes` that satisfies invariant + /// #1. Since clamping guarantees invariant #1 is satisfied, these operations are well defined. 
+ /// + /// Note: Scalar-point mult is the _only_ thing you can do safely with an unreduced scalar. + /// Scalar-scalar addition and subtraction are NOT correct when using unreduced scalars. + /// Multiplication is correct, but this is only due to a quirk of our implementation, and not + /// guaranteed to hold in general in the future. + /// + /// Note: It is not possible to construct an unreduced `Scalar` from the public API unless the + /// `legacy_compatibility` feature is enabled (thus making `Scalar::from_bits` public). Thus, for all + /// public non-legacy uses, invariant #2 + /// always holds. /// - /// This ensures that there is room for a carry bit when computing a NAF representation. - // - // XXX This is pub(crate) so we can write literal constants. - // Alternatively we could make the Scalar constructors `const fn`s and use those instead. - // See dalek-cryptography/curve25519-dalek#493 pub(crate) bytes: [u8; 32], } @@ -257,51 +250,29 @@ impl Scalar { /// # Return /// /// - `Some(s)`, where `s` is the `Scalar` corresponding to `bytes`, - /// if `bytes` is a canonical byte representation; + /// if `bytes` is a canonical byte representation modulo the group order \\( \ell \\); /// - `None` if `bytes` is not a canonical byte representation. pub fn from_canonical_bytes(bytes: [u8; 32]) -> CtOption { let high_bit_unset = (bytes[31] >> 7).ct_eq(&0); - let candidate = Scalar::from_bits(bytes); + let candidate = Scalar { bytes }; CtOption::new(candidate, high_bit_unset & candidate.is_canonical()) } - /// Construct a `Scalar` from the low 255 bits of a 256-bit integer. - /// - /// This function is intended for applications like X25519 which - /// require specific bit-patterns when performing scalar - /// multiplication. + /// Construct a `Scalar` from the low 255 bits of a 256-bit integer. This breaks the invariant + /// that scalars are always reduced.
Scalar-scalar arithmetic, i.e., addition, subtraction, + /// multiplication, **does not work** on scalars produced from this function. You may only use + /// the output of this function for `EdwardsPoint::mul`, `MontgomeryPoint::mul`, and + /// `EdwardsPoint::vartime_double_scalar_mul_basepoint`. **Do not use this function** unless + /// you absolutely have to. + #[cfg(feature = "legacy_compatibility")] + #[deprecated( + since = "4.0.0", + note = "This constructor outputs scalars with undefined scalar-scalar arithmetic. See docs." + )] pub const fn from_bits(bytes: [u8; 32]) -> Scalar { let mut s = Scalar { bytes }; - // Ensure that s < 2^255 by masking the high bit - s.bytes[31] &= 0b0111_1111; - - s - } - - /// Construct a `Scalar` from the low 255 bits of a little-endian 256-bit integer - /// `clamping` it's value to be in range - /// - /// **n ∈ 2^254 + 8\*{0, 1, 2, 3, . . ., 2^251 − 1}** - /// - /// # Explanation of `clamping` - /// - /// For Curve25519, h = 8, and multiplying by 8 is the same as a binary left-shift by 3 bits. - /// If you take a secret scalar value between 2^251 and 2^252 – 1 and left-shift by 3 bits - /// then you end up with a 255-bit number with the most significant bit set to 1 and - /// the least-significant three bits set to 0. - /// - /// The Curve25519 clamping operation takes **an arbitrary 256-bit random value** and - /// clears the most-significant bit (making it a 255-bit number), sets the next bit, and then - /// clears the 3 least-significant bits. In other words, it directly creates a scalar value that is - /// in the right form and pre-multiplied by the cofactor. - /// - /// See for details - pub const fn from_bits_clamped(bytes: [u8; 32]) -> Scalar { - let mut s = Scalar { bytes }; - - s.bytes[0] &= 0b1111_1000; + // Ensure invariant #1 holds. That is, make s < 2^255 by masking the high bit. 
s.bytes[31] &= 0b0111_1111; - s.bytes[31] |= 0b0100_0000; s } @@ -364,15 +335,9 @@ impl<'a, 'b> Add<&'b Scalar> for &'a Scalar { type Output = Scalar; #[allow(non_snake_case)] fn add(self, _rhs: &'b Scalar) -> Scalar { - // The UnpackedScalar::add function produces reduced outputs - // if the inputs are reduced. However, these inputs may not - // be reduced -- they might come from Scalar::from_bits. So - // after computing the sum, we explicitly reduce it mod l - // before repacking. - let sum = UnpackedScalar::add(&self.unpack(), &_rhs.unpack()); - let sum_R = UnpackedScalar::mul_internal(&sum, &constants::R); - let sum_mod_l = UnpackedScalar::montgomery_reduce(&sum_R); - sum_mod_l.pack() + // The UnpackedScalar::add function produces reduced outputs if the inputs are reduced. By + // Scalar invariant #1, this is always the case. + UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack() } } @@ -390,16 +355,9 @@ impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar { type Output = Scalar; #[allow(non_snake_case)] fn sub(self, rhs: &'b Scalar) -> Scalar { - // The UnpackedScalar::sub function requires reduced inputs - // and produces reduced output. However, these inputs may not - // be reduced -- they might come from Scalar::from_bits. So - // we explicitly reduce the inputs. - let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R); - let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R); - let rhs_R = UnpackedScalar::mul_internal(&rhs.unpack(), &constants::R); - let rhs_mod_l = UnpackedScalar::montgomery_reduce(&rhs_R); - - UnpackedScalar::sub(&self_mod_l, &rhs_mod_l).pack() + // The UnpackedScalar::sub function produces reduced outputs if the inputs are reduced. By + // Scalar invariant #1, this is always the case. 
+ UnpackedScalar::sub(&self.unpack(), &rhs.unpack()).pack() } } @@ -467,7 +425,10 @@ impl<'de> Deserialize<'de> for Scalar { type Value = Scalar; fn expecting(&self, formatter: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - formatter.write_str("a valid point in Edwards y + sign format") + formatter.write_str( + "a sequence of 32 bytes whose little-endian interpretation is less than the \ + basepoint order ℓ", + ) } fn visit_seq(self, mut seq: A) -> Result @@ -613,7 +574,7 @@ impl Scalar { /// /// # Returns /// - /// A random scalar within ℤ/lℤ. + /// A random scalar within \\(\mathbb{Z} / \ell\mathbb{Z}\\). /// /// # Example /// @@ -691,10 +652,13 @@ impl Scalar { /// let s = Scalar::from_hash(h); /// /// println!("{:?}", s.to_bytes()); - /// assert!(s == Scalar::from_bits([ 21, 88, 208, 252, 63, 122, 210, 152, - /// 154, 38, 15, 23, 16, 167, 80, 150, - /// 192, 221, 77, 226, 62, 25, 224, 148, - /// 239, 48, 176, 10, 185, 69, 168, 11, ])); + /// assert_eq!( + /// s.to_bytes(), + /// [ 21, 88, 208, 252, 63, 122, 210, 152, + /// 154, 38, 15, 23, 16, 167, 80, 150, + /// 192, 221, 77, 226, 62, 25, 224, 148, + /// 239, 48, 176, 10, 185, 69, 168, 11, ], + /// ); /// # } /// ``` pub fn from_hash(hash: D) -> Scalar @@ -888,7 +852,7 @@ impl Scalar { /// /// The length of the NAF is at most one more than the length of /// the binary representation of \\(k\\). This is why the - /// `Scalar` type maintains an invariant that the top bit is + /// `Scalar` type maintains an invariant (invariant #1) that the top bit is /// \\(0\\), so that the NAF of a scalar has at most 256 digits. /// /// Intuitively, this is like a binary expansion, except that we @@ -1007,6 +971,10 @@ impl Scalar { /// a = a\_0 + a\_1 16\^1 + \cdots + a_{63} 16\^{63}, /// $$ /// with \\(-8 \leq a_i < 8\\) for \\(0 \leq i < 63\\) and \\(-8 \leq a_{63} \leq 8\\). + /// + /// The largest value that can be decomposed like this is just over \\(2^{255}\\). 
Thus, in + /// order to not error, the top bit MUST NOT be set, i.e., `Self` MUST be less than + /// \\(2^{255}\\). pub(crate) fn as_radix_16(&self) -> [i8; 64] { debug_assert!(self[31] <= 127); let mut output = [0i8; 64]; @@ -1049,10 +1017,7 @@ impl Scalar { debug_assert!(w <= 8); let digits_count = match w { - 4 => (256 + w - 1) / w, - 5 => (256 + w - 1) / w, - 6 => (256 + w - 1) / w, - 7 => (256 + w - 1) / w, + 4..=7 => (256 + w - 1) / w, // See comment in to_radix_2w on handling the terminal carry. 8 => (256 + w - 1) / w + 1_usize, _ => panic!("invalid radix parameter"), @@ -1062,14 +1027,18 @@ impl Scalar { digits_count } - /// Creates a representation of a Scalar in radix 32, 64, 128 or 256 for use with the Pippenger algorithm. - /// For lower radix, use `to_radix_16`, which is used by the Straus multi-scalar multiplication. - /// Higher radixes are not supported to save cache space. Radix 256 is near-optimal even for very - /// large inputs. + /// Creates a representation of a Scalar in radix \\( 2^w \\) with \\(w = 4, 5, 6, 7, 8\\) for + /// use with the Pippenger algorithm. Higher radixes are not supported to save cache space. + /// Radix 256 is near-optimal even for very large inputs. /// - /// Radix below 32 or above 256 is prohibited. + /// Radix below 16 or above 256 is prohibited. /// This method returns digits in a fixed-sized array, excess digits are zeroes. /// + /// For radix 16, `Self` must be less than \\(2^{255}\\). This is because most integers larger + /// than \\(2^{255}\\) are unrepresentable in the form described below for \\(w = 4\\). This + /// would be true for \\(w = 8\\) as well, but it is compensated for by increasing the size + /// hint by 1. 
+ /// /// ## Scalar representation /// /// Radix \\(2\^w\\), with \\(n = ceil(256/w)\\) coefficients in \\([-(2\^w)/2,(2\^w)/2)\\), @@ -1123,12 +1092,12 @@ impl Scalar { digits[i] = ((coef as i64) - (carry << w) as i64) as i8; } - // When w < 8, we can fold the final carry onto the last digit d, + // When 4 < w < 8, we can fold the final carry onto the last digit d, // because d < 2^w/2 so d + carry*2^w = d + 1*2^w < 2^(w+1) < 2^8. // // When w = 8, we can't fit carry*2^w into an i8. This should // not happen anyways, because the final carry will be 0 for - // reduced scalars, but the Scalar invariant allows 255-bit scalars. + // reduced scalars, but Scalar invariant #1 allows 255-bit scalars. // To handle this, we expand the size_hint by 1 when w=8, // and accumulate the final carry onto another digit. match w { @@ -1146,28 +1115,16 @@ impl Scalar { /// Reduce this `Scalar` modulo \\(\ell\\). #[allow(non_snake_case)] - pub fn reduce(&self) -> Scalar { + fn reduce(&self) -> Scalar { let x = self.unpack(); let xR = UnpackedScalar::mul_internal(&x, &constants::R); let x_mod_l = UnpackedScalar::montgomery_reduce(&xR); x_mod_l.pack() } - /// Check whether this `Scalar` is the canonical representative mod \\(\ell\\). - /// - /// ``` - /// # use curve25519_dalek::scalar::Scalar; - /// # use subtle::ConditionallySelectable; - /// # fn main() { - /// // 2^255 - 1, since `from_bits` clears the high bit - /// let _2_255_minus_1 = Scalar::from_bits([0xff;32]); - /// assert!(! bool::from(_2_255_minus_1.is_canonical())); - /// - /// let reduced = _2_255_minus_1.reduce(); - /// assert!(bool::from(reduced.is_canonical())); - /// # } - /// ``` - pub fn is_canonical(&self) -> Choice { + /// Check whether this `Scalar` is the canonical representative mod \\(\ell\\). This is not + /// public because any `Scalar` that is publicly observed is reduced, by scalar invariant #2. 
+ fn is_canonical(&self) -> Choice { self.ct_eq(&self.reduce()) } } @@ -1264,14 +1221,42 @@ fn read_le_u64_into(src: &[u8], dst: &mut [u64]) { } } +/// _Clamps_ the given little-endian representation of a 32-byte integer. Clamping the value puts +/// it in the range: +/// +/// **n ∈ 2^254 + 8\*{0, 1, 2, 3, . . ., 2^251 − 1}** +/// +/// # Explanation of clamping +/// +/// For Curve25519, h = 8, and multiplying by 8 is the same as a binary left-shift by 3 bits. +/// If you take a secret scalar value between 2^251 and 2^252 – 1 and left-shift by 3 bits +/// then you end up with a 255-bit number with the most significant bit set to 1 and +/// the least-significant three bits set to 0. +/// +/// The Curve25519 clamping operation takes **an arbitrary 256-bit random value** and +/// clears the most-significant bit (making it a 255-bit number), sets the next bit, and then +/// clears the 3 least-significant bits. In other words, it directly creates a scalar value that is +/// in the right form and pre-multiplied by the cofactor. +/// +/// See [here](https://neilmadden.blog/2020/05/28/whats-the-curve25519-clamping-all-about/) for +/// more details. +pub const fn clamp_integer(mut bytes: [u8; 32]) -> [u8; 32] { + bytes[0] &= 0b1111_1000; + bytes[31] &= 0b0111_1111; + bytes[31] |= 0b0100_0000; + bytes +} + #[cfg(test)] -mod test { +pub(crate) mod test { use super::*; use crate::constants; #[cfg(feature = "alloc")] use alloc::vec::Vec; + use rand::RngCore; + /// x = 2238329342913194256032495932344128051776374960164957527413114840482143558222 pub static X: Scalar = Scalar { bytes: [ @@ -1297,6 +1282,19 @@ mod test { ], }; + /// The largest scalar that satisfies invariant #1, i.e., the largest scalar with the top bit + /// set to 0. Since this scalar violates invariant #2, i.e., it's greater than the modulus `l`, + /// addition and subtraction are broken. 
The only thing you can do with this is scalar-point + /// multiplication (and actually also scalar-scalar multiplication, but that's just a quirk of + /// our implementation). + pub(crate) static LARGEST_UNREDUCED_SCALAR: Scalar = Scalar { + bytes: [ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x7f, + ], + }; + /// x*y = 5690045403673944803228348699031245560686958845067437804563560795922180092780 static X_TIMES_Y: Scalar = Scalar { bytes: [ @@ -1336,29 +1334,16 @@ mod test { 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, ]; - static LARGEST_ED25519_S: Scalar = Scalar { - bytes: [ - 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x7f, - ], - }; - - static CANONICAL_LARGEST_ED25519_S_PLUS_ONE: Scalar = Scalar { + const BASEPOINT_ORDER_MINUS_ONE: Scalar = Scalar { bytes: [ - 0x7e, 0x34, 0x47, 0x75, 0x47, 0x4a, 0x7f, 0x97, 0x23, 0xb6, 0x3a, 0x8b, 0xe9, 0x2a, - 0xe7, 0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x0f, + 0xec, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, + 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x10, ], }; - static CANONICAL_LARGEST_ED25519_S_MINUS_ONE: Scalar = Scalar { - bytes: [ - 0x7c, 0x34, 0x47, 0x75, 0x47, 0x4a, 0x7f, 0x97, 0x23, 0xb6, 0x3a, 0x8b, 0xe9, 0x2a, - 0xe7, 0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x0f, - ], - }; + /// The largest clamped integer + static LARGEST_CLAMPED_INTEGER: [u8; 32] = clamp_integer(LARGEST_UNREDUCED_SCALAR.bytes); #[test] fn fuzzer_testcase_reduction() { @@ -1460,77 +1445,14 @@ mod test { #[test] 
fn add_reduces() { - // Check that the addition works - assert_eq!( - (LARGEST_ED25519_S + Scalar::ONE).reduce(), - CANONICAL_LARGEST_ED25519_S_PLUS_ONE - ); - // Check that the addition reduces - assert_eq!( - LARGEST_ED25519_S + Scalar::ONE, - CANONICAL_LARGEST_ED25519_S_PLUS_ONE - ); + // Check that addition wraps around the modulus + assert_eq!(BASEPOINT_ORDER_MINUS_ONE + Scalar::ONE, Scalar::ZERO); } #[test] fn sub_reduces() { - // Check that the subtraction works - assert_eq!( - (LARGEST_ED25519_S - Scalar::ONE).reduce(), - CANONICAL_LARGEST_ED25519_S_MINUS_ONE - ); - // Check that the subtraction reduces - assert_eq!( - LARGEST_ED25519_S - Scalar::ONE, - CANONICAL_LARGEST_ED25519_S_MINUS_ONE - ); - } - - #[test] - fn quarkslab_scalar_overflow_does_not_occur() { - // Check that manually-constructing large Scalars with - // from_bits cannot produce incorrect results. - // - // The from_bits function is required to implement X/Ed25519, - // while all other methods of constructing a Scalar produce - // reduced Scalars. However, this "invariant loophole" allows - // constructing large scalars which are not reduced mod l. - // - // This issue was discovered independently by both Jack - // "str4d" Grigg (issue #238), who noted that reduction was - // not performed on addition, and Laurent Grémy & Nicolas - // Surbayrole of Quarkslab, who noted that it was possible to - // cause an overflow and compute incorrect results. - // - // This test is adapted from the one suggested by Quarkslab. 
- - let large_bytes = [ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x7f, - ]; - - let a = Scalar::from_bytes_mod_order(large_bytes); - let b = Scalar::from_bits(large_bytes); - - assert_eq!(a, b.reduce()); - - let a_3 = a + a + a; - let b_3 = b + b + b; - - assert_eq!(a_3, b_3); - - let neg_a = -a; - let neg_b = -b; - - assert_eq!(neg_a, neg_b); - - let minus_a_3 = Scalar::ZERO - a - a - a; - let minus_b_3 = Scalar::ZERO - b - b - b; - - assert_eq!(minus_a_3, minus_b_3); - assert_eq!(minus_a_3, -a_3); - assert_eq!(minus_b_3, -b_3); + // Check that subtraction wraps around the modulus + assert_eq!(Scalar::ZERO - Scalar::ONE, BASEPOINT_ORDER_MINUS_ONE); } #[test] @@ -1825,8 +1747,9 @@ mod test { // from the produced representation precisely. let cases = (2..100) .map(|s| Scalar::from(s as u64).invert()) - // The largest unreduced scalar, s = 2^255-1 - .chain(iter::once(Scalar::from_bits([0xff; 32]))); + // The largest unreduced scalar, s = 2^255-1. This is not reduced mod l. Scalar mult + // still works though. 
+ .chain(iter::once(LARGEST_UNREDUCED_SCALAR)); for scalar in cases { test_pippenger_radix_iter(scalar, 6); @@ -1900,37 +1823,69 @@ mod test { #[test] fn test_scalar_clamp() { let input = A_SCALAR.bytes; - let expected = Scalar { - bytes: [ - 0x18, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8, - 0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f, - 0x23, 0x76, 0xef, 0x49, - ], - }; - let actual = Scalar::from_bits_clamped(input); + let expected = [ + 0x18, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8, + 0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f, + 0x23, 0x76, 0xef, 0x49, + ]; + let actual = clamp_integer(input); assert_eq!(actual, expected); - let expected = Scalar { - bytes: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0x40, - ], - }; - let actual = Scalar::from_bits_clamped([0; 32]); + let expected = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0x40, + ]; + let actual = clamp_integer([0; 32]); assert_eq!(expected, actual); - let expected = Scalar { - bytes: [ - 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x7f, - ], - }; - let actual = Scalar::from_bits_clamped([0xff; 32]); + let expected = [ + 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x7f, + ]; + let actual = clamp_integer([0xff; 32]); assert_eq!(actual, expected); assert_eq!( - LARGEST_ED25519_S.bytes, - Scalar::from_bits_clamped(LARGEST_ED25519_S.bytes).bytes - ) + LARGEST_CLAMPED_INTEGER, + clamp_integer(LARGEST_CLAMPED_INTEGER) + ); + } + + // Check that a * b == a.reduce() * 
b.reduce() for ANY scalars a,b, even ones that violate
+    // invariant #1, i.e., a,b > 2^255. Old versions of ed25519-dalek did multiplication where a
+    // was reduced and b was clamped and unreduced. This checks that that was always well-defined.
+    #[test]
+    fn test_mul_reduction_invariance() {
+        let mut rng = rand::thread_rng();
+
+        for _ in 0..10 {
+            // Also define c that's clamped. We'll make sure that clamping doesn't affect
+            // computation
+            let (a, b, c) = {
+                let mut a_bytes = [0u8; 32];
+                let mut b_bytes = [0u8; 32];
+                let mut c_bytes = [0u8; 32];
+                rng.fill_bytes(&mut a_bytes);
+                rng.fill_bytes(&mut b_bytes);
+                rng.fill_bytes(&mut c_bytes);
+                (
+                    Scalar { bytes: a_bytes },
+                    Scalar { bytes: b_bytes },
+                    Scalar {
+                        bytes: clamp_integer(c_bytes),
+                    },
+                )
+            };
+
+            // Make sure this is the same product no matter how you cut it
+            let reduced_mul_ab = a.reduce() * b.reduce();
+            let reduced_mul_ac = a.reduce() * c.reduce();
+            assert_eq!(a * b, reduced_mul_ab);
+            assert_eq!(a.reduce() * b, reduced_mul_ab);
+            assert_eq!(a * b.reduce(), reduced_mul_ab);
+            assert_eq!(a * c, reduced_mul_ac);
+            assert_eq!(a.reduce() * c, reduced_mul_ac);
+            assert_eq!(a * c.reduce(), reduced_mul_ac);
+        }
+    }
 }
diff --git a/src/traits.rs b/src/traits.rs
index 31ba9bea9..a742a2dde 100644
--- a/src/traits.rs
+++ b/src/traits.rs
@@ -17,7 +17,7 @@ use core::borrow::Borrow;
 
 use subtle;
 
-use crate::scalar::Scalar;
+use crate::scalar::{clamp_integer, Scalar};
 
 // ------------------------------------------------------------------------
 // Public Traits
@@ -61,6 +61,18 @@ pub trait BasepointTable {
 
     /// Multiply a `scalar` by this precomputed basepoint table, in constant time.
     fn mul_base(&self, scalar: &Scalar) -> Self::Point;
+
+    /// Multiply `clamp_integer(bytes)` by this precomputed basepoint table, in constant time. For
+    /// a description of clamping, see [`clamp_integer`]. 
+ fn mul_base_clamped(&self, bytes: [u8; 32]) -> Self::Point { + // Basepoint multiplication is defined for all values of `bytes` up to and including + // 2^255 - 1. The limit comes from the fact that scalar.as_radix_16() doesn't work for + // most scalars larger than 2^255. + let s = Scalar { + bytes: clamp_integer(bytes), + }; + self.mul_base(&s) + } } /// A trait for constant-time multiscalar multiplication without precomputation.