From 8c3265424e9bd366b6752febedc22ede0d3d0558 Mon Sep 17 00:00:00 2001
From: Nicolas Stalder
Date: Tue, 29 Sep 2020 23:25:39 +0200
Subject: [PATCH] Temporary commit for testing

---
 .envrc           |   1 +
 src/lib.rs       |   2 +
 src/scalar.rs    | 312 ++++++++++++++++++++++++++++-------------------
 src/signature.rs |  35 ++++--
 4 files changed, 218 insertions(+), 132 deletions(-)

diff --git a/.envrc b/.envrc
index df8e0cf..809b5a9 100644
--- a/.envrc
+++ b/.envrc
@@ -1,2 +1,3 @@
 # to use this, install [direnv](https://direnv.net/)
 source venv/bin/activate
+unset PS1
diff --git a/src/lib.rs b/src/lib.rs
index b8c00c5..d03ec9a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -177,6 +177,8 @@ pub use hash::Sha512;
 mod field;
 pub use field::{FieldElement, FieldImplementation};
 
+mod scalar29;
+
 mod scalar;
 pub use scalar::Scalar;
 
diff --git a/src/scalar.rs b/src/scalar.rs
index 5ad91d0..fa0969d 100644
--- a/src/scalar.rs
+++ b/src/scalar.rs
@@ -19,19 +21,21 @@ pub struct Scalar(
     pub [u8; SCALAR_LENGTH]
 );
 
-/// NB: The buffer is assumed to be zero from
-/// byte 32 onward - each operation should call
-/// `clear_upper_half` to ensure this.
-///
-/// Technically, in TweetNaCl, there's an operation
-/// (a, b, c) -> a*b + c, which needs only one call
-/// to clear upper half, but I think we can live with
-/// this slight inefficiency - if scalar ring operations
-/// become a bottleneck, there should be bigger improvements
-/// achievable by other means.
-pub(crate) struct TweetNaclScalar(
-    pub (crate) [i64; 64]
-);
+type UnpackedScalar = crate::scalar29::Scalar29;
+
+///// NB: The buffer is assumed to be zero from
+///// byte 32 onward - each operation should call
+///// `clear_upper_half` to ensure this.
+/////
+///// Technically, in TweetNaCl, there's an operation
+///// (a, b, c) -> a*b + c, which needs only one call
+///// to clear upper half, but I think we can live with
+///// this slight inefficiency - if scalar ring operations
+///// become a bottleneck, there should be bigger improvements
+///// achievable by other means.
+//pub struct TweetNaclScalar(
+//    pub (crate) [i64; 64]
+//);
 
 impl From<&[u8; SCALAR_LENGTH]> for Scalar {
     fn from(bytes: &[u8; SCALAR_LENGTH]) -> Scalar {
@@ -39,139 +41,149 @@ impl From<&[u8; SCALAR_LENGTH]> for Scalar {
     }
 }
 
-impl From<&[u8; 64]> for TweetNaclScalar {
-    fn from(bytes: &[u8; 64]) -> TweetNaclScalar {
-        let mut x: [i64; 64] = [0; 64];
-        for i in 0..64 {
-            x[i] = bytes[i] as i64;
-        }
-        let mut s = TweetNaclScalar(x);
-        s.clear_upper_half();
-        s
-    }
-}
+// impl From<&[u8; 64]> for TweetNaclScalar {
+//     fn from(bytes: &[u8; 64]) -> TweetNaclScalar {
+//         let mut x: [i64; 64] = [0; 64];
+//         for i in 0..64 {
+//             x[i] = bytes[i] as i64;
+//         }
+//         let mut s = TweetNaclScalar(x);
+//         s.clear_upper_half();
+//         s
+//     }
+// }
 
-impl From<Scalar> for TweetNaclScalar {
-    fn from(scalar: Scalar) -> TweetNaclScalar {
-        let mut x: [i64; 64] = [0; 64];
-        for i in 0..32 {
-            x[i] = scalar.0[i] as i64;
-        }
-        TweetNaclScalar(x)
-    }
-}
+// impl From<Scalar> for TweetNaclScalar {
+//     fn from(scalar: Scalar) -> TweetNaclScalar {
+//         let mut x: [i64; 64] = [0; 64];
+//         for i in 0..32 {
+//             x[i] = scalar.0[i] as i64;
+//         }
+//         TweetNaclScalar(x)
+//     }
+// }
 
-impl From<&Scalar> for TweetNaclScalar {
-    fn from(scalar: &Scalar) -> TweetNaclScalar {
-        let mut x: [i64; 64] = [0; 64];
-        for i in 0..32 {
-            x[i] = scalar.0[i] as i64;
-        }
-        TweetNaclScalar(x)
-    }
-}
+// impl From<&Scalar> for TweetNaclScalar {
+//     fn from(scalar: &Scalar) -> TweetNaclScalar {
+//         let mut x: [i64; 64] = [0; 64];
+//         for i in 0..32 {
+//             x[i] = scalar.0[i] as i64;
+//         }
+//         TweetNaclScalar(x)
+//     }
+// }
 
-impl TweetNaclScalar {
-    pub(crate) fn clear_upper_half(&mut self) {
-        let x = &mut self.0;
-        #[allow(non_snake_case)]
-        let L = Scalar::L;
-        for i in (32..=63).rev() {
-            let mut carry: i64 = 0;
-            for j in (i - 32)..(i - 12) {
-                // x[j] += carry - 16 * x[i] * L[j - (i - 32)];
-                // C code doesn't care about u64 vs i64...
-                x[j] += carry - 16 * x[i] * L[j - (i - 32)] as i64;
-                carry = (x[j] + 128) >> 8;
-                x[j] -= carry << 8;
-            }
-            // x[j] += carry; // C code uses out-of-scope variable j
-            x[i - 12] += carry;
-            x[i] = 0;
-        }
-    }
+// impl TweetNaclScalar {
+//     pub(crate) fn clear_upper_half(&mut self) {
+//         let x = &mut self.0;
+//         #[allow(non_snake_case)]
+//         let L = Scalar::L;
+//         for i in (32..=63).rev() {
+//             let mut carry: i64 = 0;
+//             for j in (i - 32)..(i - 12) {
+//                 // x[j] += carry - 16 * x[i] * L[j - (i - 32)];
+//                 // C code doesn't care about u64 vs i64...
+//                 x[j] += carry - 16 * x[i] * L[j - (i - 32)] as i64;
+//                 carry = (x[j] + 128) >> 8;
+//                 x[j] -= carry << 8;
+//             }
+//             // x[j] += carry; // C code uses out-of-scope variable j
+//             x[i - 12] += carry;
+//             x[i] = 0;
+//         }
+//     }
 
-    pub(crate) fn reduce_modulo_ell(&mut self) -> Scalar {
-        // probably redundant
-        // self.clear_upper_half();
+//     pub fn reduce_modulo_ell(&mut self) -> Scalar {
+//         // probably redundant
+//         // self.clear_upper_half();
 
-        let x = &mut self.0;
+//         let x = &mut self.0;
 
-        #[allow(non_snake_case)]
-        let L = Scalar::L;
+//         #[allow(non_snake_case)]
+//         let L = Scalar::L;
 
-        let mut carry: i64 = 0;
-        for j in 0..32 {
-            // x[j] += carry - (x[31] >> 4) * L[j];
-            x[j] += carry - (x[31] >> 4) * L[j] as i64;
-            carry = x[j] >> 8;
-            x[j] &= 0xff;
-        }
+//         let mut carry: i64 = 0;
+//         for j in 0..32 {
+//             // x[j] += carry - (x[31] >> 4) * L[j];
+//             x[j] += carry - (x[31] >> 4) * L[j] as i64;
+//             carry = x[j] >> 8;
+//             x[j] &= 0xff;
+//         }
 
-        for j in 0..32 {
-            // x[j] -= carry * L[j];
-            x[j] -= carry * L[j] as i64;
-        }
+//         for j in 0..32 {
+//             // x[j] -= carry * L[j];
+//             x[j] -= carry * L[j] as i64;
+//         }
 
-        let mut r: [u8; 32] = Default::default();
-        for i in 0 ..32 {
-            x[i + 1] += x[i] >> 8;
-            // r[i] = x[i] & 0xff;
-            r[i] = ((x[i] as u64) & 0xff) as u8;
-        }
+//         let mut r: [u8; 32] = Default::default();
+//         for i in 0 ..32 {
+//             x[i + 1] += x[i] >> 8;
+//             // r[i] = x[i] & 0xff;
+//             r[i] = ((x[i] as u64) & 0xff) as u8;
+//         }
+
+//         Scalar(r)
 
-        Scalar(r)
+//     }
+// }
+impl UnpackedScalar {
+    /// Pack the limbs of this `UnpackedScalar` into a `Scalar`.
+    fn pack(&self) -> Scalar {
+        Scalar(self.to_bytes())
+    }
+}
-
-// // TODO: This all doesn't work so well...
-// // Case of "rustc bad at types" or case of PEBKAC?
-// impl<'a, 'b, T> Add<&'b T> for &'a TweetNaclScalar
-// where
-//     // S: Into<TweetNaclScalar>,
-//     &'b T: Into<&'b TweetNaclScalar>,
-// {
+impl<'a, 'b> Add<&'b Scalar> for &'a Scalar {
+    type Output = Scalar;
+    #[allow(non_snake_case)]
+    fn add(self, _rhs: &'b Scalar) -> Scalar {
+        // The UnpackedScalar::add function produces reduced outputs
+        // if the inputs are reduced. However, these inputs may not
+        // be reduced -- they might come from Scalar::from_bits. So
+        // after computing the sum, we explicitly reduce it mod l
+        // before repacking.
+        let sum = UnpackedScalar::add(&self.unpack(), &_rhs.unpack());
+        let sum_R = UnpackedScalar::mul_internal(&sum, &crate::scalar29::constants::R);
+        let sum_mod_l = UnpackedScalar::montgomery_reduce(&sum_R);
+        sum_mod_l.pack()
+    }
+}
+
+impl<'a, 'b> Mul<&'b Scalar> for &'a Scalar {
+    type Output = Scalar;
+    fn mul(self, _rhs: &'b Scalar) -> Scalar {
+        UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
+    }
+}
+
+// impl<'a, 'b> Add<&'b TweetNaclScalar> for &'a TweetNaclScalar {
 //     type Output = TweetNaclScalar;
-//     fn add(self, other: &'b T) -> TweetNaclScalar {
+//     fn add(self, other: &'b TweetNaclScalar) -> TweetNaclScalar {
 //         let mut sum: [i64; 64] = [0; 64];
-//         for (i, (ai, bi)) in self.0.iter().zip(other.into().0.iter()).enumerate() {
+//         for (i, (ai, bi)) in self.0.iter().zip(other.0.iter()).enumerate() {
 //             sum[i] = *ai + *bi;
 //         }
-//         // let mut sum = TweetNaclScalar(sum);
-//         // sum.clear_upper_half();
-//         // sum
+
+//         TweetNaclScalar(sum)
 //     }
 // }
 
-impl<'a, 'b> Add<&'b TweetNaclScalar> for &'a TweetNaclScalar {
-    type Output = TweetNaclScalar;
-    fn add(self, other: &'b TweetNaclScalar) -> TweetNaclScalar {
-        let mut sum: [i64; 64] = [0; 64];
-        for (i, (ai, bi)) in self.0.iter().zip(other.0.iter()).enumerate() {
-            sum[i] = *ai + *bi;
-        }
-
-        TweetNaclScalar(sum)
-    }
-}
+// impl<'a, 'b> Mul<&'b TweetNaclScalar> for &'a TweetNaclScalar {
+//     type Output = TweetNaclScalar;
+//     fn mul(self, other: &'b TweetNaclScalar) -> TweetNaclScalar {
+//         let mut product: [i64; 64] = [0; 64];
+//         for (i, ai) in self.0.iter().take(32).enumerate() {
+//             for (j, bj) in other.0.iter().take(32).enumerate() {
+//                 product[i + j] += *ai * *bj;
+//             }
+//         }
-
-impl<'a, 'b> Mul<&'b TweetNaclScalar> for &'a TweetNaclScalar {
-    type Output = TweetNaclScalar;
-    fn mul(self, other: &'b TweetNaclScalar) -> TweetNaclScalar {
-        let mut product: [i64; 64] = [0; 64];
-        for (i, ai) in self.0.iter().take(32).enumerate() {
-            for (j, bj) in other.0.iter().take(32).enumerate() {
-                product[i + j] += *ai * *bj;
-            }
-        }
-
-        let mut product = TweetNaclScalar(product);
-        product.clear_upper_half();
-        product
-    }
-}
+//         let mut product = TweetNaclScalar(product);
+//         product.clear_upper_half();
+//         product
+//     }
+// }
 
 impl Scalar {
     #[allow(non_snake_case)]
@@ -200,10 +212,60 @@ impl Scalar {
     }
 
     pub fn from_u256_le(x: &U256le) -> Scalar {
-        TweetNaclScalar::from(&Scalar(*x)).reduce_modulo_ell()
+        // TweetNaclScalar::from(&Scalar(*x)).reduce_modulo_ell()
+        // Temporarily allow s_unreduced.bytes > 2^255 ...
+        let s_unreduced = Scalar(x.clone());
+
+        // Then reduce mod the group order and return the reduced representative.
+        let s = s_unreduced.reduce();
+        debug_assert_eq!(0u8, s.0[31] >> 7);
+
+        s
     }
 
+    // /// Construct a `Scalar` by reducing a 256-bit little-endian integer
+    // /// modulo the group order \\( \ell \\).
+    // pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
+    //     // Temporarily allow s_unreduced.bytes > 2^255 ...
+    //     let s_unreduced = Scalar{bytes};
+
+    //     // Then reduce mod the group order and return the reduced representative.
+    //     let s = s_unreduced.reduce();
+    //     debug_assert_eq!(0u8, s[31] >> 7);
+
+    //     s
+    // }
+
     pub fn from_u512_le(x: &U512le) -> Scalar {
-        TweetNaclScalar::from(x).reduce_modulo_ell()
+        // TweetNaclScalar::from(x).reduce_modulo_ell()
+        UnpackedScalar::from_bytes_wide(x).pack()
+    }
+
+    // /// Construct a `Scalar` by reducing a 512-bit little-endian integer
+    // /// modulo the group order \\( \ell \\).
+    // pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
+    //     UnpackedScalar::from_bytes_wide(input).pack()
+    // }
+
+
+    /// Unpack this `Scalar` to an `UnpackedScalar` for faster arithmetic.
+    pub(crate) fn unpack(&self) -> UnpackedScalar {
+        UnpackedScalar::from_bytes(&self.0)
+    }
+
+    /// Reduce this `Scalar` modulo \\(\ell\\).
+    #[allow(non_snake_case)]
+    pub fn reduce(&self) -> Scalar {
+        let x = self.unpack();
+        let xR = UnpackedScalar::mul_internal(&x, &crate::scalar29::constants::R);
+        let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
+        x_mod_l.pack()
+    }
+
+    /// Check whether this `Scalar` is the canonical representative mod \\(\ell\\).
+    ///
+    /// This is intended for uses like input validation, where variable-time code is acceptable.
+    pub fn is_canonical(&self) -> bool {
+        *self == self.reduce()
     }
 }
diff --git a/src/signature.rs b/src/signature.rs
index 5f2f22b..169fbf1 100644
--- a/src/signature.rs
+++ b/src/signature.rs
@@ -22,7 +22,7 @@ use crate::{
     hash::Sha512,
     scalar::{
         Scalar,
-        TweetNaclScalar,
+        // TweetNaclScalar,
     },
 };
 
@@ -90,8 +90,10 @@ impl Keypair {
             .finalize();
         let h: Scalar = Scalar::from_u512_le(&second_hash);
 
-        let mut s = &r.into() + &(&h.into() * &TweetNaclScalar::from(&self.secret.scalar));
-        let s = s.reduce_modulo_ell();
+        // let mut s = &r.into() + &(&h.into() * &TweetNaclScalar::from(&self.secret.scalar));
+        let s = &r.into() + &(&h.into() * &self.secret.scalar);
+        // let s = s.reduce_modulo_ell();
+        // let s = TweetNaclScalar::from(&s.reduce_modulo_ell()).reduce_modulo_ell();
 
         Signature { r: R, s }
     }
@@ -132,8 +134,9 @@ impl Keypair {
             .finalize();
         let h: Scalar = Scalar::from_u512_le(&second_hash);
 
-        let mut s = &r.into() + &(&h.into() * &TweetNaclScalar::from(&self.secret.scalar));
-        let s = s.reduce_modulo_ell();
+        // let mut s = &r.into() + &(&h.into() * &TweetNaclScalar::from(&self.secret.scalar));
+        let s = &r.into() + &(&h.into() * &self.secret.scalar);
+        // let s = s.reduce_modulo_ell();
 
         Signature { r: R, s }
     }
@@ -175,8 +178,9 @@ impl Keypair {
             .finalize();
         let h: Scalar = Scalar::from_u512_le(&second_hash);
 
-        let mut s = &r.into() + &(&h.into() * &TweetNaclScalar::from(&self.secret.scalar));
-        let s = s.reduce_modulo_ell();
+        // let mut s = &r.into() + &(&h.into() * &TweetNaclScalar::from(&self.secret.scalar));
+        let s = &r.into() + &(&h.into() * &self.secret.scalar);
+        // let s = s.reduce_modulo_ell();
 
         Signature { r: R, s }
     }
@@ -508,5 +512,22 @@ mod tests {
 
         let verification = public_key.verify_prehashed(&prehashed_message, &signature, None);
         assert!(verification.is_ok());
     }
+
+    #[test]
+    fn test_reduction_of_S_modulo_ell() {
+        let seed: &[u8; 32] = b"\\\x8a\x90\x83\x8d\x10U$\xfe\x8d\xf6Z\x9d\xaf\xd9\x9c\xc4\x08S{l\xa3\x1b9\x91\x0bqu5Ut\x15";
+        let data: &[u8; 69] = b"\xbf\xab\xc3t2\x95\x8b\x063`\xd3\xadda\xc9\xc4sZ\xe7\xf8\xed\xd4e\x92\xa5\xe0\xf0\x14R\xb2\xe4\xb5\x01\x00\x00\x0b\x0e123456789abcdef0123456789abcdef0";
+        let nonreduced_sig: &[u8; 64] = b"E\x13\x8aD\x1f\xb8\xd0\xc5k\x1f\xf7\xe5~u\x998I\x12\x17\x99\xf1X\xe0\xdeV\xf7))p\xea\x93\x9c\xfaV\xef\xeeP\xad\xdf*\x80O\xaaFA\x9d7\xd8L\xc4{\x93\xae\x96\x9e\xf09,\xb7\xf2\x00\xe56\x10";
+        let reduced_s: &[u8; 32] = b"\r\x83\xf9\x916J\xcd\xd2\xa9\xb2\xb2\xa3b\xa3X\xc3L\xc4{\x93\xae\x96\x9e\xf09,\xb7\xf2\x00\xe56\x00";
+
+        let keypair = Keypair::from(seed);
+        let mut signature = keypair.sign(data);
+        let s = &signature.s;
+        // use crate::scalar::TweetNaclScalar;
+        // let manually_reduced_s = TweetNaclScalar::from(s).reduce_modulo_ell();
+        // assert_eq!(reduced_s, &manually_reduced_s.0);
+        assert_ne!(signature.s.0, &nonreduced_sig[32..]);
+
+    }
 }
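
Editor's note, not part of the commit: below is a minimal sketch of one more in-crate test that exercises the reduction guarantee this patch introduces. It only uses items visible in the diff above (Keypair::from, Keypair::sign, the Scalar tuple field, Scalar::reduce, Scalar::is_canonical), and it would live in the same `mod tests` of src/signature.rs as the test above; the test name, seed, and message bytes are arbitrary illustrative values, not taken from the patch.

    // Sketch only, assuming the API exactly as introduced in this patch.
    #[test]
    fn test_s_is_canonical_after_signing() {
        let seed: &[u8; 32] = &[0x42; 32];          // arbitrary illustrative seed
        let data: &[u8] = b"illustrative message";  // arbitrary illustrative message

        let keypair = Keypair::from(seed);
        let signature = keypair.sign(data);

        // With the scalar29 backend, S = r + h*a is Montgomery-reduced mod l
        // when the sum is repacked, so the stored S is already canonical.
        assert!(signature.s.is_canonical());
        assert_eq!(signature.s.0, signature.s.reduce().0);
    }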