Skip to content

Commit

Permalink
feat: remove partial precomputation (#129)
Browse files Browse the repository at this point in the history
Partial precomputation is the sole reason for maintaining a custom curve
library fork, which has proven to be a headache and limits compatibility
with other libraries.

This PR removes partial precomputation altogether. If you supply
parameters with more inner-product generators than you need, padding is
used. This incurs an efficiency hit, but only in this particular case.
For the use cases in the Tari ecosystem, this is not an issue.

As a result of this change, we also switch back to the latest version of
the upstream curve library and simplify some scalar exponentiation
operations. The audit report is also updated to note the curve library
dependency change.

Closes #128. Closes #93. Closes #96.
  • Loading branch information
AaronFeickert committed Mar 29, 2024
1 parent 6c4bfe0 commit 58409fa
Show file tree
Hide file tree
Showing 5 changed files with 65 additions and 13 deletions.
5 changes: 3 additions & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,9 @@ description = "A smaller faster implementation of Bulletproofs"
[dependencies]
blake2 = { version = "0.10", default-features = false }
byteorder = { version = "1", default-features = false }
curve25519-dalek = { package = "tari-curve25519-dalek", version = "4.0.3", default-features = false, features = ["alloc", "rand_core", "serde", "zeroize"] }
curve25519-dalek = { version = "4.1", default-features = false, features = ["alloc", "group", "rand_core", "serde", "zeroize"] }
digest = { version = "0.10", default-features = false, features = ["alloc"] }
ff = { version = "0.13.0", default-features = false }
itertools = { version = "0.12", default-features = false, features = ["use_alloc"] }
merlin = { version = "3", default-features = false }
once_cell = { version = "1", default-features = false, features = ["alloc", "critical-section"] }
Expand All @@ -27,7 +28,7 @@ rand_chacha = { version = "0.3.1", default-features = false }

[features]
default = ["rand", "std"]
std = ["blake2/std", "byteorder/std", "digest/std", "itertools/use_std", "merlin/std", "once_cell/std", "rand_core/std", "serde/std", "sha3/std", "zeroize/std"]
std = ["blake2/std", "byteorder/std", "digest/std", "ff/std", "itertools/use_std", "merlin/std", "once_cell/std", "rand_core/std", "serde/std", "sha3/std", "zeroize/std"]
rand = ["rand_core/getrandom"]

[[bench]]
Expand Down
2 changes: 1 addition & 1 deletion clippy.toml
Original file line number Diff line number Diff line change
@@ -1 +1 @@
arithmetic-side-effects-allowed = [ "tari_curve25519_dalek::Scalar" ]
arithmetic-side-effects-allowed = [ "curve25519_dalek::Scalar" ]
3 changes: 3 additions & 0 deletions docs/quarkslab-audit/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,9 @@ Developers are in the process of testing an updated version of the fork, in orde

Further, an open [upstream pull request](https://github.com/dalek-cryptography/curve25519-dalek/pull/546) would add partial precomputation support; if it is accepted, the implementation could change its curve library dependency to upstream.

*Update*: The library has removed support for partial precomputation in favor of a different design.
As a result, the curve library dependency has been changed to upstream.

## `INFO 2`

This issue ran the `clippy` linter against several lints, and identified a number of warnings arising from them.
Expand Down
28 changes: 22 additions & 6 deletions src/range_proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
use alloc::{string::ToString, vec, vec::Vec};
use core::{
convert::{TryFrom, TryInto},
iter::once,
iter::{once, repeat},
marker::PhantomData,
ops::{Add, Mul, Shr},
slice::ChunksExact,
Expand All @@ -18,6 +18,7 @@ use curve25519_dalek::{
scalar::Scalar,
traits::{Identity, IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul},
};
use ff::Field;
use itertools::{izip, Itertools};
use merlin::Transcript;
use rand_core::CryptoRngCore;
Expand All @@ -36,7 +37,7 @@ use crate::{
traits::{Compressable, Decompressable, FixedBytesRepr, Precomputable},
transcripts::RangeProofTranscript,
utils::{
generic::{nonce, split_at_checked},
generic::{compute_generator_padding, nonce, split_at_checked},
nullrng::NullRng,
},
};
Expand Down Expand Up @@ -330,8 +331,15 @@ where
Scalar::random_not_zero(range_proof_transcript.as_mut_rng())
});
}
let padding = compute_generator_padding(
statement.generators.bit_length(),
statement.commitments.len(),
statement.generators.aggregation_factor(),
)?;
let a = statement.generators.precomp().vartime_mixed_multiscalar_mul(
a_li.iter().interleave(a_ri.iter()),
a_li.iter()
.interleave(a_ri.iter())
.chain(repeat(&Scalar::ZERO).take(padding)),
alpha.iter(),
statement.generators.g_bases().iter(),
);
Expand Down Expand Up @@ -763,7 +771,7 @@ where

// Compute 2**n-1 for later use
let two = Scalar::from(2u8);
let two_n_minus_one = (0..bit_length.ilog2()).fold(two, |acc, _| acc * acc) - Scalar::ONE;
let two_n_minus_one = two.pow_vartime([bit_length as u64]) - Scalar::ONE;

// Weighted coefficients for common generators
let mut g_base_scalars = vec![Scalar::ZERO; extension_degree];
Expand Down Expand Up @@ -890,7 +898,7 @@ where
let e_square = e * e;
let challenges_sq: Vec<Scalar> = challenges.iter().map(|c| c * c).collect();
let challenges_sq_inv: Vec<Scalar> = challenges_inv.iter().map(|c| c * c).collect();
let y_nm = (0..rounds).fold(y, |y_nm, _| y_nm * y_nm);
let y_nm = y.pow_vartime([full_length as u64]);
let y_nm_1 = y_nm * y;

// Compute the sum of powers of the challenge as a partial sum of a geometric series
Expand Down Expand Up @@ -1023,8 +1031,16 @@ where
dynamic_points.push(h_base.clone());

// Perform the final check using precomputation
let padding = compute_generator_padding(
max_statement.generators.bit_length(),
max_statement.commitments.len(),
max_statement.generators.aggregation_factor(),
)?;
if precomp.vartime_mixed_multiscalar_mul(
gi_base_scalars.iter().interleave(hi_base_scalars.iter()),
gi_base_scalars
.iter()
.interleave(hi_base_scalars.iter())
.chain(repeat(&Scalar::ZERO).take(padding)),
dynamic_scalars.iter(),
dynamic_points.iter(),
) != P::identity()
Expand Down
40 changes: 36 additions & 4 deletions src/utils/generic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,28 @@ pub fn split_at_checked<T>(vec: &[T], n: usize) -> Result<(&[T], &[T]), ProofErr
}
}

/// Compute the number of zero-scalar padding entries needed so that a statement using
/// `aggregation_factor` commitments can be evaluated against generator vectors sized for
/// `max_aggregation_factor`.
///
/// Each aggregation unit consumes `2 * bit_length` inner-product generators, so the padding is
/// the difference between the padded capacity and the capacity actually used.
///
/// Returns `ProofError::SizeOverflow` if either capacity computation overflows `usize`, or if
/// the actual aggregation factor exceeds the maximum.
pub fn compute_generator_padding(
    bit_length: usize,
    aggregation_factor: usize,
    max_aggregation_factor: usize,
) -> Result<usize, ProofError> {
    // Capacity of an interleaved generator vector pair: `2 * bit_length * factor`, with every
    // step checked so an overflow surfaces as an error rather than wrapping
    let capacity = |factor: usize| -> Result<usize, ProofError> {
        bit_length
            .checked_mul(2)
            .and_then(|half| half.checked_mul(factor))
            .ok_or(ProofError::SizeOverflow)
    };

    let padded = capacity(max_aggregation_factor)?;
    let used = capacity(aggregation_factor)?;

    // A checked subtraction also rejects the invalid case `used > padded`
    padded.checked_sub(used).ok_or(ProofError::SizeOverflow)
}

#[cfg(test)]
mod tests {
use alloc::{vec, vec::Vec};
Expand All @@ -76,10 +98,20 @@ mod tests {
use rand_chacha::ChaCha12Rng;
use rand_core::SeedableRng;

use crate::{
protocols::scalar_protocol::ScalarProtocol,
utils::generic::{nonce, split_at_checked, BLAKE2B_PERSONA_LIMIT},
};
use crate::{protocols::scalar_protocol::ScalarProtocol, utils::generic::*};

#[test]
// Exercises `compute_generator_padding`: the no-padding case, a padded case, and the
// two failure modes (actual factor exceeding the maximum, and `usize` overflow).
fn test_padding() {
    // Matching aggregation factors need no padding
    assert_eq!(compute_generator_padding(64, 1, 1).unwrap(), 0);

    // One extra aggregation unit of capacity pads by 2 * 64 * (2 - 1) = 128 generators
    assert_eq!(compute_generator_padding(64, 1, 2).unwrap(), 128);

    // Invalid: actual factor exceeds the maximum
    assert!(compute_generator_padding(64, 2, 1).is_err());
    // Invalid: capacity computation overflows `usize`
    assert!(compute_generator_padding(64, usize::MAX - 1, usize::MAX).is_err());
}

#[test]
fn test_split() {
Expand Down

0 comments on commit 58409fa

Please sign in to comment.