Final changes #22

Merged · 5 commits · Apr 4, 2018
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -6,7 +6,7 @@ homepage = "https://github.com/ebfull/bellman"
license = "MIT/Apache-2.0"
name = "bellman"
repository = "https://github.com/ebfull/bellman"
version = "0.0.9"
version = "0.1.0"

[dependencies]
rand = "0.4"
56 changes: 28 additions & 28 deletions src/groth16/mod.rs
@@ -26,9 +26,9 @@ pub use self::verifier::*;

#[derive(Clone)]
pub struct Proof<E: Engine> {
-a: E::G1Affine,
-b: E::G2Affine,
-c: E::G1Affine
+pub a: E::G1Affine,
+pub b: E::G2Affine,
+pub c: E::G1Affine
}
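With `a`, `b`, and `c` public, downstream code can assemble or inspect a `Proof` directly instead of going only through `read`/`write`. A minimal sketch; the helper name is illustrative and the `pairing::Engine` import reflects this crate's pairing dependency:

```rust
use bellman::groth16::Proof;
use pairing::Engine;

// Hypothetical helper: build a Proof from group elements obtained elsewhere
// (e.g. deserialized by custom tooling). Relies only on the fields being
// public, as of this change.
fn proof_from_parts<E: Engine>(
    a: E::G1Affine,
    b: E::G2Affine,
    c: E::G1Affine,
) -> Proof<E> {
    Proof { a: a, b: b, c: c }
}
```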

impl<E: Engine> PartialEq for Proof<E> {
@@ -101,28 +101,28 @@ impl<E: Engine> Proof<E> {
pub struct VerifyingKey<E: Engine> {
// alpha in g1 for verifying and for creating A/C elements of
// proof. Never the point at infinity.
-alpha_g1: E::G1Affine,
+pub alpha_g1: E::G1Affine,

// beta in g1 and g2 for verifying and for creating B/C elements
// of proof. Never the point at infinity.
-beta_g1: E::G1Affine,
-beta_g2: E::G2Affine,
+pub beta_g1: E::G1Affine,
+pub beta_g2: E::G2Affine,

// gamma in g2 for verifying. Never the point at infinity.
-gamma_g2: E::G2Affine,
+pub gamma_g2: E::G2Affine,

// delta in g1/g2 for verifying and proving, essentially the magic
// trapdoor that forces the prover to evaluate the C element of the
// proof with only components from the CRS. Never the point at
// infinity.
-delta_g1: E::G1Affine,
-delta_g2: E::G2Affine,
+pub delta_g1: E::G1Affine,
+pub delta_g2: E::G2Affine,

// Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / gamma
// for all public inputs. Because all public inputs have a dummy constraint,
// this is the same size as the number of inputs, and never contains points
// at infinity.
-ic: Vec<E::G1Affine>
+pub ic: Vec<E::G1Affine>
}
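Because `ic` holds one element per input, a caller can sanity-check a deserialized key against the expected number of public inputs. A sketch; the `+ 1` assumes, as in the verifier, that the constant ONE input contributes the first element of `ic`, and `expected_inputs` counts only caller-supplied inputs:

```rust
use bellman::groth16::VerifyingKey;
use pairing::Engine;

// Hypothetical check on a deserialized verifying key, enabled by the
// now-public `ic` field.
fn ic_len_matches<E: Engine>(vk: &VerifyingKey<E>, expected_inputs: usize) -> bool {
    vk.ic.len() == expected_inputs + 1
}
```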

impl<E: Engine> PartialEq for VerifyingKey<E> {
@@ -149,7 +149,7 @@ impl<E: Engine> VerifyingKey<E> {
writer.write_all(self.gamma_g2.into_uncompressed().as_ref())?;
writer.write_all(self.delta_g1.into_uncompressed().as_ref())?;
writer.write_all(self.delta_g2.into_uncompressed().as_ref())?;
-writer.write_u64::<BigEndian>(self.ic.len() as u64)?;
+writer.write_u32::<BigEndian>(self.ic.len() as u32)?;
for ic in &self.ic {
writer.write_all(ic.into_uncompressed().as_ref())?;
}
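Note on the wire format: after this change the tail of a serialized `VerifyingKey` is `gamma_g2`, `delta_g1` and `delta_g2` in uncompressed form, followed by a 4-byte big-endian element count and the uncompressed `ic` elements. The encoding is therefore not compatible with data produced by the previous `u64`-prefixed format.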
@@ -182,7 +182,7 @@ impl<E: Engine> VerifyingKey<E> {
reader.read_exact(g2_repr.as_mut())?;
let delta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

-let ic_len = reader.read_u64::<BigEndian>()? as usize;
+let ic_len = reader.read_u32::<BigEndian>()? as usize;

let mut ic = vec![];

@@ -218,23 +218,23 @@ pub struct Parameters<E: Engine> {

// Elements of the form ((tau^i * t(tau)) / delta) for i between 0 and
// m-2 inclusive. Never contains points at infinity.
-h: Arc<Vec<E::G1Affine>>,
+pub h: Arc<Vec<E::G1Affine>>,

// Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / delta
// for all auxillary inputs. Variables can never be unconstrained, so this
// never contains points at infinity.
-l: Arc<Vec<E::G1Affine>>,
+pub l: Arc<Vec<E::G1Affine>>,

// QAP "A" polynomials evaluated at tau in the Lagrange basis. Never contains
// points at infinity: polynomials that evaluate to zero are omitted from
// the CRS and the prover can deterministically skip their evaluation.
-a: Arc<Vec<E::G1Affine>>,
+pub a: Arc<Vec<E::G1Affine>>,

// QAP "B" polynomials evaluated at tau in the Lagrange basis. Needed in
// G1 and G2 for C/B queries, respectively. Never contains points at
// infinity for the same reason as the "A" polynomials.
-b_g1: Arc<Vec<E::G1Affine>>,
-b_g2: Arc<Vec<E::G2Affine>>
+pub b_g1: Arc<Vec<E::G1Affine>>,
+pub b_g2: Arc<Vec<E::G2Affine>>
}
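With the query vectors exposed, tooling can report the size of a proving key without regenerating it from the circuit. A sketch using only the now-public fields (the function name is illustrative, not part of the crate's API):

```rust
use bellman::groth16::Parameters;
use pairing::Engine;

// Print the length of each multiexp query; h, l, a, b_g1 and b_g2 are the
// Arc<Vec<_>> fields made public by this change.
fn describe_params<E: Engine>(params: &Parameters<E>) {
    println!(
        "h: {}, l: {}, a: {}, b_g1: {}, b_g2: {}",
        params.h.len(),
        params.l.len(),
        params.a.len(),
        params.b_g1.len(),
        params.b_g2.len()
    );
}
```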

impl<E: Engine> PartialEq for Parameters<E> {
@@ -256,27 +256,27 @@ impl<E: Engine> Parameters<E> {
{
self.vk.write(&mut writer)?;

-writer.write_u64::<BigEndian>(self.h.len() as u64)?;
+writer.write_u32::<BigEndian>(self.h.len() as u32)?;
for g in &self.h[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}

-writer.write_u64::<BigEndian>(self.l.len() as u64)?;
+writer.write_u32::<BigEndian>(self.l.len() as u32)?;
for g in &self.l[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}

-writer.write_u64::<BigEndian>(self.a.len() as u64)?;
+writer.write_u32::<BigEndian>(self.a.len() as u32)?;
for g in &self.a[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}

-writer.write_u64::<BigEndian>(self.b_g1.len() as u64)?;
+writer.write_u32::<BigEndian>(self.b_g1.len() as u32)?;
for g in &self.b_g1[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}

-writer.write_u64::<BigEndian>(self.b_g2.len() as u64)?;
+writer.write_u32::<BigEndian>(self.b_g2.len() as u32)?;
for g in &self.b_g2[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
@@ -336,35 +336,35 @@ impl<E: Engine> Parameters<E> {
let mut b_g2 = vec![];

{
-let len = reader.read_u64::<BigEndian>()? as usize;
+let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
h.push(read_g1(&mut reader)?);
}
}

{
-let len = reader.read_u64::<BigEndian>()? as usize;
+let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
l.push(read_g1(&mut reader)?);
}
}

{
-let len = reader.read_u64::<BigEndian>()? as usize;
+let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
a.push(read_g1(&mut reader)?);
}
}

{
-let len = reader.read_u64::<BigEndian>()? as usize;
+let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
b_g1.push(read_g1(&mut reader)?);
}
}

{
-let len = reader.read_u64::<BigEndian>()? as usize;
+let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
b_g2.push(read_g2(&mut reader)?);
}
@@ -535,7 +535,7 @@ mod test_with_bls12_381 {
let mut v = vec![];

params.write(&mut v).unwrap();
-assert_eq!(v.len(), 2160);
+assert_eq!(v.len(), 2136);

let de_params = Parameters::read(&v[..], true).unwrap();
assert!(params == de_params);
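For reference, the expected serialized size drops by exactly 24 bytes because six length prefixes (the `ic` count in the verifying key plus the `h`, `l`, `a`, `b_g1` and `b_g2` counts) shrink from 8-byte `u64` to 4-byte `u32` values: 2160 - 6 × 4 = 2136.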
6 changes: 6 additions & 0 deletions src/groth16/prover.rs
@@ -290,6 +290,12 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
let b_g2_inputs = multiexp(&worker, b_g2_inputs_source, b_input_density, input_assignment);
let b_g2_aux = multiexp(&worker, b_g2_aux_source, b_aux_density, aux_assignment);

+if vk.delta_g1.is_zero() || vk.delta_g2.is_zero() {
+    // If this element is zero, someone is trying to perform a
+    // subversion-CRS attack.
+    return Err(SynthesisError::UnexpectedIdentity);
+}

let mut g_a = vk.delta_g1.mul(r);
g_a.add_assign_mixed(&vk.alpha_g1);
let mut g_b = vk.delta_g2.mul(s);
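The same guard can be applied by callers before accepting an externally supplied verifying key. A sketch mirroring the prover's new check; the helper is illustrative, and it assumes `is_zero` from the `CurveAffine` trait in the `pairing` crate:

```rust
use bellman::groth16::VerifyingKey;
use pairing::{CurveAffine, Engine};

// Reject keys whose delta elements are the point at infinity, matching the
// subversion-CRS guard added to create_proof above.
fn delta_is_nontrivial<E: Engine>(vk: &VerifyingKey<E>) -> bool {
    !vk.delta_g1.is_zero() && !vk.delta_g2.is_zero()
}
```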
2 changes: 1 addition & 1 deletion src/lib.rs
@@ -7,7 +7,7 @@ extern crate bit_vec;
extern crate crossbeam;
extern crate byteorder;

-mod multicore;
+pub mod multicore;
mod multiexp;
pub mod domain;
pub mod groth16;
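With `multicore` exported, downstream crates can share bellman's worker pool rather than spinning up their own. A minimal sketch, assuming `Worker::new()` and `Worker::log_num_cpus()` as used internally by this crate:

```rust
extern crate bellman;

use bellman::multicore::Worker;

fn main() {
    // Reuse bellman's thread-pool abstraction; log_num_cpus reports
    // floor(log2(number of CPUs)) and is what the crate uses to size FFTs.
    let worker = Worker::new();
    println!("log2(cpus) = {}", worker.log_num_cpus());
}
```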