diff --git a/src/lem/interpreter.rs b/src/lem/interpreter.rs
index 3425cfa42..a6975ec74 100644
--- a/src/lem/interpreter.rs
+++ b/src/lem/interpreter.rs
@@ -278,13 +278,13 @@ impl Block {
                 let g = *store.expect_f(g_idx);
                 let diff = f - g;
                 hints.bit_decomp.push(Some(SlotData {
-                    vals: vec![Val::Num(IVal::Atom(store.intern_f(f + f).0))],
+                    vals: vec![Val::Num(IVal::Atom(store.intern_f(f + f)))],
                 }));
                 hints.bit_decomp.push(Some(SlotData {
-                    vals: vec![Val::Num(IVal::Atom(store.intern_f(g + g).0))],
+                    vals: vec![Val::Num(IVal::Atom(store.intern_f(g + g)))],
                 }));
                 hints.bit_decomp.push(Some(SlotData {
-                    vals: vec![Val::Num(IVal::Atom(store.intern_f(diff + diff).0))],
+                    vals: vec![Val::Num(IVal::Atom(store.intern_f(diff + diff)))],
                 }));
                 let f = BaseNum::Scalar(f);
                 let g = BaseNum::Scalar(g);
@@ -466,7 +466,7 @@ impl Block {
                     tgt_secret.clone(),
                     store.intern_atom(Tag::Expr(Num), secret.0),
                 );
-                let secret_idx = store.intern_f(secret.0).0;
+                let secret_idx = store.intern_f(secret.0);
                 let vals = vec![Val::Num(IVal::Atom(secret_idx)), Val::Pointer(*ptr)];
                 hints.commitment.push(Some(SlotData { vals }));
             }
diff --git a/src/lem/pointers.rs b/src/lem/pointers.rs
index 6ade3be69..821b99385 100644
--- a/src/lem/pointers.rs
+++ b/src/lem/pointers.rs
@@ -50,10 +50,16 @@ impl<T, V> GPtr<T, V> {
 /// Encoding for pointer children that are stored in index-based data structures
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 pub enum IVal {
+    /// Holds the index of leaf data
     Atom(usize),
+    /// Holds the index of two children
     Tuple2(usize),
+    /// Holds the index of three children
     Tuple3(usize),
+    /// Holds the index of four children
     Tuple4(usize),
+    /// Similar to `Tuple3`, but ignores the tags of the first and third children
+    /// for content-addressing
     Compact(usize),
 }
 
diff --git a/src/lem/store.rs b/src/lem/store.rs
index 4f682715f..73904fe4f 100644
--- a/src/lem/store.rs
+++ b/src/lem/store.rs
@@ -126,10 +126,10 @@ impl<F: LurkField> Default for Store<F> {
         let hash6zeros = core.hasher.hash6(&[F::ZERO; 6]);
         let hash8zeros = core.hasher.hash8(&[F::ZERO; 8]);
 
-        let (hash3zeros_idx, _) = core.intern_digest(FWrap(hash3zeros));
-        let (hash4zeros_idx, _) = core.intern_digest(FWrap(hash4zeros));
-        let (hash6zeros_idx, _) = core.intern_digest(FWrap(hash6zeros));
-        let (hash8zeros_idx, _) = core.intern_digest(FWrap(hash8zeros));
+        let hash3zeros_idx = core.intern_digest(FWrap(hash3zeros));
+        let hash4zeros_idx = core.intern_digest(FWrap(hash4zeros));
+        let hash6zeros_idx = core.intern_digest(FWrap(hash6zeros));
+        let hash8zeros_idx = core.intern_digest(FWrap(hash8zeros));
 
         Self {
             core,
@@ -149,7 +149,7 @@
 // Handling to the core
 impl<F: LurkField> Store<F> {
     #[inline]
-    pub fn intern_f(&self, f: F) -> (usize, bool) {
+    pub fn intern_f(&self, f: F) -> usize {
         self.core.intern_digest(FWrap(f))
     }
 
diff --git a/src/lem/store_core.rs b/src/lem/store_core.rs
index 9bd82d109..104aff477 100644
--- a/src/lem/store_core.rs
+++ b/src/lem/store_core.rs
@@ -13,8 +13,8 @@ pub trait StoreHasher<T, D> {
     fn hash_commitment(&self, secret: D, payload: GPtr<T, D>) -> D;
 }
 
-/// A data structure used to efficiently encode data as DAGs of tagged pointers
-/// that can eventually be content-addressed by a custom hasher
+/// Append-only threadsafe data structure used to efficiently encode data as DAGs
+/// of tagged pointers that can be content-addressed by a custom hasher on demand
 #[derive(Debug)]
 pub struct StoreCore<T, D, H: StoreHasher<T, D>> {
     /// Holds leaf (non-compound) data
@@ -47,8 +47,8 @@ pub struct StoreCore<T, D, H: StoreHasher<T, D>> {
 }
 
 impl<
-        T: PartialEq + std::cmp::Eq + std::hash::Hash,
-        D: PartialEq + std::cmp::Eq + std::hash::Hash,
+        T: PartialEq + Eq + std::hash::Hash,
+        D: PartialEq + Eq + std::hash::Hash,
         H: StoreHasher<T, D> + Default,
     > Default for StoreCore<T, D, H>
 {
@@ -68,14 +68,14 @@
 }
 
 impl<
-        T: Copy + PartialEq + std::cmp::Eq + std::hash::Hash + Send + Sync,
-        D: Copy + PartialEq + std::cmp::Eq + std::hash::Hash + Send + Sync,
+        T: Copy + PartialEq + Eq + std::hash::Hash + Send + Sync,
+        D: Copy + PartialEq + Eq + std::hash::Hash + Send + Sync,
         H: StoreHasher<T, D> + Sync,
     > StoreCore<T, D, H>
 {
     #[inline]
-    pub fn intern_digest(&self, d: D) -> (usize, bool) {
-        self.atom.insert_probe(Box::new(d))
+    pub fn intern_digest(&self, digest: D) -> usize {
+        self.atom.insert_probe(Box::new(digest)).0
     }
 
     #[inline]
@@ -88,17 +88,26 @@
         self.fetch_digest(idx).expect("Digest wasn't interned")
     }
 
-    pub fn intern_tuple2(&self, ptrs: [IPtr<T>; 2], tag: T, digest: Option<D>) -> IPtr<T> {
-        let (idx, inserted) = self.tuple2.insert_probe(Box::new(ptrs));
-        let ptr = IPtr::new(tag, IVal::Tuple2(idx));
+    /// Performs the side-effects of interning then returns the pointer
+    fn finalize_interning(
+        &self,
+        tag: T,
+        ptr_val: IVal,
+        digest: Option<D>,
+        inserted: bool,
+    ) -> IPtr<T> {
         if let Some(digest) = digest {
-            let val = *ptr.val();
-            self.z_cache.insert(val, Box::new(digest));
-            self.inverse_z_cache.insert(digest, Box::new(val));
+            self.z_cache.insert(ptr_val, Box::new(digest));
+            self.inverse_z_cache.insert(digest, Box::new(ptr_val));
         } else if inserted {
-            self.dehydrated.load().push(Box::new(*ptr.val()));
+            self.dehydrated.load().push(Box::new(ptr_val));
         }
-        ptr
+        IPtr::new(tag, ptr_val)
+    }
+
+    pub fn intern_tuple2(&self, ptrs: [IPtr<T>; 2], tag: T, digest: Option<D>) -> IPtr<T> {
+        let (idx, inserted) = self.tuple2.insert_probe(Box::new(ptrs));
+        self.finalize_interning(tag, IVal::Tuple2(idx), digest, inserted)
     }
 
     #[inline]
@@ -119,19 +128,12 @@
         compact: bool,
     ) -> IPtr<T> {
         let (idx, inserted) = self.tuple3.insert_probe(Box::new(ptrs));
-        let ptr = if compact {
-            IPtr::new(tag, IVal::Compact(idx))
+        let ptr_val = if compact {
+            IVal::Compact(idx)
         } else {
-            IPtr::new(tag, IVal::Tuple3(idx))
+            IVal::Tuple3(idx)
         };
-        if let Some(digest) = digest {
-            let val = *ptr.val();
-            self.z_cache.insert(val, Box::new(digest));
-            self.inverse_z_cache.insert(digest, Box::new(val));
-        } else if inserted {
-            self.dehydrated.load().push(Box::new(*ptr.val()));
-        }
-        ptr
+        self.finalize_interning(tag, ptr_val, digest, inserted)
     }
 
     #[inline]
@@ -151,15 +153,7 @@
 
     pub fn intern_tuple4(&self, ptrs: [IPtr<T>; 4], tag: T, digest: Option<D>) -> IPtr<T> {
         let (idx, inserted) = self.tuple4.insert_probe(Box::new(ptrs));
-        let ptr = IPtr::new(tag, IVal::Tuple4(idx));
-        if let Some(digest) = digest {
-            let val = *ptr.val();
-            self.z_cache.insert(val, Box::new(digest));
-            self.inverse_z_cache.insert(digest, Box::new(val));
-        } else if inserted {
-            self.dehydrated.load().push(Box::new(*ptr.val()));
-        }
-        ptr
+        self.finalize_interning(tag, IVal::Tuple4(idx), digest, inserted)
     }
 
     #[inline]
@@ -259,10 +253,10 @@
     /// limit in `hash_ptr_val_unsafe`. So we move in smaller chunks from left to
     /// right, populating the `z_cache`, which can rescue `hash_ptr_val_unsafe`
     /// from deep recursions
-    fn hydrate_z_cache_with_ptr_vals(&self, ptrs: &[&IVal]) {
-        ptrs.chunks(256).for_each(|chunk| {
-            chunk.par_iter().for_each(|ptr| {
-                self.hash_ptr_val_unsafe(ptr);
+    fn hydrate_z_cache_with_ptr_vals(&self, ptr_vals: &[&IVal]) {
+        ptr_vals.chunks(256).for_each(|chunk| {
+            chunk.par_iter().for_each(|ptr_val| {
+                self.hash_ptr_val_unsafe(ptr_val);
             });
         });
     }
@@ -287,9 +281,9 @@
         }
     }
 
-    /// Safe version of `hash_ptr_val_unsafe` that doesn't hit a stack overflow
-    /// by precomputing the pointers that need to be hashed in order to hash the
-    /// provided `ptr`
+    /// Safe version of `hash_ptr_val_unsafe` that doesn't hit a stack overflow.
+    /// It precomputes the `IVal`s that need to be hashed in order to hash the
+    /// provided `ptr_val`
     pub fn hash_ptr_val(&self, ptr_val: &IVal) -> D {
         if self.is_below_safe_threshold() {
             // just run `hash_ptr_val_unsafe` for extra speed when the dehydrated
@@ -362,15 +356,15 @@
         self.open(digest).is_some()
     }
 
-    /// `IPtr` equality w.r.t. their content-addressed versions
+    /// Pointer equality w.r.t. their content-addressed versions
     #[inline]
     pub fn ptr_eq(&self, a: &IPtr<T>, b: &IPtr<T>) -> bool {
         self.hash_ptr(a) == self.hash_ptr(b)
     }
 
     #[inline]
-    pub fn intern_atom(&self, tag: T, d: D) -> IPtr<T> {
-        IPtr::new(tag, IVal::Atom(self.intern_digest(d).0))
+    pub fn intern_atom(&self, tag: T, digest: D) -> IPtr<T> {
+        IPtr::new(tag, IVal::Atom(self.intern_digest(digest)))
     }
 
     /// Creates an atom pointer from a ZPtr, with its hash. Hashing
@@ -386,7 +380,7 @@
         self.inverse_z_cache
             .get(digest)
             .cloned()
-            .unwrap_or_else(|| IVal::Atom(self.intern_digest(*digest).0))
+            .unwrap_or_else(|| IVal::Atom(self.intern_digest(*digest)))
     }
 
     /// Attempts to recover the `Ptr` from `inverse_z_cache`. If the mapping is
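
To make the refactor concrete, here is a compilable, single-threaded sketch of the interning flow after this change. `MiniStore`, `Interner`, the `u64` digests, and the `RefCell`-backed maps are illustrative stand-ins, not the crate's actual types (the real `StoreCore` is generic over tags, digests, and a `StoreHasher`, and uses append-only concurrent structures); only the `(usize, bool)` contract of `insert_probe`, the `.0` now living inside `intern_digest`, and the shared `finalize_interning` tail mirror the diff:

```rust
use std::cell::RefCell;
use std::collections::HashMap;

/// Index-based value, as in the diff's `IVal` (two variants suffice here)
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum IVal {
    Atom(usize),
    Tuple2(usize),
}

/// Tagged pointer, standing in for `IPtr`
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct IPtr {
    tag: u8,
    val: IVal,
}

/// Deduplicating interner; a stand-in for the store's append-only index sets
struct Interner<T> {
    items: RefCell<Vec<T>>,
    index: RefCell<HashMap<T, usize>>,
}

impl<T> Default for Interner<T> {
    fn default() -> Self {
        Self {
            items: RefCell::new(Vec::new()),
            index: RefCell::new(HashMap::new()),
        }
    }
}

impl<T: Clone + Eq + std::hash::Hash> Interner<T> {
    /// Returns the item's index and whether it was newly inserted, mirroring
    /// the `(usize, bool)` contract of `insert_probe`
    fn insert_probe(&self, t: T) -> (usize, bool) {
        if let Some(&idx) = self.index.borrow().get(&t) {
            return (idx, false);
        }
        let idx = {
            let mut items = self.items.borrow_mut();
            items.push(t.clone());
            items.len() - 1
        };
        self.index.borrow_mut().insert(t, idx);
        (idx, true)
    }
}

#[derive(Default)]
struct MiniStore {
    atom: Interner<u64>,
    tuple2: Interner<[IPtr; 2]>,
    /// `IVal -> digest` memoization, as in the store's `z_cache`
    z_cache: RefCell<HashMap<IVal, u64>>,
    /// Values interned without a digest, awaiting batched hashing
    dehydrated: RefCell<Vec<IVal>>,
}

impl MiniStore {
    /// Callers only ever used the index, hence the `.0` now lives here
    fn intern_digest(&self, digest: u64) -> usize {
        self.atom.insert_probe(digest).0
    }

    /// The shared tail of every `intern_tuple*`: record a known digest in the
    /// cache, or queue a newly inserted value for later hashing
    fn finalize_interning(
        &self,
        tag: u8,
        ptr_val: IVal,
        digest: Option<u64>,
        inserted: bool,
    ) -> IPtr {
        if let Some(digest) = digest {
            self.z_cache.borrow_mut().insert(ptr_val, digest);
        } else if inserted {
            self.dehydrated.borrow_mut().push(ptr_val);
        }
        IPtr { tag, val: ptr_val }
    }

    fn intern_tuple2(&self, ptrs: [IPtr; 2], tag: u8, digest: Option<u64>) -> IPtr {
        let (idx, inserted) = self.tuple2.insert_probe(ptrs);
        self.finalize_interning(tag, IVal::Tuple2(idx), digest, inserted)
    }
}

fn main() {
    let store = MiniStore::default();
    let a = IPtr { tag: 0, val: IVal::Atom(store.intern_digest(7)) };
    let b = IPtr { tag: 0, val: IVal::Atom(store.intern_digest(9)) };
    // No digest known yet, so the new tuple lands in the dehydrated queue
    let t = store.intern_tuple2([a, b], 1, None);
    assert_eq!(t.val, IVal::Tuple2(0));
    // Re-interning is a no-op: same index, and nothing is queued twice
    assert_eq!(store.intern_tuple2([a, b], 1, None), t);
    assert_eq!(store.dehydrated.borrow().len(), 1);
}
```

Centralizing the side effects in one `finalize_interning` means the `z_cache`/`dehydrated` bookkeeping cannot drift between the tuple2/tuple3/tuple4 paths, and narrowing `intern_digest` to return only the index matches how every call site in the diff already used it.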
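The `dehydrated` queue is the other half of the story: values interned without a digest are hashed later, in insertion order, so parents always find their children's digests already cached and recursion stays shallow. A sketch extending `MiniStore` from above, under the same assumptions (`hash_val` and its toy xor/rotate combiner are hypothetical stand-ins for the generic `StoreHasher`; the real `hydrate_z_cache_with_ptr_vals` additionally fans each 256-element chunk out with rayon's `par_iter`):

```rust
impl MiniStore {
    /// Digest of a value, memoized in `z_cache`; atoms hash to themselves here
    fn hash_val(&self, val: IVal) -> u64 {
        if let Some(&d) = self.z_cache.borrow().get(&val) {
            return d;
        }
        let digest = match val {
            IVal::Atom(idx) => self.atom.items.borrow()[idx],
            IVal::Tuple2(idx) => {
                let [a, b] = self.tuple2.items.borrow()[idx];
                // Toy combiner standing in for the generic `StoreHasher`
                self.hash_val(a.val) ^ self.hash_val(b.val).rotate_left(1)
            }
        };
        self.z_cache.borrow_mut().insert(val, digest);
        digest
    }

    /// Drains the queue left to right, mirroring `hydrate_z_cache_with_ptr_vals`:
    /// children were interned (and thus queued) before their parents, so each
    /// compound value finds its children already cached when its turn comes
    fn hydrate(&self) {
        let queue = std::mem::take(&mut *self.dehydrated.borrow_mut());
        for chunk in queue.chunks(256) {
            for &val in chunk {
                self.hash_val(val);
            }
        }
    }
}
```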