chore: minor changes in StoreCore
* Factor out common code for finalizing interning
* Make `intern_digest` just return `usize`
* Improve some docstrings
arthurpaulino committed Apr 18, 2024
1 parent: bb04f94 · commit: 4bd17be
Showing 4 changed files with 56 additions and 56 deletions.
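
Before the per-file diffs, here is a minimal, self-contained sketch of the two changes taken together. This is hypothetical code with simplified stand-in types (no tags, `u64` digests, no deduplication), not the repository's, but it shows the shape of the refactor: interning returns a bare index, and the post-insertion side-effects shared by every `intern_tuple*` method live in a single helper.

use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum IVal {
    Atom(usize),
    Tuple2(usize),
}

struct Store {
    atoms: Vec<u64>,             // leaf data
    tuple2s: Vec<[IVal; 2]>,     // two-children nodes
    z_cache: HashMap<IVal, u64>, // pointer value -> known digest
    dehydrated: Vec<IVal>,       // interned but not yet hashed
}

impl Store {
    // Before the commit this kind of method returned `(usize, bool)`; now
    // the unused insertion flag is dropped at the source.
    fn intern_atom(&mut self, data: u64) -> usize {
        self.atoms.push(data);
        self.atoms.len() - 1
    }

    // The factored-out helper: cache the digest if one was provided,
    // otherwise queue a freshly inserted value for later hydration.
    fn finalize_interning(&mut self, val: IVal, digest: Option<u64>, inserted: bool) -> IVal {
        if let Some(digest) = digest {
            self.z_cache.insert(val, digest);
        } else if inserted {
            self.dehydrated.push(val);
        }
        val
    }

    fn intern_tuple2(&mut self, ptrs: [IVal; 2], digest: Option<u64>) -> IVal {
        self.tuple2s.push(ptrs);
        let inserted = true; // the real store deduplicates via `insert_probe`
        self.finalize_interning(IVal::Tuple2(self.tuple2s.len() - 1), digest, inserted)
    }
}

fn main() {
    let mut store = Store {
        atoms: Vec::new(),
        tuple2s: Vec::new(),
        z_cache: HashMap::new(),
        dehydrated: Vec::new(),
    };
    let a = IVal::Atom(store.intern_atom(1));
    let b = IVal::Atom(store.intern_atom(2));
    let pair = store.intern_tuple2([a, b], None);
    assert_eq!(pair, IVal::Tuple2(0));
    assert_eq!(store.dehydrated, vec![pair]); // queued for hydration
}
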
src/lem/interpreter.rs (8 changes: 4 additions & 4 deletions)

@@ -278,13 +278,13 @@ impl Block {
         let g = *store.expect_f(g_idx);
         let diff = f - g;
         hints.bit_decomp.push(Some(SlotData {
-            vals: vec![Val::Num(IVal::Atom(store.intern_f(f + f).0))],
+            vals: vec![Val::Num(IVal::Atom(store.intern_f(f + f)))],
         }));
         hints.bit_decomp.push(Some(SlotData {
-            vals: vec![Val::Num(IVal::Atom(store.intern_f(g + g).0))],
+            vals: vec![Val::Num(IVal::Atom(store.intern_f(g + g)))],
         }));
         hints.bit_decomp.push(Some(SlotData {
-            vals: vec![Val::Num(IVal::Atom(store.intern_f(diff + diff).0))],
+            vals: vec![Val::Num(IVal::Atom(store.intern_f(diff + diff)))],
         }));
         let f = BaseNum::Scalar(f);
         let g = BaseNum::Scalar(g);
@@ -466,7 +466,7 @@ impl Block {
             tgt_secret.clone(),
             store.intern_atom(Tag::Expr(Num), secret.0),
         );
-        let secret_idx = store.intern_f(secret.0).0;
+        let secret_idx = store.intern_f(secret.0);
         let vals = vec![Val::Num(IVal::Atom(secret_idx)), Val::Pointer(*ptr)];
         hints.commitment.push(Some(SlotData { vals }));
     }
src/lem/pointers.rs (6 changes: 6 additions & 0 deletions)

@@ -50,10 +50,16 @@ impl<T, V> GPtr<T, V> {
 /// Encoding for pointer children that are stored in index-based data structures
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 pub enum IVal {
+    /// Holds the index of leaf data
     Atom(usize),
+    /// Holds the index of two children
     Tuple2(usize),
+    /// Holds the index of three children
     Tuple3(usize),
+    /// Holds the index of four children
     Tuple4(usize),
+    /// Similar to `Tuple3`, but ignores the tags of the first and third children
+    /// for content-addressing
     Compact(usize),
 }
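
The new docstrings describe an index-based encoding. As a purely illustrative sketch (the indices and tables below are made up for the example), each variant can be read as a typed index into the table that holds its children:

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum IVal {
    Atom(usize),    // index into the leaf-data table
    Tuple2(usize),  // index into the table of 2-child nodes
    Tuple3(usize),  // index into the table of 3-child nodes
    Tuple4(usize),  // index into the table of 4-child nodes
    Compact(usize), // 3-child node hashed while ignoring two of its tags
}

fn main() {
    // Suppose two leaves were interned at slots 0 and 1 of the atom table,
    // and the pair of them at slot 0 of the 2-tuple table:
    let one = IVal::Atom(0);
    let two = IVal::Atom(1);
    let pair = IVal::Tuple2(0); // conceptually refers to [one, two]
    println!("{one:?}, {two:?} -> {pair:?}");
}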

src/lem/store.rs (10 changes: 5 additions & 5 deletions)

@@ -126,10 +126,10 @@ impl<F: LurkField> Default for Store<F> {
         let hash6zeros = core.hasher.hash6(&[F::ZERO; 6]);
         let hash8zeros = core.hasher.hash8(&[F::ZERO; 8]);

-        let (hash3zeros_idx, _) = core.intern_digest(FWrap(hash3zeros));
-        let (hash4zeros_idx, _) = core.intern_digest(FWrap(hash4zeros));
-        let (hash6zeros_idx, _) = core.intern_digest(FWrap(hash6zeros));
-        let (hash8zeros_idx, _) = core.intern_digest(FWrap(hash8zeros));
+        let hash3zeros_idx = core.intern_digest(FWrap(hash3zeros));
+        let hash4zeros_idx = core.intern_digest(FWrap(hash4zeros));
+        let hash6zeros_idx = core.intern_digest(FWrap(hash6zeros));
+        let hash8zeros_idx = core.intern_digest(FWrap(hash8zeros));

         Self {
             core,
@@ -149,7 +149,7 @@ impl<F: LurkField> Default for Store<F> {
 // Handling to the core
 impl<F: LurkField> Store<F> {
     #[inline]
-    pub fn intern_f(&self, f: F) -> (usize, bool) {
+    pub fn intern_f(&self, f: F) -> usize {
         self.core.intern_digest(FWrap(f))
     }
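
The `Store`-to-`StoreCore` forwarding seen in this hunk is a thin delegation layer. Here is a hedged sketch of the pattern with simplified stand-in types; only the shape matches the diff, nothing else is claimed about the real code:

struct FWrap(u64); // stand-in for the `FWrap` newtype over a field element

struct StoreCore {
    atoms: Vec<u64>, // stand-in for the index-based digest table
}

impl StoreCore {
    // Post-commit signature: just the index, no `bool` insertion flag.
    fn intern_digest(&mut self, digest: FWrap) -> usize {
        self.atoms.push(digest.0);
        self.atoms.len() - 1
    }
}

struct Store {
    core: StoreCore,
}

impl Store {
    #[inline]
    fn intern_f(&mut self, f: u64) -> usize {
        // Same shape as the diff: wrap the value, then delegate to the core.
        self.core.intern_digest(FWrap(f))
    }
}

fn main() {
    let mut store = Store { core: StoreCore { atoms: Vec::new() } };
    assert_eq!(store.intern_f(7), 0);
    assert_eq!(store.intern_f(9), 1); // indices are handed out in order
}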

src/lem/store_core.rs (88 changes: 41 additions & 47 deletions)

@@ -13,8 +13,8 @@ pub trait StoreHasher<T, D> {
     fn hash_commitment(&self, secret: D, payload: GPtr<T, D>) -> D;
 }

-/// A data structure used to efficiently encode data as DAGs of tagged pointers
-/// that can eventually be content-addressed by a custom hasher
+/// Append-only threadsafe data structure used to efficiently encode data as DAGs
+/// of tagged pointers that can be content-addressed by a custom hasher on demand
 #[derive(Debug)]
 pub struct StoreCore<T, D, H: StoreHasher<T, D>> {
     /// Holds leaf (non-compound) data
@@ -47,8 +47,8 @@ pub struct StoreCore<T, D, H: StoreHasher<T, D>> {
 }

 impl<
-    T: PartialEq + std::cmp::Eq + std::hash::Hash,
-    D: PartialEq + std::cmp::Eq + std::hash::Hash,
+    T: PartialEq + Eq + std::hash::Hash,
+    D: PartialEq + Eq + std::hash::Hash,
     H: StoreHasher<T, D> + Default,
 > Default for StoreCore<T, D, H>
 {
@@ -68,14 +68,14 @@ impl<
 }

 impl<
-    T: Copy + PartialEq + std::cmp::Eq + std::hash::Hash + Send + Sync,
-    D: Copy + PartialEq + std::cmp::Eq + std::hash::Hash + Send + Sync,
+    T: Copy + PartialEq + Eq + std::hash::Hash + Send + Sync,
+    D: Copy + PartialEq + Eq + std::hash::Hash + Send + Sync,
     H: StoreHasher<T, D> + Sync,
 > StoreCore<T, D, H>
 {
     #[inline]
-    pub fn intern_digest(&self, d: D) -> (usize, bool) {
-        self.atom.insert_probe(Box::new(d))
+    pub fn intern_digest(&self, digest: D) -> usize {
+        self.atom.insert_probe(Box::new(digest)).0
     }

     #[inline]
@@ -88,17 +88,26 @@
         self.fetch_digest(idx).expect("Digest wasn't interned")
     }

-    pub fn intern_tuple2(&self, ptrs: [IPtr<T>; 2], tag: T, digest: Option<D>) -> IPtr<T> {
-        let (idx, inserted) = self.tuple2.insert_probe(Box::new(ptrs));
-        let ptr = IPtr::new(tag, IVal::Tuple2(idx));
+    /// Performs the side-effects of interning then returns the pointer
+    fn finalize_interning(
+        &self,
+        tag: T,
+        ptr_val: IVal,
+        digest: Option<D>,
+        inserted: bool,
+    ) -> IPtr<T> {
         if let Some(digest) = digest {
-            let val = *ptr.val();
-            self.z_cache.insert(val, Box::new(digest));
-            self.inverse_z_cache.insert(digest, Box::new(val));
+            self.z_cache.insert(ptr_val, Box::new(digest));
+            self.inverse_z_cache.insert(digest, Box::new(ptr_val));
         } else if inserted {
-            self.dehydrated.load().push(Box::new(*ptr.val()));
+            self.dehydrated.load().push(Box::new(ptr_val));
         }
-        ptr
+        IPtr::new(tag, ptr_val)
+    }
+
+    pub fn intern_tuple2(&self, ptrs: [IPtr<T>; 2], tag: T, digest: Option<D>) -> IPtr<T> {
+        let (idx, inserted) = self.tuple2.insert_probe(Box::new(ptrs));
+        self.finalize_interning(tag, IVal::Tuple2(idx), digest, inserted)
     }

     #[inline]
@@ -119,19 +128,12 @@
         compact: bool,
     ) -> IPtr<T> {
         let (idx, inserted) = self.tuple3.insert_probe(Box::new(ptrs));
-        let ptr = if compact {
-            IPtr::new(tag, IVal::Compact(idx))
+        let ptr_val = if compact {
+            IVal::Compact(idx)
         } else {
-            IPtr::new(tag, IVal::Tuple3(idx))
+            IVal::Tuple3(idx)
         };
-        if let Some(digest) = digest {
-            let val = *ptr.val();
-            self.z_cache.insert(val, Box::new(digest));
-            self.inverse_z_cache.insert(digest, Box::new(val));
-        } else if inserted {
-            self.dehydrated.load().push(Box::new(*ptr.val()));
-        }
-        ptr
+        self.finalize_interning(tag, ptr_val, digest, inserted)
     }

     #[inline]
@@ -151,15 +153,7 @@

     pub fn intern_tuple4(&self, ptrs: [IPtr<T>; 4], tag: T, digest: Option<D>) -> IPtr<T> {
         let (idx, inserted) = self.tuple4.insert_probe(Box::new(ptrs));
-        let ptr = IPtr::new(tag, IVal::Tuple4(idx));
-        if let Some(digest) = digest {
-            let val = *ptr.val();
-            self.z_cache.insert(val, Box::new(digest));
-            self.inverse_z_cache.insert(digest, Box::new(val));
-        } else if inserted {
-            self.dehydrated.load().push(Box::new(*ptr.val()));
-        }
-        ptr
+        self.finalize_interning(tag, IVal::Tuple4(idx), digest, inserted)
     }

     #[inline]
@@ -259,10 +253,10 @@
     /// limit in `hash_ptr_val_unsafe`. So we move in smaller chunks from left to
     /// right, populating the `z_cache`, which can rescue `hash_ptr_val_unsafe`
     /// from deep recursions
-    fn hydrate_z_cache_with_ptr_vals(&self, ptrs: &[&IVal]) {
-        ptrs.chunks(256).for_each(|chunk| {
-            chunk.par_iter().for_each(|ptr| {
-                self.hash_ptr_val_unsafe(ptr);
+    fn hydrate_z_cache_with_ptr_vals(&self, ptr_vals: &[&IVal]) {
+        ptr_vals.chunks(256).for_each(|chunk| {
+            chunk.par_iter().for_each(|ptr_val| {
+                self.hash_ptr_val_unsafe(ptr_val);
             });
         });
     }
@@ -287,9 +281,9 @@
         }
     }

-    /// Safe version of `hash_ptr_val_unsafe` that doesn't hit a stack overflow
-    /// by precomputing the pointers that need to be hashed in order to hash the
-    /// provided `ptr`
+    /// Safe version of `hash_ptr_val_unsafe` that doesn't hit a stack overflow.
+    /// It precomputes the `IVal`s that need to be hashed in order to hash the
+    /// provided `ptr_val`
     pub fn hash_ptr_val(&self, ptr_val: &IVal) -> D {
         if self.is_below_safe_threshold() {
             // just run `hash_ptr_val_unsafe` for extra speed when the dehydrated
@@ -362,15 +356,15 @@ impl<
         self.open(digest).is_some()
     }

-    /// `IPtr` equality w.r.t. their content-addressed versions
+    /// Pointer equality w.r.t. their content-addressed versions
     #[inline]
     pub fn ptr_eq(&self, a: &IPtr<T>, b: &IPtr<T>) -> bool {
         self.hash_ptr(a) == self.hash_ptr(b)
     }

     #[inline]
-    pub fn intern_atom(&self, tag: T, d: D) -> IPtr<T> {
-        IPtr::new(tag, IVal::Atom(self.intern_digest(d).0))
+    pub fn intern_atom(&self, tag: T, digest: D) -> IPtr<T> {
+        IPtr::new(tag, IVal::Atom(self.intern_digest(digest)))
     }

     /// Creates an atom pointer from a ZPtr, with its hash. Hashing
@@ -386,7 +380,7 @@
         self.inverse_z_cache
             .get(digest)
             .cloned()
-            .unwrap_or_else(|| IVal::Atom(self.intern_digest(*digest).0))
+            .unwrap_or_else(|| IVal::Atom(self.intern_digest(*digest)))
     }

     /// Attempts to recover the `Ptr` from `inverse_z_cache`. If the mapping is
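
To close the store_core.rs section: the docstrings above describe hydrating the `z_cache` in chunks, left to right, so that the recursive hasher never recurses deeply. Here is a hedged, self-contained sketch of that idea; the node type, hash function, and sizes are invented for the example, and unlike the real code (which parallelizes within each chunk via `par_iter`) it runs sequentially:

use std::collections::HashMap;

#[derive(Clone, Copy)]
enum Node {
    Leaf(u64),
    Pair(usize, usize), // indices into `nodes`
}

struct Store {
    // Append-only: a node is always pushed after its children.
    nodes: Vec<Node>,
    // Memoized digests, playing the role of `z_cache`.
    z_cache: HashMap<usize, u64>,
}

impl Store {
    // Recursive hasher: recursion depth equals the length of the uncached
    // ancestor chain, so it is only safe once children are cached.
    fn hash_unsafe(&mut self, idx: usize) -> u64 {
        if let Some(d) = self.z_cache.get(&idx) {
            return *d;
        }
        let digest = match self.nodes[idx] {
            Node::Leaf(x) => x.wrapping_mul(0x9e37_79b9_7f4a_7c15),
            Node::Pair(a, b) => {
                let (da, db) = (self.hash_unsafe(a), self.hash_unsafe(b));
                da.rotate_left(17) ^ db
            }
        };
        self.z_cache.insert(idx, digest);
        digest
    }

    // Chunked, left-to-right hydration: by the time a parent is hashed, its
    // children are already cached, so no call recurses deeply.
    fn hydrate_z_cache(&mut self) {
        for start in (0..self.nodes.len()).step_by(256) {
            let end = (start + 256).min(self.nodes.len());
            for idx in start..end {
                self.hash_unsafe(idx);
            }
        }
    }
}

fn main() {
    let mut store = Store { nodes: vec![Node::Leaf(0)], z_cache: HashMap::new() };
    // Build a chain deep enough to overflow the stack if hashed naively.
    for i in 1..100_000u64 {
        store.nodes.push(Node::Leaf(i));
        let leaf = store.nodes.len() - 1;
        store.nodes.push(Node::Pair(leaf - 1, leaf));
    }
    store.hydrate_z_cache();
    let root = store.nodes.len() - 1;
    println!("root digest: {:#x}", store.z_cache[&root]);
}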
