From 139e24a11bfe0dc869903d52763feccab0487f91 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Wed, 18 Mar 2020 16:38:40 +0000 Subject: [PATCH 01/48] variants for output_pos linked list entries (head/tail/middle/unique) next and prev and Vec lmdb keys --- Cargo.lock | 1 + chain/Cargo.toml | 1 + chain/src/lib.rs | 3 ++ chain/src/store.rs | 98 +++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 102 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 2a0e2b44a1..47a140e9d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -892,6 +892,7 @@ dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "croaring-mw 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 362c23fade..c4522fe80b 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -16,6 +16,7 @@ byteorder = "1" failure = "0.1" failure_derive = "0.1" croaring = { version = "0.4.5", package = "croaring-mw", features = ["compat"] } +enum_primitive = "0.1" log = "0.4" serde = "1" serde_derive = "1" diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 78da144bc4..261cffad90 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -23,6 +23,9 @@ #[macro_use] extern crate bitflags; +#[macro_use] +extern crate enum_primitive; + #[macro_use] extern crate serde_derive; #[macro_use] diff --git a/chain/src/store.rs b/chain/src/store.rs index 0dee43cd1e..bc5f3ddedf 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -18,10 +18,11 @@ use crate::core::consensus::HeaderInfo; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader, BlockSums}; use crate::core::pow::Difficulty; -use crate::core::ser::ProtocolVersion; +use crate::core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer}; use crate::types::{CommitPos, Tip}; use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; +use enum_primitive::FromPrimitive; use grin_store as store; use grin_store::{option_to_not_found, to_key, Error, SerIterator}; use std::convert::TryInto; @@ -384,6 +385,101 @@ impl<'a> Batch<'a> { } } +enum_from_primitive! 
{ + #[derive(Copy, Clone, Debug, PartialEq)] + enum OutputPosVariant { + Unique = 0, + Head = 1, + Tail = 2, + Middle = 3, + } +} + +impl Writeable for OutputPosVariant { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + writer.write_u8(*self as u8) + } +} + +impl Readable for OutputPosVariant { + fn read(reader: &mut dyn Reader) -> Result { + OutputPosVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) + } +} + +pub enum OutputPosEntry { + Unique { + pos: CommitPos, + }, + Head { + pos: CommitPos, + next: Vec, + }, + Tail { + pos: CommitPos, + prev: Vec, + }, + Middle { + pos: CommitPos, + next: Vec, + prev: Vec, + }, +} + +impl Writeable for OutputPosEntry { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + match self { + Self::Unique { pos } => { + OutputPosVariant::Unique.write(writer)?; + pos.write(writer)?; + } + Self::Head { pos, next } => { + OutputPosVariant::Head.write(writer)?; + pos.write(writer)?; + next.write(writer)?; + } + Self::Tail { pos, prev } => { + OutputPosVariant::Tail.write(writer)?; + pos.write(writer)?; + prev.write(writer)?; + } + Self::Middle { pos, next, prev } => { + OutputPosVariant::Middle.write(writer)?; + pos.write(writer)?; + next.write(writer)?; + prev.write(writer)?; + } + } + Ok(()) + } +} + +impl Readable for OutputPosEntry { + fn read(reader: &mut dyn Reader) -> Result { + let variant = OutputPosVariant::read(reader)?; + let entry = match variant { + OutputPosVariant::Unique => Self::Unique { + pos: CommitPos::read(reader)?, + }, + OutputPosVariant::Head => Self::Head { + pos: CommitPos::read(reader)?, + next: Vec::::read(reader)?, + }, + OutputPosVariant::Tail => Self::Tail { + pos: CommitPos::read(reader)?, + prev: Vec::::read(reader)?, + }, + OutputPosVariant::Middle => Self::Middle { + pos: CommitPos::read(reader)?, + next: Vec::::read(reader)?, + prev: Vec::::read(reader)?, + }, + }; + + Ok(entry) + } +} + /// An iterator on blocks, from latest to earliest, specialized to return /// information pertaining to block difficulty calculation (timestamp and /// previous difficulties). Mostly used by the consensus next difficulty From 978bf60e5b7e03c78a68583fe4d27091ea901279 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Wed, 18 Mar 2020 16:49:03 +0000 Subject: [PATCH 02/48] get_pos on enum --- chain/src/store.rs | 14 ++++++++++++++ chain/src/types.rs | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index bc5f3ddedf..b7dab9b930 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -427,6 +427,7 @@ pub enum OutputPosEntry { } impl Writeable for OutputPosEntry { + /// Write first byte representing the variant, followed by variant specific data. fn write(&self, writer: &mut W) -> Result<(), ser::Error> { match self { Self::Unique { pos } => { @@ -455,6 +456,7 @@ impl Writeable for OutputPosEntry { } impl Readable for OutputPosEntry { + /// Read the first byte to determine what needs to be read beyond that. fn read(reader: &mut dyn Reader) -> Result { let variant = OutputPosVariant::read(reader)?; let entry = match variant { @@ -480,6 +482,18 @@ impl Readable for OutputPosEntry { } } +impl OutputPosEntry { + /// Read the common pos from the various enum variants. + fn get_pos(&self) -> CommitPos { + match self { + Self::Unique { pos } => *pos, + Self::Head { pos, .. } => *pos, + Self::Tail { pos, .. } => *pos, + Self::Middle { pos, .. 
} => *pos, + } + } +} + /// An iterator on blocks, from latest to earliest, specialized to return /// information pertaining to block difficulty calculation (timestamp and /// previous difficulties). Mostly used by the consensus next difficulty diff --git a/chain/src/types.rs b/chain/src/types.rs index 12e2d78b68..88d64064d8 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -303,7 +303,7 @@ impl OutputRoots { } /// Minimal struct representing a known MMR position and associated block height. -#[derive(Debug)] +#[derive(Clone, Copy, Debug)] pub struct CommitPos { /// MMR position pub pos: u64, From e3bd7a36a1928d166e3f1bc54befb66cb5a550e4 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Thu, 19 Mar 2020 16:11:53 +0000 Subject: [PATCH 03/48] break list and list entries out into separate enums --- chain/src/store.rs | 185 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 141 insertions(+), 44 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index b7dab9b930..51032a1208 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -24,7 +24,7 @@ use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; use enum_primitive::FromPrimitive; use grin_store as store; -use grin_store::{option_to_not_found, to_key, Error, SerIterator}; +use grin_store::{option_to_not_found, to_key, to_key_u64, Error, SerIterator}; use std::convert::TryInto; use std::sync::Arc; @@ -36,6 +36,8 @@ const HEAD_PREFIX: u8 = b'H'; const TAIL_PREFIX: u8 = b'T'; const HEADER_HEAD_PREFIX: u8 = b'G'; const OUTPUT_POS_PREFIX: u8 = b'p'; +const NEW_OUTPUT_POS_PREFIX: u8 = b'P'; + const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; const BLOCK_SPENT_PREFIX: u8 = b'S'; @@ -387,42 +389,147 @@ impl<'a> Batch<'a> { enum_from_primitive! { #[derive(Copy, Clone, Debug, PartialEq)] - enum OutputPosVariant { + enum OutputPosListVariant { Unique = 0, - Head = 1, - Tail = 2, - Middle = 3, + Multi = 1, } } -impl Writeable for OutputPosVariant { +impl Writeable for OutputPosListVariant { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u8(*self as u8) } } -impl Readable for OutputPosVariant { - fn read(reader: &mut dyn Reader) -> Result { - OutputPosVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) +impl Readable for OutputPosListVariant { + fn read(reader: &mut dyn Reader) -> Result { + OutputPosListVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) + } +} + +enum_from_primitive! { + #[derive(Copy, Clone, Debug, PartialEq)] + enum OutputPosEntryVariant { + Head = 2, + Tail = 3, + Middle = 4, + } +} + +impl Writeable for OutputPosEntryVariant { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + writer.write_u8(*self as u8) + } +} + +impl Readable for OutputPosEntryVariant { + fn read(reader: &mut dyn Reader) -> Result { + OutputPosEntryVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) + } +} + +pub enum OutputPosList { + Unique { pos: CommitPos }, + Multi { head: u64, tail: u64 }, +} + +impl Writeable for OutputPosList { + /// Write first byte representing the variant, followed by variant specific data. + /// "Unique" is optimized with embedded "pos". + /// "Multi" has references to "head" and "tail". 
+ fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + match self { + OutputPosList::Unique { pos } => { + OutputPosListVariant::Unique.write(writer)?; + pos.write(writer)?; + } + OutputPosList::Multi { head, tail } => { + OutputPosListVariant::Multi.write(writer)?; + writer.write_u64(*head)?; + writer.write_u64(*tail)?; + } + } + Ok(()) + } +} + +impl Readable for OutputPosList { + /// Read the first byte to determine what needs to be read beyond that. + fn read(reader: &mut dyn Reader) -> Result { + let entry = match OutputPosListVariant::read(reader)? { + OutputPosListVariant::Unique => OutputPosList::Unique { + pos: CommitPos::read(reader)?, + }, + OutputPosListVariant::Multi => OutputPosList::Multi { + head: reader.read_u64()?, + tail: reader.read_u64()?, + }, + }; + Ok(entry) } } +impl OutputPosList { + /// Returns either a "unique" with embedded "pos" or a "list" with "head" and "tail". + /// Key is "prefix|commit". + /// Note the key for an individual entry in the list is "prefix|commit|pos". + pub fn get_list(batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + batch.db.get_ser(&to_key( + NEW_OUTPUT_POS_PREFIX, + &mut commit.as_ref().to_vec(), + )) + } + + /// Returns one of "head", "tail" or "middle" entry variants. + /// Key is "prefix|commit|pos". + pub fn get_entry( + batch: &Batch<'_>, + commit: Commitment, + pos: u64, + ) -> Result, Error> { + batch.db.get_ser(&to_key_u64( + NEW_OUTPUT_POS_PREFIX, + &mut commit.as_ref().to_vec(), + pos, + )) + } + + // pub fn push_entry(batch: &Batch<'_>, commit: Commitment, new_pos: CommitPos) -> Result<(), Error> { + // let current = OutputPosList::get_list(batch, commit)?; + // + // // turn current into old_current here, if head then create a middle etc. + // // let updated_current = + // match current { + // None => None, + // Some(OutputPosEntry::Unique{ pos }) => { + // OutputPosEntry::Tail{ pos, } + // }, + // Some(OutputPosEntry::Head{ pos, next, tail }) => { + // + // // let new_head = OutputPosEntry::Head { + // // pos: new_pos, + // // next: foo, + // // }; + // }, + // Some(_) => { panic!("should never happen"); } + // } + // Ok(()) + // } +} + pub enum OutputPosEntry { - Unique { - pos: CommitPos, - }, Head { pos: CommitPos, - next: Vec, + next: u64, }, Tail { pos: CommitPos, - prev: Vec, + prev: u64, }, Middle { pos: CommitPos, - next: Vec, - prev: Vec, + next: u64, + prev: u64, }, } @@ -430,25 +537,21 @@ impl Writeable for OutputPosEntry { /// Write first byte representing the variant, followed by variant specific data. 
fn write(&self, writer: &mut W) -> Result<(), ser::Error> { match self { - Self::Unique { pos } => { - OutputPosVariant::Unique.write(writer)?; + OutputPosEntry::Head { pos, next } => { + OutputPosEntryVariant::Head.write(writer)?; pos.write(writer)?; + writer.write_u64(*next)?; } - Self::Head { pos, next } => { - OutputPosVariant::Head.write(writer)?; + OutputPosEntry::Tail { pos, prev } => { + OutputPosEntryVariant::Tail.write(writer)?; pos.write(writer)?; - next.write(writer)?; + writer.write_u64(*prev)?; } - Self::Tail { pos, prev } => { - OutputPosVariant::Tail.write(writer)?; + OutputPosEntry::Middle { pos, next, prev } => { + OutputPosEntryVariant::Middle.write(writer)?; pos.write(writer)?; - prev.write(writer)?; - } - Self::Middle { pos, next, prev } => { - OutputPosVariant::Middle.write(writer)?; - pos.write(writer)?; - next.write(writer)?; - prev.write(writer)?; + writer.write_u64(*next)?; + writer.write_u64(*prev)?; } } Ok(()) @@ -458,26 +561,21 @@ impl Writeable for OutputPosEntry { impl Readable for OutputPosEntry { /// Read the first byte to determine what needs to be read beyond that. fn read(reader: &mut dyn Reader) -> Result { - let variant = OutputPosVariant::read(reader)?; - let entry = match variant { - OutputPosVariant::Unique => Self::Unique { + let entry = match OutputPosEntryVariant::read(reader)? { + OutputPosEntryVariant::Head => OutputPosEntry::Head { pos: CommitPos::read(reader)?, + next: reader.read_u64()?, }, - OutputPosVariant::Head => Self::Head { + OutputPosEntryVariant::Tail => OutputPosEntry::Tail { pos: CommitPos::read(reader)?, - next: Vec::::read(reader)?, + prev: reader.read_u64()?, }, - OutputPosVariant::Tail => Self::Tail { + OutputPosEntryVariant::Middle => OutputPosEntry::Middle { pos: CommitPos::read(reader)?, - prev: Vec::::read(reader)?, - }, - OutputPosVariant::Middle => Self::Middle { - pos: CommitPos::read(reader)?, - next: Vec::::read(reader)?, - prev: Vec::::read(reader)?, + next: reader.read_u64()?, + prev: reader.read_u64()?, }, }; - Ok(entry) } } @@ -486,7 +584,6 @@ impl OutputPosEntry { /// Read the common pos from the various enum variants. fn get_pos(&self) -> CommitPos { match self { - Self::Unique { pos } => *pos, Self::Head { pos, .. } => *pos, Self::Tail { pos, .. } => *pos, Self::Middle { pos, .. } => *pos, From cabbc44c261cc7696f07713dfe3532c1a5a72e1f Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Thu, 19 Mar 2020 21:21:10 +0000 Subject: [PATCH 04/48] track output features in the new output_pos index, so we can determine coinbase maturity --- api/src/handlers/utils.rs | 6 +-- chain/src/chain.rs | 4 +- chain/src/store.rs | 75 ++++++++++++++++++-------------- chain/src/txhashset/txhashset.rs | 18 +++++--- chain/src/types.rs | 34 ++++++++++++++- 5 files changed, 93 insertions(+), 44 deletions(-) diff --git a/api/src/handlers/utils.rs b/api/src/handlers/utils.rs index 7b6848ebba..c033fb5bba 100644 --- a/api/src/handlers/utils.rs +++ b/api/src/handlers/utils.rs @@ -13,7 +13,7 @@ // limitations under the License. use crate::chain; -use crate::chain::types::CommitPos; +use crate::chain::types::OutputPos; use crate::core::core::{OutputFeatures, OutputIdentifier}; use crate::rest::*; use crate::types::*; @@ -26,14 +26,14 @@ use std::sync::{Arc, Weak}; // boilerplate of dealing with `Weak`. 
pub fn w(weak: &Weak) -> Result, Error> { weak.upgrade() - .ok_or_else(|| ErrorKind::Internal("failed to upgrade weak refernce".to_owned()).into()) + .ok_or_else(|| ErrorKind::Internal("failed to upgrade weak reference".to_owned()).into()) } /// Internal function to retrieves an output by a given commitment fn get_unspent( chain: &Arc, id: &str, -) -> Result, Error> { +) -> Result, Error> { let c = util::from_hex(id) .map_err(|_| ErrorKind::Argument(format!("Not a valid commitment: {}", id)))?; let commit = Commitment::from_vec(c); diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 087c0b342b..443db7864c 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -30,7 +30,7 @@ use crate::store; use crate::txhashset; use crate::txhashset::{PMMRHandle, TxHashSet}; use crate::types::{ - BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus, + BlockStatus, ChainAdapter, NoStatus, Options, OutputPos, Tip, TxHashsetWriteStatus, }; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::RwLock; @@ -500,7 +500,7 @@ impl Chain { /// spent. This querying is done in a way that is consistent with the /// current chain state, specifically the current winning (valid, most /// work) fork. - pub fn get_unspent(&self, output_ref: &OutputIdentifier) -> Result, Error> { + pub fn get_unspent(&self, output_ref: &OutputIdentifier) -> Result, Error> { self.txhashset.read().get_unspent(output_ref) } diff --git a/chain/src/store.rs b/chain/src/store.rs index 51032a1208..7f273fadef 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -19,7 +19,7 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader, BlockSums}; use crate::core::pow::Difficulty; use crate::core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer}; -use crate::types::{CommitPos, Tip}; +use crate::types::{OutputPos, Tip}; use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; use enum_primitive::FromPrimitive; @@ -209,7 +209,7 @@ impl<'a> Batch<'a> { /// We maintain a "spent" index for each full block to allow the output_pos /// to be easily reverted during rewind. - pub fn save_spent_index(&self, h: &Hash, spent: &Vec) -> Result<(), Error> { + pub fn save_spent_index(&self, h: &Hash, spent: &Vec) -> Result<(), Error> { self.db.put_ser(&to_key(BLOCK_SPENT_PREFIX, h)[..], spent)?; Ok(()) } @@ -360,7 +360,7 @@ impl<'a> Batch<'a> { /// Get the "spent index" from the db for the specified block. /// If we need to rewind a block then we use this to "unspend" the spent outputs. - pub fn get_spent_index(&self, bh: &Hash) -> Result, Error> { + pub fn get_spent_index(&self, bh: &Hash) -> Result, Error> { option_to_not_found(self.db.get_ser(&to_key(BLOCK_SPENT_PREFIX, bh)), || { format!("spent index: {}", bh) }) @@ -429,7 +429,7 @@ impl Readable for OutputPosEntryVariant { } pub enum OutputPosList { - Unique { pos: CommitPos }, + Unique { pos: OutputPos }, Multi { head: u64, tail: u64 }, } @@ -458,7 +458,7 @@ impl Readable for OutputPosList { fn read(reader: &mut dyn Reader) -> Result { let entry = match OutputPosListVariant::read(reader)? 
{ OutputPosListVariant::Unique => OutputPosList::Unique { - pos: CommitPos::read(reader)?, + pos: OutputPos::read(reader)?, }, OutputPosListVariant::Multi => OutputPosList::Multi { head: reader.read_u64()?, @@ -494,40 +494,49 @@ impl OutputPosList { )) } - // pub fn push_entry(batch: &Batch<'_>, commit: Commitment, new_pos: CommitPos) -> Result<(), Error> { - // let current = OutputPosList::get_list(batch, commit)?; - // - // // turn current into old_current here, if head then create a middle etc. - // // let updated_current = - // match current { - // None => None, - // Some(OutputPosEntry::Unique{ pos }) => { - // OutputPosEntry::Tail{ pos, } - // }, - // Some(OutputPosEntry::Head{ pos, next, tail }) => { - // - // // let new_head = OutputPosEntry::Head { - // // pos: new_pos, - // // next: foo, - // // }; - // }, - // Some(_) => { panic!("should never happen"); } - // } - // Ok(()) - // } + pub fn push_entry( + batch: &Batch<'_>, + commit: Commitment, + new_pos: OutputPos, + ) -> Result<(), Error> { + match OutputPosList::get_list(batch, commit)? { + None => { + // create new "unique" and save to db + } + Some(OutputPosList::Unique { pos }) => { + // create tail based on current unique pos + // save tail to db + // create head based on new_pos + // save head to db + // save list itself with head and tail references + } + Some(OutputPosList::Multi { head, tail }) => { + // lookup entry for current head + // create new middle based on current head + // save new middle to db + // create new head based on new_pos + // save updated head to db + // save list itself with head and tail references + } + } + Ok(()) + } } +/// +/// TODO - OutputPos needs an OutputFeatures so we can reason about coinbase maturity. +/// pub enum OutputPosEntry { Head { - pos: CommitPos, + pos: OutputPos, next: u64, }, Tail { - pos: CommitPos, + pos: OutputPos, prev: u64, }, Middle { - pos: CommitPos, + pos: OutputPos, next: u64, prev: u64, }, @@ -563,15 +572,15 @@ impl Readable for OutputPosEntry { fn read(reader: &mut dyn Reader) -> Result { let entry = match OutputPosEntryVariant::read(reader)? { OutputPosEntryVariant::Head => OutputPosEntry::Head { - pos: CommitPos::read(reader)?, + pos: OutputPos::read(reader)?, next: reader.read_u64()?, }, OutputPosEntryVariant::Tail => OutputPosEntry::Tail { - pos: CommitPos::read(reader)?, + pos: OutputPos::read(reader)?, prev: reader.read_u64()?, }, OutputPosEntryVariant::Middle => OutputPosEntry::Middle { - pos: CommitPos::read(reader)?, + pos: OutputPos::read(reader)?, next: reader.read_u64()?, prev: reader.read_u64()?, }, @@ -582,7 +591,7 @@ impl Readable for OutputPosEntry { impl OutputPosEntry { /// Read the common pos from the various enum variants. - fn get_pos(&self) -> CommitPos { + fn get_pos(&self) -> OutputPos { match self { Self::Head { pos, .. } => *pos, Self::Tail { pos, .. 
} => *pos, diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index af7b099fc7..99074af66a 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -25,7 +25,7 @@ use crate::error::{Error, ErrorKind}; use crate::store::{Batch, ChainStore}; use crate::txhashset::bitmap_accumulator::BitmapAccumulator; use crate::txhashset::{RewindableKernelView, UTXOView}; -use crate::types::{CommitPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; +use crate::types::{OutputPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::{file, secp_static, zip}; use croaring::Bitmap; @@ -217,7 +217,7 @@ impl TxHashSet { /// Check if an output is unspent. /// We look in the index to find the output MMR pos. /// Then we check the entry in the output MMR and confirm the hash matches. - pub fn get_unspent(&self, output_id: &OutputIdentifier) -> Result, Error> { + pub fn get_unspent(&self, output_id: &OutputIdentifier) -> Result, Error> { let commit = output_id.commit; match self.commit_index.get_output_pos_height(&commit) { Ok(Some((pos, height))) => { @@ -225,7 +225,11 @@ impl TxHashSet { ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); if let Some(out) = output_pmmr.get_data(pos) { if out == *output_id { - Ok(Some(CommitPos { pos, height })) + Ok(Some(OutputPos { + pos, + height, + features: output_id.features, + })) } else { Ok(None) } @@ -969,7 +973,7 @@ impl<'a> Extension<'a> { ) } - fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { + fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { let commit = input.commitment(); if let Some((pos, height)) = batch.get_output_pos_height(&commit)? { // First check this input corresponds to an existing entry in the output MMR. @@ -987,7 +991,11 @@ impl<'a> Extension<'a> { self.rproof_pmmr .prune(pos) .map_err(ErrorKind::TxHashSetErr)?; - Ok(CommitPos { pos, height }) + Ok(OutputPos { + pos, + height, + features: input.features, + }) } Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()), Err(e) => Err(ErrorKind::TxHashSetErr(e).into()), diff --git a/chain/src/types.rs b/chain/src/types.rs index 88d64064d8..faeab2df7a 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -17,7 +17,7 @@ use chrono::prelude::{DateTime, Utc}; use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; -use crate::core::core::{Block, BlockHeader, HeaderVersion}; +use crate::core::core::{Block, BlockHeader, HeaderVersion, OutputFeatures}; use crate::core::pow::Difficulty; use crate::core::ser::{self, PMMRIndexHashable, Readable, Reader, Writeable, Writer}; use crate::error::{Error, ErrorKind}; @@ -327,6 +327,38 @@ impl Writeable for CommitPos { } } +#[derive(Clone, Copy, Debug)] +pub struct OutputPos { + /// MMR position + pub pos: u64, + /// Block height + pub height: u64, + /// Features + pub features: OutputFeatures, +} + +impl Readable for OutputPos { + fn read(reader: &mut dyn Reader) -> Result { + let pos = reader.read_u64()?; + let height = reader.read_u64()?; + let features = OutputFeatures::read(reader)?; + Ok(OutputPos { + pos, + height, + features, + }) + } +} + +impl Writeable for OutputPos { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + writer.write_u64(self.pos)?; + writer.write_u64(self.height)?; + self.features.write(writer)?; + Ok(()) + } +} + /// The tip of a fork. A handle to the fork ancestry from its leaf in the /// blockchain tree. 
References the max height and the latest and previous /// blocks From af889ffbb085b4e6c2988c9d816bd31aaa894876 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 20 Mar 2020 16:24:06 +0000 Subject: [PATCH 05/48] push entry impl for none and unique --- chain/src/store.rs | 62 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 16 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index 7f273fadef..223e02a437 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -480,6 +480,20 @@ impl OutputPosList { )) } + /// Oldest spendable instance if multiple duplicate unspent outputs exist. + /// Takes current height for coinbase maturity check. + /// A plain output is always spendable. + /// A coinbase output must have matured to be spendable. + /// This will normally be the "tail" of the output_pos list but not in all cases. + /// An plain output will take precedence over an older yet immature coinbase output. + pub fn get_spendable( + batch: &Batch<'_>, + commit: Commitment, + height: u64, + ) -> Result, Error> { + panic!("not yet implemented"); + } + /// Returns one of "head", "tail" or "middle" entry variants. /// Key is "prefix|commit|pos". pub fn get_entry( @@ -487,11 +501,15 @@ impl OutputPosList { commit: Commitment, pos: u64, ) -> Result, Error> { - batch.db.get_ser(&to_key_u64( - NEW_OUTPUT_POS_PREFIX, - &mut commit.as_ref().to_vec(), - pos, - )) + batch.db.get_ser(&Self::entry_key(commit, pos)) + } + + fn list_key(commit: Commitment) -> Vec { + to_key(NEW_OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()) + } + + fn entry_key(commit: Commitment, pos: u64) -> Vec { + to_key_u64(NEW_OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec(), pos) } pub fn push_entry( @@ -499,16 +517,31 @@ impl OutputPosList { commit: Commitment, new_pos: OutputPos, ) -> Result<(), Error> { - match OutputPosList::get_list(batch, commit)? { + match Self::get_list(batch, commit)? { None => { - // create new "unique" and save to db + let list = Self::Unique { pos: new_pos }; + batch.db.put_ser(&Self::list_key(commit), &list)?; } - Some(OutputPosList::Unique { pos }) => { - // create tail based on current unique pos - // save tail to db - // create head based on new_pos - // save head to db - // save list itself with head and tail references + Some(OutputPosList::Unique { pos: current_pos }) => { + let head = OutputPosEntry::Head { + pos: new_pos, + next: current_pos.pos, + }; + let tail = OutputPosEntry::Tail { + pos: current_pos, + prev: new_pos.pos, + }; + let list = OutputPosList::Multi { + head: new_pos.pos, + tail: current_pos.pos, + }; + batch + .db + .put_ser(&Self::entry_key(commit, new_pos.pos), &head)?; + batch + .db + .put_ser(&Self::entry_key(commit, current_pos.pos), &tail)?; + batch.db.put_ser(&Self::list_key(commit), &list)?; } Some(OutputPosList::Multi { head, tail }) => { // lookup entry for current head @@ -523,9 +556,6 @@ impl OutputPosList { } } -/// -/// TODO - OutputPos needs an OutputFeatures so we can reason about coinbase maturity. 
-/// pub enum OutputPosEntry { Head { pos: OutputPos, From 3fd78ad85251880a665bcd85d3c188d70ba34e86 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 20 Mar 2020 21:53:00 +0000 Subject: [PATCH 06/48] some test coverage for output_pos_list --- chain/src/store.rs | 37 +++++++--- chain/src/types.rs | 2 +- chain/tests/store_output_pos_list.rs | 104 +++++++++++++++++++++++++++ store/src/lmdb.rs | 3 + 4 files changed, 137 insertions(+), 9 deletions(-) create mode 100644 chain/tests/store_output_pos_list.rs diff --git a/chain/src/store.rs b/chain/src/store.rs index 223e02a437..12d323e889 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -64,9 +64,7 @@ impl ChainStore { db: db_with_version, } } -} -impl ChainStore { /// The current chain head. pub fn head(&self) -> Result { option_to_not_found(self.db.get_ser(&[HEAD_PREFIX]), || "HEAD".to_owned()) @@ -428,6 +426,7 @@ impl Readable for OutputPosEntryVariant { } } +#[derive(Copy, Clone, Debug, PartialEq)] pub enum OutputPosList { Unique { pos: OutputPos }, Multi { head: u64, tail: u64 }, @@ -544,12 +543,34 @@ impl OutputPosList { batch.db.put_ser(&Self::list_key(commit), &list)?; } Some(OutputPosList::Multi { head, tail }) => { - // lookup entry for current head - // create new middle based on current head - // save new middle to db - // create new head based on new_pos - // save updated head to db - // save list itself with head and tail references + if let Some(OutputPosEntry::Head { + pos: current_pos, + next: current_next, + }) = Self::get_entry(batch, commit, head)? + { + let head = OutputPosEntry::Head { + pos: new_pos, + next: current_pos.pos, + }; + let middle = OutputPosEntry::Middle { + pos: current_pos, + next: current_next, + prev: new_pos.pos, + }; + let list = OutputPosList::Multi { + head: new_pos.pos, + tail, + }; + batch + .db + .put_ser(&Self::entry_key(commit, new_pos.pos), &head)?; + batch + .db + .put_ser(&Self::entry_key(commit, current_pos.pos), &middle)?; + batch.db.put_ser(&Self::list_key(commit), &list)?; + } else { + return Err(Error::OtherErr("expected head to be head variant".into())); + } } } Ok(()) diff --git a/chain/src/types.rs b/chain/src/types.rs index faeab2df7a..e92817ebbf 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -327,7 +327,7 @@ impl Writeable for CommitPos { } } -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct OutputPos { /// MMR position pub pos: u64, diff --git a/chain/tests/store_output_pos_list.rs b/chain/tests/store_output_pos_list.rs new file mode 100644 index 0000000000..7c1c6cb817 --- /dev/null +++ b/chain/tests/store_output_pos_list.rs @@ -0,0 +1,104 @@ +// Copyright 2020 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use grin_chain as chain; +use grin_core as core; +use grin_util as util; + +use crate::chain::store::{ChainStore, OutputPosList}; +use crate::chain::types::OutputPos; +use crate::core::core::OutputFeatures; +use crate::util::secp::pedersen::Commitment; +mod chain_test_helper; + +use self::chain_test_helper::clean_output_dir; + +#[test] +fn test_store_output_pos_list() { + util::init_test_logger(); + + let chain_dir = ".grin_idx_1"; + clean_output_dir(chain_dir); + + let store = ChainStore::new(chain_dir).unwrap(); + + let batch = store.batch().unwrap(); + + let commit = Commitment::from_vec(vec![]); + + assert_eq!(OutputPosList::get_list(&batch, commit), Ok(None)); + + assert_eq!( + OutputPosList::push_entry( + &batch, + commit, + OutputPos { + pos: 1, + height: 1, + features: OutputFeatures::Plain, + }, + ), + Ok(()), + ); + + assert_eq!( + OutputPosList::get_list(&batch, commit), + Ok(Some(OutputPosList::Unique { + pos: OutputPos { + pos: 1, + height: 1, + features: OutputFeatures::Plain + } + })), + ); + + assert_eq!( + OutputPosList::push_entry( + &batch, + commit, + OutputPos { + pos: 2, + height: 2, + features: OutputFeatures::Plain, + }, + ), + Ok(()), + ); + + assert_eq!( + OutputPosList::get_list(&batch, commit), + Ok(Some(OutputPosList::Multi { head: 2, tail: 1 })), + ); + + assert_eq!( + OutputPosList::push_entry( + &batch, + commit, + OutputPos { + pos: 3, + height: 3, + features: OutputFeatures::Plain, + }, + ), + Ok(()), + ); + + assert_eq!( + OutputPosList::get_list(&batch, commit), + Ok(Some(OutputPosList::Multi { head: 3, tail: 1 })), + ); + + // Cleanup chain directory + clean_output_dir(chain_dir); +} diff --git a/store/src/lmdb.rs b/store/src/lmdb.rs index 96a43e06b1..57b5d8995a 100644 --- a/store/src/lmdb.rs +++ b/store/src/lmdb.rs @@ -47,6 +47,9 @@ pub enum Error { /// Wraps a serialization error for Writeable or Readable #[fail(display = "Serialization Error")] SerErr(String), + /// Other error + #[fail(display = "Other Error")] + OtherErr(String), } impl From for Error { From 34b4ed4d278be0ad949aec2dad8bdfbf79a30949 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 21 Mar 2020 20:41:35 +0000 Subject: [PATCH 07/48] commit --- chain/src/store.rs | 22 ++++++++++++++++++++++ chain/tests/store_output_pos_list.rs | 14 ++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/chain/src/store.rs b/chain/src/store.rs index 12d323e889..30f6fc86c0 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -575,6 +575,28 @@ impl OutputPosList { } Ok(()) } + + /// Pop the head of the list. + /// Returns the output_pos. + /// Returns None if list was empty. + pub fn pop_entry(batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + match Self::get_list(batch, commit)? { + None => Ok(None), + Some(OutputPosList::Unique { pos }) => { + // TODO - delete the list itself. 
+ + Ok(Some(pos)) + } + Some(OutputPosList::Multi { head, tail }) => { + // read head from db + // read next one + // update next to a head if it was a middle + // update list head + // update list to a unique if next is a tail + Ok(None) + } + } + } } pub enum OutputPosEntry { diff --git a/chain/tests/store_output_pos_list.rs b/chain/tests/store_output_pos_list.rs index 7c1c6cb817..8dd03db365 100644 --- a/chain/tests/store_output_pos_list.rs +++ b/chain/tests/store_output_pos_list.rs @@ -99,6 +99,20 @@ fn test_store_output_pos_list() { Ok(Some(OutputPosList::Multi { head: 3, tail: 1 })), ); + assert_eq!( + OutputPosList::pop_entry(&batch, commit,), + Ok(Some(OutputPos { + pos: 3, + height: 3, + features: OutputFeatures::Plain, + })), + ); + + assert_eq!( + OutputPosList::get_list(&batch, commit), + Ok(Some(OutputPosList::Multi { head: 2, tail: 1 })), + ); + // Cleanup chain directory clean_output_dir(chain_dir); } From 7841a3e7e5fa758f7970f554aa7baf1798e8b866 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 23 Mar 2020 11:10:13 +0000 Subject: [PATCH 08/48] wip - FooListEntry --- chain/src/store.rs | 319 ++++++++++++++++++++++++--------------------- 1 file changed, 173 insertions(+), 146 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index 30f6fc86c0..c8ea226d97 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -387,63 +387,110 @@ impl<'a> Batch<'a> { enum_from_primitive! { #[derive(Copy, Clone, Debug, PartialEq)] - enum OutputPosListVariant { + enum LinkedListVariant { Unique = 0, Multi = 1, } } -impl Writeable for OutputPosListVariant { +impl Writeable for LinkedListVariant { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u8(*self as u8) } } -impl Readable for OutputPosListVariant { - fn read(reader: &mut dyn Reader) -> Result { - OutputPosListVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) +impl Readable for LinkedListVariant { + fn read(reader: &mut dyn Reader) -> Result { + LinkedListVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) } } enum_from_primitive! { #[derive(Copy, Clone, Debug, PartialEq)] - enum OutputPosEntryVariant { + enum ListEntryVariant { Head = 2, Tail = 3, Middle = 4, } } -impl Writeable for OutputPosEntryVariant { +impl Writeable for ListEntryVariant { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u8(*self as u8) } } -impl Readable for OutputPosEntryVariant { - fn read(reader: &mut dyn Reader) -> Result { - OutputPosEntryVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) +impl Readable for ListEntryVariant { + fn read(reader: &mut dyn Reader) -> Result { + ListEntryVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) } } +trait FooLinkedList { + /// List type + type List: Readable + Writeable; + + /// List entry type + type Entry: FooListEntry; + + fn list_key(commit: Commitment) -> Vec { + to_key(NEW_OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()) + } + + fn entry_key(commit: Commitment, pos: u64) -> Vec { + to_key_u64(NEW_OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec(), pos) + } + + /// Returns either a "unique" with embedded "pos" or a "list" with "head" and "tail". + /// Key is "prefix|commit". + /// Note the key for an individual entry in the list is "prefix|commit|pos". + fn get_list(batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + batch.db.get_ser(&Self::list_key(commit)) + } + + /// Returns one of "head", "tail" or "middle" entry variants. 
+ /// Key is "prefix|commit|pos". + fn get_entry( + batch: &Batch<'_>, + commit: Commitment, + pos: u64, + ) -> Result, Error> { + batch.db.get_ser(&Self::entry_key(commit, pos)) + } + + fn push_entry( + batch: &Batch<'_>, + commit: Commitment, + new_pos: ::Pos, + ) -> Result<(), Error>; + + fn pop_entry( + batch: &Batch<'_>, + commit: Commitment, + ) -> Result::Pos>, Error>; +} + #[derive(Copy, Clone, Debug, PartialEq)] -pub enum OutputPosList { - Unique { pos: OutputPos }, +pub enum LinkedList { + Unique { pos: T }, Multi { head: u64, tail: u64 }, } -impl Writeable for OutputPosList { +impl Writeable for LinkedList +where + T: Writeable, +{ /// Write first byte representing the variant, followed by variant specific data. /// "Unique" is optimized with embedded "pos". /// "Multi" has references to "head" and "tail". fn write(&self, writer: &mut W) -> Result<(), ser::Error> { match self { - OutputPosList::Unique { pos } => { - OutputPosListVariant::Unique.write(writer)?; + LinkedList::Unique { pos } => { + LinkedListVariant::Unique.write(writer)?; pos.write(writer)?; } - OutputPosList::Multi { head, tail } => { - OutputPosListVariant::Multi.write(writer)?; + LinkedList::Multi { head, tail } => { + LinkedListVariant::Multi.write(writer)?; writer.write_u64(*head)?; writer.write_u64(*tail)?; } @@ -452,14 +499,17 @@ impl Writeable for OutputPosList { } } -impl Readable for OutputPosList { +impl Readable for LinkedList +where + T: Readable, +{ /// Read the first byte to determine what needs to be read beyond that. - fn read(reader: &mut dyn Reader) -> Result { - let entry = match OutputPosListVariant::read(reader)? { - OutputPosListVariant::Unique => OutputPosList::Unique { - pos: OutputPos::read(reader)?, + fn read(reader: &mut dyn Reader) -> Result, ser::Error> { + let entry = match LinkedListVariant::read(reader)? { + LinkedListVariant::Unique => LinkedList::Unique { + pos: T::read(reader)?, }, - OutputPosListVariant::Multi => OutputPosList::Multi { + LinkedListVariant::Multi => LinkedList::Multi { head: reader.read_u64()?, tail: reader.read_u64()?, }, @@ -468,105 +518,87 @@ impl Readable for OutputPosList { } } -impl OutputPosList { - /// Returns either a "unique" with embedded "pos" or a "list" with "head" and "tail". - /// Key is "prefix|commit". - /// Note the key for an individual entry in the list is "prefix|commit|pos". - pub fn get_list(batch: &Batch<'_>, commit: Commitment) -> Result, Error> { - batch.db.get_ser(&to_key( - NEW_OUTPUT_POS_PREFIX, - &mut commit.as_ref().to_vec(), - )) - } - - /// Oldest spendable instance if multiple duplicate unspent outputs exist. - /// Takes current height for coinbase maturity check. - /// A plain output is always spendable. - /// A coinbase output must have matured to be spendable. - /// This will normally be the "tail" of the output_pos list but not in all cases. - /// An plain output will take precedence over an older yet immature coinbase output. - pub fn get_spendable( - batch: &Batch<'_>, - commit: Commitment, - height: u64, - ) -> Result, Error> { - panic!("not yet implemented"); - } +impl FooLinkedList for LinkedList +where + T: PosEntry, +{ + type List = LinkedList; + type Entry = ListEntry; - /// Returns one of "head", "tail" or "middle" entry variants. - /// Key is "prefix|commit|pos". 
- pub fn get_entry( - batch: &Batch<'_>, - commit: Commitment, - pos: u64, - ) -> Result, Error> { - batch.db.get_ser(&Self::entry_key(commit, pos)) - } - - fn list_key(commit: Commitment) -> Vec { - to_key(NEW_OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()) - } + /// Pop the head of the list. + /// Returns the output_pos. + /// Returns None if list was empty. + fn pop_entry(batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + match Self::get_list(batch, commit)? { + None => Ok(None), + Some(LinkedList::Unique { pos }) => { + // TODO - delete the list itself. - fn entry_key(commit: Commitment, pos: u64) -> Vec { - to_key_u64(NEW_OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec(), pos) + Ok(Some(pos)) + } + Some(LinkedList::Multi { head, tail }) => { + // read head from db + // read next one + // update next to a head if it was a middle + // update list head + // update list to a unique if next is a tail + Ok(None) + } + } } - pub fn push_entry( - batch: &Batch<'_>, - commit: Commitment, - new_pos: OutputPos, - ) -> Result<(), Error> { + fn push_entry(batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> { match Self::get_list(batch, commit)? { None => { let list = Self::Unique { pos: new_pos }; batch.db.put_ser(&Self::list_key(commit), &list)?; } - Some(OutputPosList::Unique { pos: current_pos }) => { - let head = OutputPosEntry::Head { + Some(LinkedList::Unique { pos: current_pos }) => { + let head = ListEntry::Head { pos: new_pos, - next: current_pos.pos, + next: current_pos.pos(), }; - let tail = OutputPosEntry::Tail { + let tail = ListEntry::Tail { pos: current_pos, - prev: new_pos.pos, + prev: new_pos.pos(), }; - let list = OutputPosList::Multi { - head: new_pos.pos, - tail: current_pos.pos, + let list: LinkedList = LinkedList::Multi { + head: new_pos.pos(), + tail: current_pos.pos(), }; batch .db - .put_ser(&Self::entry_key(commit, new_pos.pos), &head)?; + .put_ser(&Self::entry_key(commit, new_pos.pos()), &head)?; batch .db - .put_ser(&Self::entry_key(commit, current_pos.pos), &tail)?; + .put_ser(&Self::entry_key(commit, current_pos.pos()), &tail)?; batch.db.put_ser(&Self::list_key(commit), &list)?; } - Some(OutputPosList::Multi { head, tail }) => { - if let Some(OutputPosEntry::Head { + Some(LinkedList::Multi { head, tail }) => { + if let Some(ListEntry::Head { pos: current_pos, next: current_next, }) = Self::get_entry(batch, commit, head)? { - let head = OutputPosEntry::Head { + let head = ListEntry::Head { pos: new_pos, - next: current_pos.pos, + next: current_pos.pos(), }; - let middle = OutputPosEntry::Middle { + let middle = ListEntry::Middle { pos: current_pos, next: current_next, - prev: new_pos.pos, + prev: new_pos.pos(), }; - let list = OutputPosList::Multi { - head: new_pos.pos, + let list: LinkedList = LinkedList::Multi { + head: new_pos.pos(), tail, }; batch .db - .put_ser(&Self::entry_key(commit, new_pos.pos), &head)?; + .put_ser(&Self::entry_key(commit, new_pos.pos()), &head)?; batch .db - .put_ser(&Self::entry_key(commit, current_pos.pos), &middle)?; + .put_ser(&Self::entry_key(commit, current_pos.pos()), &middle)?; batch.db.put_ser(&Self::list_key(commit), &list)?; } else { return Err(Error::OtherErr("expected head to be head variant".into())); @@ -575,62 +607,65 @@ impl OutputPosList { } Ok(()) } +} - /// Pop the head of the list. - /// Returns the output_pos. - /// Returns None if list was empty. - pub fn pop_entry(batch: &Batch<'_>, commit: Commitment) -> Result, Error> { - match Self::get_list(batch, commit)? 
{ - None => Ok(None), - Some(OutputPosList::Unique { pos }) => { - // TODO - delete the list itself. +trait PosEntry: Readable + Writeable + Copy { + fn pos(&self) -> u64; +} - Ok(Some(pos)) - } - Some(OutputPosList::Multi { head, tail }) => { - // read head from db - // read next one - // update next to a head if it was a middle - // update list head - // update list to a unique if next is a tail - Ok(None) - } +impl PosEntry for OutputPos { + fn pos(&self) -> u64 { + self.pos + } +} + +trait FooListEntry: Readable + Writeable { + type Pos: PosEntry; + + fn get_pos(&self) -> Self::Pos; +} + +impl FooListEntry for ListEntry +where + T: PosEntry, +{ + type Pos = T; + + /// Read the common pos from the various enum variants. + fn get_pos(&self) -> Self::Pos { + match self { + Self::Head { pos, .. } => *pos, + Self::Tail { pos, .. } => *pos, + Self::Middle { pos, .. } => *pos, } } } -pub enum OutputPosEntry { - Head { - pos: OutputPos, - next: u64, - }, - Tail { - pos: OutputPos, - prev: u64, - }, - Middle { - pos: OutputPos, - next: u64, - prev: u64, - }, +pub enum ListEntry { + Head { pos: T, next: u64 }, + Tail { pos: T, prev: u64 }, + Middle { pos: T, next: u64, prev: u64 }, } -impl Writeable for OutputPosEntry { +impl Writeable for ListEntry +where + T: Writeable, +{ /// Write first byte representing the variant, followed by variant specific data. fn write(&self, writer: &mut W) -> Result<(), ser::Error> { match self { - OutputPosEntry::Head { pos, next } => { - OutputPosEntryVariant::Head.write(writer)?; + ListEntry::Head { pos, next } => { + ListEntryVariant::Head.write(writer)?; pos.write(writer)?; writer.write_u64(*next)?; } - OutputPosEntry::Tail { pos, prev } => { - OutputPosEntryVariant::Tail.write(writer)?; + ListEntry::Tail { pos, prev } => { + ListEntryVariant::Tail.write(writer)?; pos.write(writer)?; writer.write_u64(*prev)?; } - OutputPosEntry::Middle { pos, next, prev } => { - OutputPosEntryVariant::Middle.write(writer)?; + ListEntry::Middle { pos, next, prev } => { + ListEntryVariant::Middle.write(writer)?; pos.write(writer)?; writer.write_u64(*next)?; writer.write_u64(*prev)?; @@ -640,20 +675,23 @@ impl Writeable for OutputPosEntry { } } -impl Readable for OutputPosEntry { +impl Readable for ListEntry +where + T: Readable, +{ /// Read the first byte to determine what needs to be read beyond that. - fn read(reader: &mut dyn Reader) -> Result { - let entry = match OutputPosEntryVariant::read(reader)? { - OutputPosEntryVariant::Head => OutputPosEntry::Head { - pos: OutputPos::read(reader)?, + fn read(reader: &mut dyn Reader) -> Result, ser::Error> { + let entry = match ListEntryVariant::read(reader)? { + ListEntryVariant::Head => ListEntry::Head { + pos: T::read(reader)?, next: reader.read_u64()?, }, - OutputPosEntryVariant::Tail => OutputPosEntry::Tail { - pos: OutputPos::read(reader)?, + ListEntryVariant::Tail => ListEntry::Tail { + pos: T::read(reader)?, prev: reader.read_u64()?, }, - OutputPosEntryVariant::Middle => OutputPosEntry::Middle { - pos: OutputPos::read(reader)?, + ListEntryVariant::Middle => ListEntry::Middle { + pos: T::read(reader)?, next: reader.read_u64()?, prev: reader.read_u64()?, }, @@ -662,17 +700,6 @@ impl Readable for OutputPosEntry { } } -impl OutputPosEntry { - /// Read the common pos from the various enum variants. - fn get_pos(&self) -> OutputPos { - match self { - Self::Head { pos, .. } => *pos, - Self::Tail { pos, .. } => *pos, - Self::Middle { pos, .. 
} => *pos, - } - } -} - /// An iterator on blocks, from latest to earliest, specialized to return /// information pertaining to block difficulty calculation (timestamp and /// previous difficulties). Mostly used by the consensus next difficulty From 2f95e55f5a9c5e50c9b382d120ba769e5b4aae3e Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 23 Mar 2020 12:20:07 +0000 Subject: [PATCH 09/48] use instance of the index --- chain/src/store.rs | 82 +++++++++++++++++++--------- chain/tests/store_output_pos_list.rs | 32 ++++++----- 2 files changed, 72 insertions(+), 42 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index c8ea226d97..e8594167f5 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -36,7 +36,8 @@ const HEAD_PREFIX: u8 = b'H'; const TAIL_PREFIX: u8 = b'T'; const HEADER_HEAD_PREFIX: u8 = b'G'; const OUTPUT_POS_PREFIX: u8 = b'p'; -const NEW_OUTPUT_POS_PREFIX: u8 = b'P'; +const NEW_PLAIN_OUTPUT_POS_PREFIX: u8 = b'P'; +const NEW_COINBASE_OUTPUT_POS_PREFIX: u8 = b'C'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; @@ -426,45 +427,44 @@ impl Readable for ListEntryVariant { } } -trait FooLinkedList { +pub trait FooLinkedList { /// List type type List: Readable + Writeable; /// List entry type type Entry: FooListEntry; - fn list_key(commit: Commitment) -> Vec { - to_key(NEW_OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()) - } + fn list_key(&self, commit: Commitment) -> Vec; - fn entry_key(commit: Commitment, pos: u64) -> Vec { - to_key_u64(NEW_OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec(), pos) - } + fn entry_key(&self, commit: Commitment, pos: u64) -> Vec; /// Returns either a "unique" with embedded "pos" or a "list" with "head" and "tail". /// Key is "prefix|commit". /// Note the key for an individual entry in the list is "prefix|commit|pos". - fn get_list(batch: &Batch<'_>, commit: Commitment) -> Result, Error> { - batch.db.get_ser(&Self::list_key(commit)) + fn get_list(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + batch.db.get_ser(&self.list_key(commit)) } /// Returns one of "head", "tail" or "middle" entry variants. /// Key is "prefix|commit|pos". fn get_entry( + &self, batch: &Batch<'_>, commit: Commitment, pos: u64, ) -> Result, Error> { - batch.db.get_ser(&Self::entry_key(commit, pos)) + batch.db.get_ser(&self.entry_key(commit, pos)) } fn push_entry( + &self, batch: &Batch<'_>, commit: Commitment, new_pos: ::Pos, ) -> Result<(), Error>; fn pop_entry( + &self, batch: &Batch<'_>, commit: Commitment, ) -> Result::Pos>, Error>; @@ -518,18 +518,46 @@ where } } -impl FooLinkedList for LinkedList +pub struct MyLinkedList { + phantom: std::marker::PhantomData<*const T>, + prefix: u8, +} + +pub fn output_plain_index() -> MyLinkedList { + MyLinkedList { + phantom: std::marker::PhantomData, + prefix: NEW_PLAIN_OUTPUT_POS_PREFIX, + } +} + +pub fn output_coinbase_index() -> MyLinkedList { + MyLinkedList { + phantom: std::marker::PhantomData, + prefix: NEW_COINBASE_OUTPUT_POS_PREFIX, + } +} + +// TODO - We need a struct *and* an enum. The struct will handle the specific key prefixes etc. +impl FooLinkedList for MyLinkedList where T: PosEntry, { type List = LinkedList; type Entry = ListEntry; + fn list_key(&self, commit: Commitment) -> Vec { + to_key(self.prefix, &mut commit.as_ref().to_vec()) + } + + fn entry_key(&self, commit: Commitment, pos: u64) -> Vec { + to_key_u64(self.prefix, &mut commit.as_ref().to_vec(), pos) + } + /// Pop the head of the list. 
/// Returns the output_pos. /// Returns None if list was empty. - fn pop_entry(batch: &Batch<'_>, commit: Commitment) -> Result, Error> { - match Self::get_list(batch, commit)? { + fn pop_entry(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + match self.get_list(batch, commit)? { None => Ok(None), Some(LinkedList::Unique { pos }) => { // TODO - delete the list itself. @@ -547,11 +575,11 @@ where } } - fn push_entry(batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> { - match Self::get_list(batch, commit)? { + fn push_entry(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> { + match self.get_list(batch, commit)? { None => { - let list = Self::Unique { pos: new_pos }; - batch.db.put_ser(&Self::list_key(commit), &list)?; + let list = LinkedList::Unique { pos: new_pos }; + batch.db.put_ser(&self.list_key(commit), &list)?; } Some(LinkedList::Unique { pos: current_pos }) => { let head = ListEntry::Head { @@ -568,17 +596,17 @@ where }; batch .db - .put_ser(&Self::entry_key(commit, new_pos.pos()), &head)?; + .put_ser(&self.entry_key(commit, new_pos.pos()), &head)?; batch .db - .put_ser(&Self::entry_key(commit, current_pos.pos()), &tail)?; - batch.db.put_ser(&Self::list_key(commit), &list)?; + .put_ser(&self.entry_key(commit, current_pos.pos()), &tail)?; + batch.db.put_ser(&self.list_key(commit), &list)?; } Some(LinkedList::Multi { head, tail }) => { if let Some(ListEntry::Head { pos: current_pos, next: current_next, - }) = Self::get_entry(batch, commit, head)? + }) = self.get_entry(batch, commit, head)? { let head = ListEntry::Head { pos: new_pos, @@ -595,11 +623,11 @@ where }; batch .db - .put_ser(&Self::entry_key(commit, new_pos.pos()), &head)?; + .put_ser(&self.entry_key(commit, new_pos.pos()), &head)?; batch .db - .put_ser(&Self::entry_key(commit, current_pos.pos()), &middle)?; - batch.db.put_ser(&Self::list_key(commit), &list)?; + .put_ser(&self.entry_key(commit, current_pos.pos()), &middle)?; + batch.db.put_ser(&self.list_key(commit), &list)?; } else { return Err(Error::OtherErr("expected head to be head variant".into())); } @@ -609,7 +637,7 @@ where } } -trait PosEntry: Readable + Writeable + Copy { +pub trait PosEntry: Readable + Writeable + Copy { fn pos(&self) -> u64; } @@ -619,7 +647,7 @@ impl PosEntry for OutputPos { } } -trait FooListEntry: Readable + Writeable { +pub trait FooListEntry: Readable + Writeable { type Pos: PosEntry; fn get_pos(&self) -> Self::Pos; diff --git a/chain/tests/store_output_pos_list.rs b/chain/tests/store_output_pos_list.rs index 8dd03db365..3927e7bb39 100644 --- a/chain/tests/store_output_pos_list.rs +++ b/chain/tests/store_output_pos_list.rs @@ -16,8 +16,8 @@ use grin_chain as chain; use grin_core as core; use grin_util as util; -use crate::chain::store::{ChainStore, OutputPosList}; -use crate::chain::types::OutputPos; +use crate::chain::store::{self, ChainStore, FooLinkedList, LinkedList}; +use crate::chain::types::{CommitPos, OutputPos}; use crate::core::core::OutputFeatures; use crate::util::secp::pedersen::Commitment; mod chain_test_helper; @@ -37,10 +37,12 @@ fn test_store_output_pos_list() { let commit = Commitment::from_vec(vec![]); - assert_eq!(OutputPosList::get_list(&batch, commit), Ok(None)); + let index = store::output_plain_index(); + + assert_eq!(index.get_list(&batch, commit), Ok(None)); assert_eq!( - OutputPosList::push_entry( + index.push_entry( &batch, commit, OutputPos { @@ -53,8 +55,8 @@ fn test_store_output_pos_list() { ); assert_eq!( - OutputPosList::get_list(&batch, 
commit), - Ok(Some(OutputPosList::Unique { + index.get_list(&batch, commit), + Ok(Some(LinkedList::Unique { pos: OutputPos { pos: 1, height: 1, @@ -64,7 +66,7 @@ fn test_store_output_pos_list() { ); assert_eq!( - OutputPosList::push_entry( + index.push_entry( &batch, commit, OutputPos { @@ -77,12 +79,12 @@ fn test_store_output_pos_list() { ); assert_eq!( - OutputPosList::get_list(&batch, commit), - Ok(Some(OutputPosList::Multi { head: 2, tail: 1 })), + index.get_list(&batch, commit), + Ok(Some(LinkedList::Multi { head: 2, tail: 1 })), ); assert_eq!( - OutputPosList::push_entry( + index.push_entry( &batch, commit, OutputPos { @@ -95,12 +97,12 @@ fn test_store_output_pos_list() { ); assert_eq!( - OutputPosList::get_list(&batch, commit), - Ok(Some(OutputPosList::Multi { head: 3, tail: 1 })), + index.get_list(&batch, commit), + Ok(Some(LinkedList::Multi { head: 3, tail: 1 })), ); assert_eq!( - OutputPosList::pop_entry(&batch, commit,), + index.pop_entry(&batch, commit,), Ok(Some(OutputPos { pos: 3, height: 3, @@ -109,8 +111,8 @@ fn test_store_output_pos_list() { ); assert_eq!( - OutputPosList::get_list(&batch, commit), - Ok(Some(OutputPosList::Multi { head: 2, tail: 1 })), + index.get_list(&batch, commit), + Ok(Some(LinkedList::Multi { head: 2, tail: 1 })), ); // Cleanup chain directory From f489e776575abe58c2fcde478bf1ec757e4626b0 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 23 Mar 2020 19:04:58 +0000 Subject: [PATCH 10/48] linked list of output_pos and commit_pos both now supported --- chain/src/store.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index e8594167f5..ed33e9db35 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -19,7 +19,7 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader, BlockSums}; use crate::core::pow::Difficulty; use crate::core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer}; -use crate::types::{OutputPos, Tip}; +use crate::types::{CommitPos, OutputPos, Tip}; use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; use enum_primitive::FromPrimitive; @@ -39,6 +39,8 @@ const OUTPUT_POS_PREFIX: u8 = b'p'; const NEW_PLAIN_OUTPUT_POS_PREFIX: u8 = b'P'; const NEW_COINBASE_OUTPUT_POS_PREFIX: u8 = b'C'; +const KERNEL_POS_PREFIX: u8 = b'K'; + const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; const BLOCK_SPENT_PREFIX: u8 = b'S'; @@ -537,7 +539,13 @@ pub fn output_coinbase_index() -> MyLinkedList { } } -// TODO - We need a struct *and* an enum. The struct will handle the specific key prefixes etc. 
+pub fn kernel_index() -> MyLinkedList<CommitPos> {
+	MyLinkedList {
+		phantom: std::marker::PhantomData,
+		prefix: KERNEL_POS_PREFIX,
+	}
+}
+
 impl<T> FooLinkedList for MyLinkedList<T>
 where
 	T: PosEntry,

From 9cbb55037b8124bb9b53f92808aecc4ac4d84c44 Mon Sep 17 00:00:00 2001
From: antiochp <30642645+antiochp@users.noreply.github.com>
Date: Mon, 23 Mar 2020 20:46:32 +0000
Subject: [PATCH 11/48] linked_list

---
 chain/src/lib.rs                     |   1 +
 chain/src/linked_list.rs             | 381 +++++++++++++++++++++++++++
 chain/src/store.rs                   | 357 +------------------------
 chain/tests/store_output_pos_list.rs |   5 +-
 4 files changed, 390 insertions(+), 354 deletions(-)
 create mode 100644 chain/src/linked_list.rs

diff --git a/chain/src/lib.rs b/chain/src/lib.rs
index 261cffad90..092b880387 100644
--- a/chain/src/lib.rs
+++ b/chain/src/lib.rs
@@ -38,6 +38,7 @@ use grin_util as util;
 
 mod chain;
 mod error;
+pub mod linked_list;
 pub mod pipe;
 pub mod store;
 pub mod txhashset;
diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs
new file mode 100644
index 0000000000..f32cf6a3bd
--- /dev/null
+++ b/chain/src/linked_list.rs
@@ -0,0 +1,381 @@
+// Copyright 2020 The Grin Developers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implements storage primitives required by the chain
+
+use crate::core::consensus::HeaderInfo;
+use crate::core::core::hash::{Hash, Hashed};
+use crate::core::core::{Block, BlockHeader, BlockSums};
+use crate::core::pow::Difficulty;
+use crate::core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer};
+use crate::store::{
+	Batch, KERNEL_POS_PREFIX, NEW_COINBASE_OUTPUT_POS_PREFIX, NEW_PLAIN_OUTPUT_POS_PREFIX,
+};
+use crate::types::{CommitPos, OutputPos, Tip};
+use crate::util::secp::pedersen::Commitment;
+use croaring::Bitmap;
+use enum_primitive::FromPrimitive;
+use grin_store as store;
+use grin_store::{option_to_not_found, to_key, to_key_u64, Error, SerIterator};
+use std::convert::TryInto;
+use std::marker::PhantomData;
+use std::sync::Arc;
+
+enum_from_primitive! {
+	#[derive(Copy, Clone, Debug, PartialEq)]
+	enum LinkedListVariant {
+		Unique = 0,
+		Multi = 1,
+	}
+}
+
+impl Writeable for LinkedListVariant {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		writer.write_u8(*self as u8)
+	}
+}
+
+impl Readable for LinkedListVariant {
+	fn read(reader: &mut dyn Reader) -> Result<LinkedListVariant, ser::Error> {
+		LinkedListVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData)
+	}
+}
+
+enum_from_primitive!
{ + #[derive(Copy, Clone, Debug, PartialEq)] + enum ListEntryVariant { + Head = 2, + Tail = 3, + Middle = 4, + } +} + +impl Writeable for ListEntryVariant { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + writer.write_u8(*self as u8) + } +} + +impl Readable for ListEntryVariant { + fn read(reader: &mut dyn Reader) -> Result { + ListEntryVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) + } +} + +pub trait FooLinkedList { + /// List type + type List: Readable + Writeable; + + /// List entry type + type Entry: FooListEntry; + + fn list_key(&self, commit: Commitment) -> Vec; + + fn entry_key(&self, commit: Commitment, pos: u64) -> Vec; + + /// Returns either a "unique" with embedded "pos" or a "list" with "head" and "tail". + /// Key is "prefix|commit". + /// Note the key for an individual entry in the list is "prefix|commit|pos". + fn get_list(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + batch.db.get_ser(&self.list_key(commit)) + } + + /// Returns one of "head", "tail" or "middle" entry variants. + /// Key is "prefix|commit|pos". + fn get_entry( + &self, + batch: &Batch<'_>, + commit: Commitment, + pos: u64, + ) -> Result, Error> { + batch.db.get_ser(&self.entry_key(commit, pos)) + } + + fn push_entry( + &self, + batch: &Batch<'_>, + commit: Commitment, + new_pos: ::Pos, + ) -> Result<(), Error>; + + fn pop_entry( + &self, + batch: &Batch<'_>, + commit: Commitment, + ) -> Result::Pos>, Error>; +} + +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum LinkedList { + Unique { pos: T }, + Multi { head: u64, tail: u64 }, +} + +impl Writeable for LinkedList +where + T: Writeable, +{ + /// Write first byte representing the variant, followed by variant specific data. + /// "Unique" is optimized with embedded "pos". + /// "Multi" has references to "head" and "tail". + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + match self { + LinkedList::Unique { pos } => { + LinkedListVariant::Unique.write(writer)?; + pos.write(writer)?; + } + LinkedList::Multi { head, tail } => { + LinkedListVariant::Multi.write(writer)?; + writer.write_u64(*head)?; + writer.write_u64(*tail)?; + } + } + Ok(()) + } +} + +impl Readable for LinkedList +where + T: Readable, +{ + /// Read the first byte to determine what needs to be read beyond that. + fn read(reader: &mut dyn Reader) -> Result, ser::Error> { + let entry = match LinkedListVariant::read(reader)? { + LinkedListVariant::Unique => LinkedList::Unique { + pos: T::read(reader)?, + }, + LinkedListVariant::Multi => LinkedList::Multi { + head: reader.read_u64()?, + tail: reader.read_u64()?, + }, + }; + Ok(entry) + } +} + +pub struct MyLinkedList { + phantom: PhantomData<*const T>, + prefix: u8, +} + +pub fn output_plain_index() -> MyLinkedList { + MyLinkedList { + phantom: PhantomData, + prefix: NEW_PLAIN_OUTPUT_POS_PREFIX, + } +} + +pub fn output_coinbase_index() -> MyLinkedList { + MyLinkedList { + phantom: PhantomData, + prefix: NEW_COINBASE_OUTPUT_POS_PREFIX, + } +} + +pub fn kernel_index() -> MyLinkedList { + MyLinkedList { + phantom: PhantomData, + prefix: KERNEL_POS_PREFIX, + } +} + +impl FooLinkedList for MyLinkedList +where + T: PosEntry, +{ + type List = LinkedList; + type Entry = ListEntry; + + fn list_key(&self, commit: Commitment) -> Vec { + to_key(self.prefix, &mut commit.as_ref().to_vec()) + } + + fn entry_key(&self, commit: Commitment, pos: u64) -> Vec { + to_key_u64(self.prefix, &mut commit.as_ref().to_vec(), pos) + } + + /// Pop the head of the list. + /// Returns the output_pos. 
+ /// Returns None if list was empty. + fn pop_entry(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + match self.get_list(batch, commit)? { + None => Ok(None), + Some(LinkedList::Unique { pos }) => { + // TODO - delete the list itself. + + Ok(Some(pos)) + } + Some(LinkedList::Multi { head, tail }) => { + // read head from db + // read next one + // update next to a head if it was a middle + // update list head + // update list to a unique if next is a tail + Ok(None) + } + } + } + + fn push_entry(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> { + match self.get_list(batch, commit)? { + None => { + let list = LinkedList::Unique { pos: new_pos }; + batch.db.put_ser(&self.list_key(commit), &list)?; + } + Some(LinkedList::Unique { pos: current_pos }) => { + let head = ListEntry::Head { + pos: new_pos, + next: current_pos.pos(), + }; + let tail = ListEntry::Tail { + pos: current_pos, + prev: new_pos.pos(), + }; + let list: LinkedList = LinkedList::Multi { + head: new_pos.pos(), + tail: current_pos.pos(), + }; + batch + .db + .put_ser(&self.entry_key(commit, new_pos.pos()), &head)?; + batch + .db + .put_ser(&self.entry_key(commit, current_pos.pos()), &tail)?; + batch.db.put_ser(&self.list_key(commit), &list)?; + } + Some(LinkedList::Multi { head, tail }) => { + if let Some(ListEntry::Head { + pos: current_pos, + next: current_next, + }) = self.get_entry(batch, commit, head)? + { + let head = ListEntry::Head { + pos: new_pos, + next: current_pos.pos(), + }; + let middle = ListEntry::Middle { + pos: current_pos, + next: current_next, + prev: new_pos.pos(), + }; + let list: LinkedList = LinkedList::Multi { + head: new_pos.pos(), + tail, + }; + batch + .db + .put_ser(&self.entry_key(commit, new_pos.pos()), &head)?; + batch + .db + .put_ser(&self.entry_key(commit, current_pos.pos()), &middle)?; + batch.db.put_ser(&self.list_key(commit), &list)?; + } else { + return Err(Error::OtherErr("expected head to be head variant".into())); + } + } + } + Ok(()) + } +} + +pub trait PosEntry: Readable + Writeable + Copy { + fn pos(&self) -> u64; +} + +impl PosEntry for OutputPos { + fn pos(&self) -> u64 { + self.pos + } +} + +pub trait FooListEntry: Readable + Writeable { + type Pos: PosEntry; + + fn get_pos(&self) -> Self::Pos; +} + +impl FooListEntry for ListEntry +where + T: PosEntry, +{ + type Pos = T; + + /// Read the common pos from the various enum variants. + fn get_pos(&self) -> Self::Pos { + match self { + Self::Head { pos, .. } => *pos, + Self::Tail { pos, .. } => *pos, + Self::Middle { pos, .. } => *pos, + } + } +} + +pub enum ListEntry { + Head { pos: T, next: u64 }, + Tail { pos: T, prev: u64 }, + Middle { pos: T, next: u64, prev: u64 }, +} + +impl Writeable for ListEntry +where + T: Writeable, +{ + /// Write first byte representing the variant, followed by variant specific data. + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + match self { + ListEntry::Head { pos, next } => { + ListEntryVariant::Head.write(writer)?; + pos.write(writer)?; + writer.write_u64(*next)?; + } + ListEntry::Tail { pos, prev } => { + ListEntryVariant::Tail.write(writer)?; + pos.write(writer)?; + writer.write_u64(*prev)?; + } + ListEntry::Middle { pos, next, prev } => { + ListEntryVariant::Middle.write(writer)?; + pos.write(writer)?; + writer.write_u64(*next)?; + writer.write_u64(*prev)?; + } + } + Ok(()) + } +} + +impl Readable for ListEntry +where + T: Readable, +{ + /// Read the first byte to determine what needs to be read beyond that. 
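/// (Illustrative, inferred from the write impl above: a Middle entry serializes
/// as [variant byte = 4][pos: T][next: u64][prev: u64], so the leading byte alone
/// tells the reader which fields follow.)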
+ fn read(reader: &mut dyn Reader) -> Result, ser::Error> { + let entry = match ListEntryVariant::read(reader)? { + ListEntryVariant::Head => ListEntry::Head { + pos: T::read(reader)?, + next: reader.read_u64()?, + }, + ListEntryVariant::Tail => ListEntry::Tail { + pos: T::read(reader)?, + prev: reader.read_u64()?, + }, + ListEntryVariant::Middle => ListEntry::Middle { + pos: T::read(reader)?, + next: reader.read_u64()?, + prev: reader.read_u64()?, + }, + }; + Ok(entry) + } +} diff --git a/chain/src/store.rs b/chain/src/store.rs index ed33e9db35..727c2bd9e7 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -26,6 +26,7 @@ use enum_primitive::FromPrimitive; use grin_store as store; use grin_store::{option_to_not_found, to_key, to_key_u64, Error, SerIterator}; use std::convert::TryInto; +use std::marker::PhantomData; use std::sync::Arc; const STORE_SUBPATH: &str = "chain"; @@ -36,10 +37,10 @@ const HEAD_PREFIX: u8 = b'H'; const TAIL_PREFIX: u8 = b'T'; const HEADER_HEAD_PREFIX: u8 = b'G'; const OUTPUT_POS_PREFIX: u8 = b'p'; -const NEW_PLAIN_OUTPUT_POS_PREFIX: u8 = b'P'; -const NEW_COINBASE_OUTPUT_POS_PREFIX: u8 = b'C'; -const KERNEL_POS_PREFIX: u8 = b'K'; +pub const NEW_PLAIN_OUTPUT_POS_PREFIX: u8 = b'P'; +pub const NEW_COINBASE_OUTPUT_POS_PREFIX: u8 = b'C'; +pub const KERNEL_POS_PREFIX: u8 = b'K'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; @@ -148,7 +149,7 @@ impl ChainStore { /// An atomic batch in which all changes can be committed all at once or /// discarded on error. pub struct Batch<'a> { - db: store::Batch<'a>, + pub db: store::Batch<'a>, } impl<'a> Batch<'a> { @@ -388,354 +389,6 @@ impl<'a> Batch<'a> { } } -enum_from_primitive! { - #[derive(Copy, Clone, Debug, PartialEq)] - enum LinkedListVariant { - Unique = 0, - Multi = 1, - } -} - -impl Writeable for LinkedListVariant { - fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - writer.write_u8(*self as u8) - } -} - -impl Readable for LinkedListVariant { - fn read(reader: &mut dyn Reader) -> Result { - LinkedListVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) - } -} - -enum_from_primitive! { - #[derive(Copy, Clone, Debug, PartialEq)] - enum ListEntryVariant { - Head = 2, - Tail = 3, - Middle = 4, - } -} - -impl Writeable for ListEntryVariant { - fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - writer.write_u8(*self as u8) - } -} - -impl Readable for ListEntryVariant { - fn read(reader: &mut dyn Reader) -> Result { - ListEntryVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) - } -} - -pub trait FooLinkedList { - /// List type - type List: Readable + Writeable; - - /// List entry type - type Entry: FooListEntry; - - fn list_key(&self, commit: Commitment) -> Vec; - - fn entry_key(&self, commit: Commitment, pos: u64) -> Vec; - - /// Returns either a "unique" with embedded "pos" or a "list" with "head" and "tail". - /// Key is "prefix|commit". - /// Note the key for an individual entry in the list is "prefix|commit|pos". - fn get_list(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { - batch.db.get_ser(&self.list_key(commit)) - } - - /// Returns one of "head", "tail" or "middle" entry variants. - /// Key is "prefix|commit|pos". 
- fn get_entry( - &self, - batch: &Batch<'_>, - commit: Commitment, - pos: u64, - ) -> Result, Error> { - batch.db.get_ser(&self.entry_key(commit, pos)) - } - - fn push_entry( - &self, - batch: &Batch<'_>, - commit: Commitment, - new_pos: ::Pos, - ) -> Result<(), Error>; - - fn pop_entry( - &self, - batch: &Batch<'_>, - commit: Commitment, - ) -> Result::Pos>, Error>; -} - -#[derive(Copy, Clone, Debug, PartialEq)] -pub enum LinkedList { - Unique { pos: T }, - Multi { head: u64, tail: u64 }, -} - -impl Writeable for LinkedList -where - T: Writeable, -{ - /// Write first byte representing the variant, followed by variant specific data. - /// "Unique" is optimized with embedded "pos". - /// "Multi" has references to "head" and "tail". - fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - match self { - LinkedList::Unique { pos } => { - LinkedListVariant::Unique.write(writer)?; - pos.write(writer)?; - } - LinkedList::Multi { head, tail } => { - LinkedListVariant::Multi.write(writer)?; - writer.write_u64(*head)?; - writer.write_u64(*tail)?; - } - } - Ok(()) - } -} - -impl Readable for LinkedList -where - T: Readable, -{ - /// Read the first byte to determine what needs to be read beyond that. - fn read(reader: &mut dyn Reader) -> Result, ser::Error> { - let entry = match LinkedListVariant::read(reader)? { - LinkedListVariant::Unique => LinkedList::Unique { - pos: T::read(reader)?, - }, - LinkedListVariant::Multi => LinkedList::Multi { - head: reader.read_u64()?, - tail: reader.read_u64()?, - }, - }; - Ok(entry) - } -} - -pub struct MyLinkedList { - phantom: std::marker::PhantomData<*const T>, - prefix: u8, -} - -pub fn output_plain_index() -> MyLinkedList { - MyLinkedList { - phantom: std::marker::PhantomData, - prefix: NEW_PLAIN_OUTPUT_POS_PREFIX, - } -} - -pub fn output_coinbase_index() -> MyLinkedList { - MyLinkedList { - phantom: std::marker::PhantomData, - prefix: NEW_COINBASE_OUTPUT_POS_PREFIX, - } -} - -pub fn kernel_index() -> MyLinkedList { - MyLinkedList { - phantom: std::marker::PhantomData, - prefix: KERNEL_POS_PREFIX, - } -} - -impl FooLinkedList for MyLinkedList -where - T: PosEntry, -{ - type List = LinkedList; - type Entry = ListEntry; - - fn list_key(&self, commit: Commitment) -> Vec { - to_key(self.prefix, &mut commit.as_ref().to_vec()) - } - - fn entry_key(&self, commit: Commitment, pos: u64) -> Vec { - to_key_u64(self.prefix, &mut commit.as_ref().to_vec(), pos) - } - - /// Pop the head of the list. - /// Returns the output_pos. - /// Returns None if list was empty. - fn pop_entry(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { - match self.get_list(batch, commit)? { - None => Ok(None), - Some(LinkedList::Unique { pos }) => { - // TODO - delete the list itself. - - Ok(Some(pos)) - } - Some(LinkedList::Multi { head, tail }) => { - // read head from db - // read next one - // update next to a head if it was a middle - // update list head - // update list to a unique if next is a tail - Ok(None) - } - } - } - - fn push_entry(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> { - match self.get_list(batch, commit)? 
{ - None => { - let list = LinkedList::Unique { pos: new_pos }; - batch.db.put_ser(&self.list_key(commit), &list)?; - } - Some(LinkedList::Unique { pos: current_pos }) => { - let head = ListEntry::Head { - pos: new_pos, - next: current_pos.pos(), - }; - let tail = ListEntry::Tail { - pos: current_pos, - prev: new_pos.pos(), - }; - let list: LinkedList = LinkedList::Multi { - head: new_pos.pos(), - tail: current_pos.pos(), - }; - batch - .db - .put_ser(&self.entry_key(commit, new_pos.pos()), &head)?; - batch - .db - .put_ser(&self.entry_key(commit, current_pos.pos()), &tail)?; - batch.db.put_ser(&self.list_key(commit), &list)?; - } - Some(LinkedList::Multi { head, tail }) => { - if let Some(ListEntry::Head { - pos: current_pos, - next: current_next, - }) = self.get_entry(batch, commit, head)? - { - let head = ListEntry::Head { - pos: new_pos, - next: current_pos.pos(), - }; - let middle = ListEntry::Middle { - pos: current_pos, - next: current_next, - prev: new_pos.pos(), - }; - let list: LinkedList = LinkedList::Multi { - head: new_pos.pos(), - tail, - }; - batch - .db - .put_ser(&self.entry_key(commit, new_pos.pos()), &head)?; - batch - .db - .put_ser(&self.entry_key(commit, current_pos.pos()), &middle)?; - batch.db.put_ser(&self.list_key(commit), &list)?; - } else { - return Err(Error::OtherErr("expected head to be head variant".into())); - } - } - } - Ok(()) - } -} - -pub trait PosEntry: Readable + Writeable + Copy { - fn pos(&self) -> u64; -} - -impl PosEntry for OutputPos { - fn pos(&self) -> u64 { - self.pos - } -} - -pub trait FooListEntry: Readable + Writeable { - type Pos: PosEntry; - - fn get_pos(&self) -> Self::Pos; -} - -impl FooListEntry for ListEntry -where - T: PosEntry, -{ - type Pos = T; - - /// Read the common pos from the various enum variants. - fn get_pos(&self) -> Self::Pos { - match self { - Self::Head { pos, .. } => *pos, - Self::Tail { pos, .. } => *pos, - Self::Middle { pos, .. } => *pos, - } - } -} - -pub enum ListEntry { - Head { pos: T, next: u64 }, - Tail { pos: T, prev: u64 }, - Middle { pos: T, next: u64, prev: u64 }, -} - -impl Writeable for ListEntry -where - T: Writeable, -{ - /// Write first byte representing the variant, followed by variant specific data. - fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - match self { - ListEntry::Head { pos, next } => { - ListEntryVariant::Head.write(writer)?; - pos.write(writer)?; - writer.write_u64(*next)?; - } - ListEntry::Tail { pos, prev } => { - ListEntryVariant::Tail.write(writer)?; - pos.write(writer)?; - writer.write_u64(*prev)?; - } - ListEntry::Middle { pos, next, prev } => { - ListEntryVariant::Middle.write(writer)?; - pos.write(writer)?; - writer.write_u64(*next)?; - writer.write_u64(*prev)?; - } - } - Ok(()) - } -} - -impl Readable for ListEntry -where - T: Readable, -{ - /// Read the first byte to determine what needs to be read beyond that. - fn read(reader: &mut dyn Reader) -> Result, ser::Error> { - let entry = match ListEntryVariant::read(reader)? { - ListEntryVariant::Head => ListEntry::Head { - pos: T::read(reader)?, - next: reader.read_u64()?, - }, - ListEntryVariant::Tail => ListEntry::Tail { - pos: T::read(reader)?, - prev: reader.read_u64()?, - }, - ListEntryVariant::Middle => ListEntry::Middle { - pos: T::read(reader)?, - next: reader.read_u64()?, - prev: reader.read_u64()?, - }, - }; - Ok(entry) - } -} - /// An iterator on blocks, from latest to earliest, specialized to return /// information pertaining to block difficulty calculation (timestamp and /// previous difficulties). 
Mostly used by the consensus next difficulty diff --git a/chain/tests/store_output_pos_list.rs b/chain/tests/store_output_pos_list.rs index 3927e7bb39..8a2cdca963 100644 --- a/chain/tests/store_output_pos_list.rs +++ b/chain/tests/store_output_pos_list.rs @@ -16,7 +16,8 @@ use grin_chain as chain; use grin_core as core; use grin_util as util; -use crate::chain::store::{self, ChainStore, FooLinkedList, LinkedList}; +use crate::chain::linked_list::{self, FooLinkedList, LinkedList}; +use crate::chain::store::{self, ChainStore}; use crate::chain::types::{CommitPos, OutputPos}; use crate::core::core::OutputFeatures; use crate::util::secp::pedersen::Commitment; @@ -37,7 +38,7 @@ fn test_store_output_pos_list() { let commit = Commitment::from_vec(vec![]); - let index = store::output_plain_index(); + let index = linked_list::output_plain_index(); assert_eq!(index.get_list(&batch, commit), Ok(None)); From c65f09d595b0672e0a9d0eb5859bc29ab3203752 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 17 Apr 2020 16:37:12 +0100 Subject: [PATCH 12/48] cleanup and rename --- chain/src/linked_list.rs | 97 ++++++++----------- chain/src/store.rs | 16 +-- chain/src/types.rs | 2 +- ..._pos_list.rs => store_kernel_pos_index.rs} | 56 +++-------- 4 files changed, 60 insertions(+), 111 deletions(-) rename chain/tests/{store_output_pos_list.rs => store_kernel_pos_index.rs} (64%) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index f32cf6a3bd..0decd70663 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -14,41 +14,32 @@ //! Implements storage primitives required by the chain -use crate::core::consensus::HeaderInfo; -use crate::core::core::hash::{Hash, Hashed}; -use crate::core::core::{Block, BlockHeader, BlockSums}; -use crate::core::pow::Difficulty; -use crate::core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer}; -use crate::store::{ - Batch, KERNEL_POS_PREFIX, NEW_COINBASE_OUTPUT_POS_PREFIX, NEW_PLAIN_OUTPUT_POS_PREFIX, -}; -use crate::types::{CommitPos, OutputPos, Tip}; +use crate::core::ser::{self, Readable, Reader, Writeable, Writer}; +use crate::store::{Batch, COINBASE_KERNEL_POS_PREFIX}; +use crate::types::{CommitPos, OutputPos}; use crate::util::secp::pedersen::Commitment; -use croaring::Bitmap; use enum_primitive::FromPrimitive; use grin_store as store; -use grin_store::{option_to_not_found, to_key, to_key_u64, Error, SerIterator}; -use std::convert::TryInto; use std::marker::PhantomData; -use std::sync::Arc; +use store::{to_key, to_key_u64, Error}; enum_from_primitive! 
{ #[derive(Copy, Clone, Debug, PartialEq)] - enum LinkedListVariant { + enum ListWrapperVariant { Unique = 0, Multi = 1, } } -impl Writeable for LinkedListVariant { +impl Writeable for ListWrapperVariant { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u8(*self as u8) } } -impl Readable for LinkedListVariant { - fn read(reader: &mut dyn Reader) -> Result { - LinkedListVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) +impl Readable for ListWrapperVariant { + fn read(reader: &mut dyn Reader) -> Result { + ListWrapperVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) } } @@ -73,7 +64,7 @@ impl Readable for ListEntryVariant { } } -pub trait FooLinkedList { +pub trait ListIndex { /// List type type List: Readable + Writeable; @@ -117,12 +108,12 @@ pub trait FooLinkedList { } #[derive(Copy, Clone, Debug, PartialEq)] -pub enum LinkedList { +pub enum ListWrapper { Unique { pos: T }, Multi { head: u64, tail: u64 }, } -impl Writeable for LinkedList +impl Writeable for ListWrapper where T: Writeable, { @@ -131,12 +122,12 @@ where /// "Multi" has references to "head" and "tail". fn write(&self, writer: &mut W) -> Result<(), ser::Error> { match self { - LinkedList::Unique { pos } => { - LinkedListVariant::Unique.write(writer)?; + ListWrapper::Unique { pos } => { + ListWrapperVariant::Unique.write(writer)?; pos.write(writer)?; } - LinkedList::Multi { head, tail } => { - LinkedListVariant::Multi.write(writer)?; + ListWrapper::Multi { head, tail } => { + ListWrapperVariant::Multi.write(writer)?; writer.write_u64(*head)?; writer.write_u64(*tail)?; } @@ -145,17 +136,17 @@ where } } -impl Readable for LinkedList +impl Readable for ListWrapper where T: Readable, { /// Read the first byte to determine what needs to be read beyond that. - fn read(reader: &mut dyn Reader) -> Result, ser::Error> { - let entry = match LinkedListVariant::read(reader)? { - LinkedListVariant::Unique => LinkedList::Unique { + fn read(reader: &mut dyn Reader) -> Result, ser::Error> { + let entry = match ListWrapperVariant::read(reader)? { + ListWrapperVariant::Unique => ListWrapper::Unique { pos: T::read(reader)?, }, - LinkedListVariant::Multi => LinkedList::Multi { + ListWrapperVariant::Multi => ListWrapper::Multi { head: reader.read_u64()?, tail: reader.read_u64()?, }, @@ -164,37 +155,25 @@ where } } -pub struct MyLinkedList { +pub struct MultiIndex { phantom: PhantomData<*const T>, prefix: u8, } -pub fn output_plain_index() -> MyLinkedList { - MyLinkedList { - phantom: PhantomData, - prefix: NEW_PLAIN_OUTPUT_POS_PREFIX, - } -} - -pub fn output_coinbase_index() -> MyLinkedList { - MyLinkedList { - phantom: PhantomData, - prefix: NEW_COINBASE_OUTPUT_POS_PREFIX, - } -} - -pub fn kernel_index() -> MyLinkedList { - MyLinkedList { - phantom: PhantomData, - prefix: KERNEL_POS_PREFIX, +impl MultiIndex { + pub fn init(prefix: u8) -> MultiIndex { + MultiIndex { + phantom: PhantomData, + prefix, + } } } -impl FooLinkedList for MyLinkedList +impl ListIndex for MultiIndex where T: PosEntry, { - type List = LinkedList; + type List = ListWrapper; type Entry = ListEntry; fn list_key(&self, commit: Commitment) -> Vec { @@ -211,12 +190,12 @@ where fn pop_entry(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { match self.get_list(batch, commit)? { None => Ok(None), - Some(LinkedList::Unique { pos }) => { + Some(ListWrapper::Unique { pos }) => { // TODO - delete the list itself. 
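// (The deletion this TODO points at is presumably a single
// batch.delete(&self.list_key(commit))?; the "push some, peek some, pop some"
// commit later in this series adds exactly that before returning Ok(Some(pos)).)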
Ok(Some(pos)) } - Some(LinkedList::Multi { head, tail }) => { + Some(ListWrapper::Multi { head, tail }) => { // read head from db // read next one // update next to a head if it was a middle @@ -230,10 +209,10 @@ where fn push_entry(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> { match self.get_list(batch, commit)? { None => { - let list = LinkedList::Unique { pos: new_pos }; + let list = ListWrapper::Unique { pos: new_pos }; batch.db.put_ser(&self.list_key(commit), &list)?; } - Some(LinkedList::Unique { pos: current_pos }) => { + Some(ListWrapper::Unique { pos: current_pos }) => { let head = ListEntry::Head { pos: new_pos, next: current_pos.pos(), @@ -242,7 +221,7 @@ where pos: current_pos, prev: new_pos.pos(), }; - let list: LinkedList = LinkedList::Multi { + let list: ListWrapper = ListWrapper::Multi { head: new_pos.pos(), tail: current_pos.pos(), }; @@ -254,7 +233,7 @@ where .put_ser(&self.entry_key(commit, current_pos.pos()), &tail)?; batch.db.put_ser(&self.list_key(commit), &list)?; } - Some(LinkedList::Multi { head, tail }) => { + Some(ListWrapper::Multi { head, tail }) => { if let Some(ListEntry::Head { pos: current_pos, next: current_next, @@ -269,7 +248,7 @@ where next: current_next, prev: new_pos.pos(), }; - let list: LinkedList = LinkedList::Multi { + let list: ListWrapper = ListWrapper::Multi { head: new_pos.pos(), tail, }; @@ -293,7 +272,7 @@ pub trait PosEntry: Readable + Writeable + Copy { fn pos(&self) -> u64; } -impl PosEntry for OutputPos { +impl PosEntry for CommitPos { fn pos(&self) -> u64 { self.pos } diff --git a/chain/src/store.rs b/chain/src/store.rs index 727c2bd9e7..5357de47d5 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -18,15 +18,14 @@ use crate::core::consensus::HeaderInfo; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader, BlockSums}; use crate::core::pow::Difficulty; -use crate::core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer}; +use crate::core::ser::ProtocolVersion; +use crate::linked_list::{ListIndex, MultiIndex}; use crate::types::{CommitPos, OutputPos, Tip}; use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; -use enum_primitive::FromPrimitive; use grin_store as store; -use grin_store::{option_to_not_found, to_key, to_key_u64, Error, SerIterator}; +use grin_store::{option_to_not_found, to_key, Error, SerIterator}; use std::convert::TryInto; -use std::marker::PhantomData; use std::sync::Arc; const STORE_SUBPATH: &str = "chain"; @@ -38,9 +37,8 @@ const TAIL_PREFIX: u8 = b'T'; const HEADER_HEAD_PREFIX: u8 = b'G'; const OUTPUT_POS_PREFIX: u8 = b'p'; -pub const NEW_PLAIN_OUTPUT_POS_PREFIX: u8 = b'P'; -pub const NEW_COINBASE_OUTPUT_POS_PREFIX: u8 = b'C'; -pub const KERNEL_POS_PREFIX: u8 = b'K'; +// Proof of concept until we support NRD kernels. +pub const COINBASE_KERNEL_POS_PREFIX: u8 = b'K'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; @@ -480,3 +478,7 @@ impl<'a> Iterator for DifficultyIter<'a> { } } } + +pub fn coinbase_kernel_index() -> MultiIndex { + MultiIndex::init(COINBASE_KERNEL_POS_PREFIX) +} diff --git a/chain/src/types.rs b/chain/src/types.rs index e92817ebbf..f5e157520d 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -303,7 +303,7 @@ impl OutputRoots { } /// Minimal struct representing a known MMR position and associated block height. 
-#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct CommitPos { /// MMR position pub pos: u64, diff --git a/chain/tests/store_output_pos_list.rs b/chain/tests/store_kernel_pos_index.rs similarity index 64% rename from chain/tests/store_output_pos_list.rs rename to chain/tests/store_kernel_pos_index.rs index 8a2cdca963..399c3e141f 100644 --- a/chain/tests/store_output_pos_list.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -16,7 +16,7 @@ use grin_chain as chain; use grin_core as core; use grin_util as util; -use crate::chain::linked_list::{self, FooLinkedList, LinkedList}; +use crate::chain::linked_list::{self, ListIndex, ListWrapper}; use crate::chain::store::{self, ChainStore}; use crate::chain::types::{CommitPos, OutputPos}; use crate::core::core::OutputFeatures; @@ -26,7 +26,7 @@ mod chain_test_helper; use self::chain_test_helper::clean_output_dir; #[test] -fn test_store_output_pos_list() { +fn test_store_kernel_index() { util::init_test_logger(); let chain_dir = ".grin_idx_1"; @@ -38,82 +38,50 @@ fn test_store_output_pos_list() { let commit = Commitment::from_vec(vec![]); - let index = linked_list::output_plain_index(); + let index = store::coinbase_kernel_index(); assert_eq!(index.get_list(&batch, commit), Ok(None)); assert_eq!( - index.push_entry( - &batch, - commit, - OutputPos { - pos: 1, - height: 1, - features: OutputFeatures::Plain, - }, - ), + index.push_entry(&batch, commit, CommitPos { pos: 1, height: 1 },), Ok(()), ); assert_eq!( index.get_list(&batch, commit), - Ok(Some(LinkedList::Unique { - pos: OutputPos { - pos: 1, - height: 1, - features: OutputFeatures::Plain - } + Ok(Some(ListWrapper::Unique { + pos: CommitPos { pos: 1, height: 1 } })), ); assert_eq!( - index.push_entry( - &batch, - commit, - OutputPos { - pos: 2, - height: 2, - features: OutputFeatures::Plain, - }, - ), + index.push_entry(&batch, commit, CommitPos { pos: 2, height: 2 },), Ok(()), ); assert_eq!( index.get_list(&batch, commit), - Ok(Some(LinkedList::Multi { head: 2, tail: 1 })), + Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })), ); assert_eq!( - index.push_entry( - &batch, - commit, - OutputPos { - pos: 3, - height: 3, - features: OutputFeatures::Plain, - }, - ), + index.push_entry(&batch, commit, CommitPos { pos: 3, height: 3 },), Ok(()), ); assert_eq!( index.get_list(&batch, commit), - Ok(Some(LinkedList::Multi { head: 3, tail: 1 })), + Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })), ); assert_eq!( index.pop_entry(&batch, commit,), - Ok(Some(OutputPos { - pos: 3, - height: 3, - features: OutputFeatures::Plain, - })), + Ok(Some(CommitPos { pos: 3, height: 3 })), ); assert_eq!( index.get_list(&batch, commit), - Ok(Some(LinkedList::Multi { head: 2, tail: 1 })), + Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })), ); // Cleanup chain directory From 61bd3200fb0d24818e36a308577229b1d4973b8e Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 18 Apr 2020 14:11:27 +0100 Subject: [PATCH 13/48] rename --- chain/src/linked_list.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 0decd70663..c853358df9 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -69,7 +69,7 @@ pub trait ListIndex { type List: Readable + Writeable; /// List entry type - type Entry: FooListEntry; + type Entry: ListIndexEntry; fn list_key(&self, commit: Commitment) -> Vec; @@ -97,14 +97,14 @@ pub trait ListIndex { &self, batch: &Batch<'_>, commit: 
Commitment,
-		new_pos: <Self::Entry as FooListEntry>::Pos,
+		new_pos: <Self::Entry as ListIndexEntry>::Pos,
 	) -> Result<(), Error>;
 
 	fn pop_entry(
 		&self,
 		batch: &Batch<'_>,
 		commit: Commitment,
-	) -> Result<Option<<Self::Entry as FooListEntry>::Pos>, Error>;
+	) -> Result<Option<<Self::Entry as ListIndexEntry>::Pos>, Error>;
 }
 
 #[derive(Copy, Clone, Debug, PartialEq)]
@@ -278,13 +278,13 @@ impl PosEntry for CommitPos {
 	}
 }
 
-pub trait FooListEntry: Readable + Writeable {
+pub trait ListIndexEntry: Readable + Writeable {
 	type Pos: PosEntry;
 
 	fn get_pos(&self) -> Self::Pos;
 }
 
-impl<T> FooListEntry for ListEntry<T>
+impl<T> ListIndexEntry for ListEntry<T>
 where
 	T: PosEntry,
 {

From 5e44e2f061887a358752bafa9241c44841117a92 Mon Sep 17 00:00:00 2001
From: antiochp <30642645+antiochp@users.noreply.github.com>
Date: Sat, 18 Apr 2020 14:43:20 +0100
Subject: [PATCH 14/48] peek_pos

---
 chain/src/linked_list.rs              | 39 ++++++++++++++++++++++-----
 chain/tests/store_kernel_pos_index.rs | 35 ++++++++++++++++++------
 2 files changed, 62 insertions(+), 12 deletions(-)

diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs
index c853358df9..b88608fe6f 100644
--- a/chain/src/linked_list.rs
+++ b/chain/src/linked_list.rs
@@ -93,14 +93,20 @@ pub trait ListIndex {
 		batch.db.get_ser(&self.entry_key(commit, pos))
 	}
 
-	fn push_entry(
+	fn push_pos(
 		&self,
 		batch: &Batch<'_>,
 		commit: Commitment,
 		new_pos: <Self::Entry as ListIndexEntry>::Pos,
 	) -> Result<(), Error>;
 
-	fn pop_entry(
+	fn pop_pos(
+		&self,
+		batch: &Batch<'_>,
+		commit: Commitment,
+	) -> Result<Option<<Self::Entry as ListIndexEntry>::Pos>, Error>;
+
+	fn peek_pos(
 		&self,
 		batch: &Batch<'_>,
 		commit: Commitment,
 	) -> Result<Option<<Self::Entry as ListIndexEntry>::Pos>, Error>;
@@ -184,10 +190,24 @@ where
 		to_key_u64(self.prefix, &mut commit.as_ref().to_vec(), pos)
 	}
 
+	fn peek_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result<Option<T>, Error> {
+		match self.get_list(batch, commit)? {
+			None => Ok(None),
+			Some(ListWrapper::Unique { pos }) => Ok(Some(pos)),
+			Some(ListWrapper::Multi { head, tail }) => {
+				if let Some(ListEntry::Head { pos, .. }) = self.get_entry(batch, commit, head)? {
+					Ok(Some(pos))
+				} else {
+					Err(Error::OtherErr("expected head to be head variant".into()))
+				}
+			}
+		}
+	}
+
 	/// Pop the head of the list.
 	/// Returns the output_pos.
 	/// Returns None if list was empty.
-	fn pop_entry(&self, batch: &Batch<'_>, commit: Commitment) -> Result<Option<T>, Error> {
+	fn pop_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result<Option<T>, Error> {
 		match self.get_list(batch, commit)? {
 			None => Ok(None),
 			Some(ListWrapper::Unique { pos }) => {
@@ -201,12 +221,23 @@ where
 			// update next to a head if it was a middle
 			// update list head
 			// update list to a unique if next is a tail
+
+				if let Some(ListEntry::Head {
+					pos: current_pos,
+					next: current_next,
+				}) = self.get_entry(batch, commit, head)?
+				{
+					foo
+				} else {
+					Err(Error::OtherErr("expected head to be head variant".into()))
+				}
+
 				Ok(None)
 			}
 		}
 	}
 
-	fn push_entry(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> {
+	fn push_pos(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> {
 		match self.get_list(batch, commit)?
{ None => { let list = ListWrapper::Unique { pos: new_pos }; diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 399c3e141f..4ca11a2a5f 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -32,21 +32,25 @@ fn test_store_kernel_index() { let chain_dir = ".grin_idx_1"; clean_output_dir(chain_dir); - let store = ChainStore::new(chain_dir).unwrap(); - - let batch = store.batch().unwrap(); - let commit = Commitment::from_vec(vec![]); + let store = ChainStore::new(chain_dir).unwrap(); + let batch = store.batch().unwrap(); let index = store::coinbase_kernel_index(); + assert_eq!(index.peek_pos(&batch, commit), Ok(None)); assert_eq!(index.get_list(&batch, commit), Ok(None)); assert_eq!( - index.push_entry(&batch, commit, CommitPos { pos: 1, height: 1 },), + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 },), Ok(()), ); + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 1, height: 1 })), + ); + assert_eq!( index.get_list(&batch, commit), Ok(Some(ListWrapper::Unique { @@ -55,30 +59,45 @@ fn test_store_kernel_index() { ); assert_eq!( - index.push_entry(&batch, commit, CommitPos { pos: 2, height: 2 },), + index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 },), Ok(()), ); + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 2, height: 2 })), + ); + assert_eq!( index.get_list(&batch, commit), Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })), ); assert_eq!( - index.push_entry(&batch, commit, CommitPos { pos: 3, height: 3 },), + index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 },), Ok(()), ); + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 3, height: 3 })), + ); + assert_eq!( index.get_list(&batch, commit), Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })), ); assert_eq!( - index.pop_entry(&batch, commit,), + index.pop_pos(&batch, commit,), Ok(Some(CommitPos { pos: 3, height: 3 })), ); + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 2, height: 2 })), + ); + assert_eq!( index.get_list(&batch, commit), Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })), From 82e3c99406e7430e4271ac47fd699c1a116f12f7 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 18 Apr 2020 17:29:14 +0100 Subject: [PATCH 15/48] push some, peek some, pop some --- chain/src/linked_list.rs | 57 ++++++++++++++++----------- chain/tests/store_kernel_pos_index.rs | 30 +++++++++++++- 2 files changed, 63 insertions(+), 24 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index b88608fe6f..16c09475ee 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -26,7 +26,7 @@ use store::{to_key, to_key_u64, Error}; enum_from_primitive! { #[derive(Copy, Clone, Debug, PartialEq)] enum ListWrapperVariant { - Unique = 0, + Single = 0, Multi = 1, } } @@ -75,7 +75,7 @@ pub trait ListIndex { fn entry_key(&self, commit: Commitment, pos: u64) -> Vec; - /// Returns either a "unique" with embedded "pos" or a "list" with "head" and "tail". + /// Returns either a "Single" with embedded "pos" or a "list" with "head" and "tail". /// Key is "prefix|commit". /// Note the key for an individual entry in the list is "prefix|commit|pos". 
fn get_list(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { @@ -115,7 +115,7 @@ pub trait ListIndex { #[derive(Copy, Clone, Debug, PartialEq)] pub enum ListWrapper { - Unique { pos: T }, + Single { pos: T }, Multi { head: u64, tail: u64 }, } @@ -124,12 +124,12 @@ where T: Writeable, { /// Write first byte representing the variant, followed by variant specific data. - /// "Unique" is optimized with embedded "pos". + /// "Single" is optimized with embedded "pos". /// "Multi" has references to "head" and "tail". fn write(&self, writer: &mut W) -> Result<(), ser::Error> { match self { - ListWrapper::Unique { pos } => { - ListWrapperVariant::Unique.write(writer)?; + ListWrapper::Single { pos } => { + ListWrapperVariant::Single.write(writer)?; pos.write(writer)?; } ListWrapper::Multi { head, tail } => { @@ -149,7 +149,7 @@ where /// Read the first byte to determine what needs to be read beyond that. fn read(reader: &mut dyn Reader) -> Result, ser::Error> { let entry = match ListWrapperVariant::read(reader)? { - ListWrapperVariant::Unique => ListWrapper::Unique { + ListWrapperVariant::Single => ListWrapper::Single { pos: T::read(reader)?, }, ListWrapperVariant::Multi => ListWrapper::Multi { @@ -193,7 +193,7 @@ where fn peek_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { match self.get_list(batch, commit)? { None => Ok(None), - Some(ListWrapper::Unique { pos }) => Ok(Some(pos)), + Some(ListWrapper::Single { pos }) => Ok(Some(pos)), Some(ListWrapper::Multi { head, tail }) => { if let Some(ListEntry::Head { pos, .. }) = self.get_entry(batch, commit, head)? { Ok(Some(pos)) @@ -210,29 +210,42 @@ where fn pop_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { match self.get_list(batch, commit)? { None => Ok(None), - Some(ListWrapper::Unique { pos }) => { - // TODO - delete the list itself. - + Some(ListWrapper::Single { pos }) => { + batch.delete(&self.list_key(commit))?; Ok(Some(pos)) } Some(ListWrapper::Multi { head, tail }) => { - // read head from db - // read next one - // update next to a head if it was a middle - // update list head - // update list to a unique if next is a tail - if let Some(ListEntry::Head { pos: current_pos, next: current_next, }) = self.get_entry(batch, commit, head)? { - foo + match self.get_entry(batch, commit, current_next)? { + Some(ListEntry::Middle { pos, next, prev }) => { + let head = ListEntry::Head { pos, next }; + let list: ListWrapper = ListWrapper::Multi { + head: pos.pos(), + tail, + }; + batch.delete(&self.entry_key(commit, current_pos.pos()))?; + batch + .db + .put_ser(&self.entry_key(commit, pos.pos()), &head)?; + batch.db.put_ser(&self.list_key(commit), &list)?; + Ok(Some(current_pos)) + } + Some(ListEntry::Tail { pos, prev }) => { + let list = ListWrapper::Single { pos }; + batch.delete(&self.entry_key(commit, current_pos.pos()))?; + batch.db.put_ser(&self.list_key(commit), &list)?; + Ok(Some(current_pos)) + } + Some(_) => Err(Error::OtherErr("next was unexpected".into())), + None => Err(Error::OtherErr("next missing".into())), + } } else { Err(Error::OtherErr("expected head to be head variant".into())) } - - Ok(None) } } } @@ -240,10 +253,10 @@ where fn push_pos(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> { match self.get_list(batch, commit)? 
{ None => { - let list = ListWrapper::Unique { pos: new_pos }; + let list = ListWrapper::Single { pos: new_pos }; batch.db.put_ser(&self.list_key(commit), &list)?; } - Some(ListWrapper::Unique { pos: current_pos }) => { + Some(ListWrapper::Single { pos: current_pos }) => { let head = ListEntry::Head { pos: new_pos, next: current_pos.pos(), diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 4ca11a2a5f..efe10aaedf 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -53,7 +53,7 @@ fn test_store_kernel_index() { assert_eq!( index.get_list(&batch, commit), - Ok(Some(ListWrapper::Unique { + Ok(Some(ListWrapper::Single { pos: CommitPos { pos: 1, height: 1 } })), ); @@ -89,7 +89,7 @@ fn test_store_kernel_index() { ); assert_eq!( - index.pop_pos(&batch, commit,), + index.pop_pos(&batch, commit), Ok(Some(CommitPos { pos: 3, height: 3 })), ); @@ -103,6 +103,32 @@ fn test_store_kernel_index() { Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })), ); + assert_eq!( + index.pop_pos(&batch, commit), + Ok(Some(CommitPos { pos: 2, height: 2 })), + ); + + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 1, height: 1 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + assert_eq!( + index.pop_pos(&batch, commit), + Ok(Some(CommitPos { pos: 1, height: 1 })), + ); + + assert_eq!(index.peek_pos(&batch, commit), Ok(None),); + + assert_eq!(index.get_list(&batch, commit), Ok(None),); + // Cleanup chain directory clean_output_dir(chain_dir); } From 536b8edeeb95880df01b16199aca3ebbce981bbe Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 18 Apr 2020 17:44:42 +0100 Subject: [PATCH 16/48] cleanup --- chain/src/linked_list.rs | 36 ++++++++++++++++++----- chain/src/store.rs | 2 +- chain/tests/store_kernel_pos_index.rs | 41 +++++++++++++++++++++++---- 3 files changed, 65 insertions(+), 14 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 16c09475ee..a259903fbb 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -15,8 +15,8 @@ //! Implements storage primitives required by the chain use crate::core::ser::{self, Readable, Reader, Writeable, Writer}; -use crate::store::{Batch, COINBASE_KERNEL_POS_PREFIX}; -use crate::types::{CommitPos, OutputPos}; +use crate::store::Batch; +use crate::types::CommitPos; use crate::util::secp::pedersen::Commitment; use enum_primitive::FromPrimitive; use grin_store as store; @@ -64,6 +64,8 @@ impl Readable for ListEntryVariant { } } +/// Index supporting a list of (duplicate) entries per commitment. +/// Each entry will be at a unique MMR pos. pub trait ListIndex { /// List type type List: Readable + Writeable; @@ -71,8 +73,10 @@ pub trait ListIndex { /// List entry type type Entry: ListIndexEntry; + /// Construct a key for the list. fn list_key(&self, commit: Commitment) -> Vec; + /// Construct a key for an individual entry in the list. fn entry_key(&self, commit: Commitment, pos: u64) -> Vec; /// Returns either a "Single" with embedded "pos" or a "list" with "head" and "tail". @@ -93,6 +97,7 @@ pub trait ListIndex { batch.db.get_ser(&self.entry_key(commit, pos)) } + /// Push a pos onto the list for the specified commitment. 
fn push_pos( &self, batch: &Batch<'_>, @@ -100,12 +105,14 @@ pub trait ListIndex { new_pos: ::Pos, ) -> Result<(), Error>; + /// Pop a pos off the list for the specified commitment. fn pop_pos( &self, batch: &Batch<'_>, commit: Commitment, ) -> Result::Pos>, Error>; + /// Peek the head of the list for the specified commitment. fn peek_pos( &self, batch: &Batch<'_>, @@ -113,10 +120,24 @@ pub trait ListIndex { ) -> Result::Pos>, Error>; } +/// Wrapper for the list to handle either `Single` or `Multi` entries. +/// Optimized for the common case where we have a single entry in the list. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ListWrapper { - Single { pos: T }, - Multi { head: u64, tail: u64 }, + /// List with a single entry. + /// Allows direct access to the pos. + Single { + /// The MMR pos where this single entry is located. + pos: T, + }, + /// List with multiple entries. + /// Maintains head and tail of the underlying linked list. + Multi { + /// Head of the linked list. + head: u64, + /// Tail of the linked list. + tail: u64, + }, } impl Writeable for ListWrapper @@ -161,6 +182,7 @@ where } } +/// Index supporting multiple duplicate entries. pub struct MultiIndex { phantom: PhantomData<*const T>, prefix: u8, @@ -194,7 +216,7 @@ where match self.get_list(batch, commit)? { None => Ok(None), Some(ListWrapper::Single { pos }) => Ok(Some(pos)), - Some(ListWrapper::Multi { head, tail }) => { + Some(ListWrapper::Multi { head, .. }) => { if let Some(ListEntry::Head { pos, .. }) = self.get_entry(batch, commit, head)? { Ok(Some(pos)) } else { @@ -221,7 +243,7 @@ where }) = self.get_entry(batch, commit, head)? { match self.get_entry(batch, commit, current_next)? { - Some(ListEntry::Middle { pos, next, prev }) => { + Some(ListEntry::Middle { pos, next, .. }) => { let head = ListEntry::Head { pos, next }; let list: ListWrapper = ListWrapper::Multi { head: pos.pos(), @@ -234,7 +256,7 @@ where batch.db.put_ser(&self.list_key(commit), &list)?; Ok(Some(current_pos)) } - Some(ListEntry::Tail { pos, prev }) => { + Some(ListEntry::Tail { pos, .. 
}) => { let list = ListWrapper::Single { pos }; batch.delete(&self.entry_key(commit, current_pos.pos()))?; batch.db.put_ser(&self.list_key(commit), &list)?; diff --git a/chain/src/store.rs b/chain/src/store.rs index 5357de47d5..503bff32f6 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -19,7 +19,7 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader, BlockSums}; use crate::core::pow::Difficulty; use crate::core::ser::ProtocolVersion; -use crate::linked_list::{ListIndex, MultiIndex}; +use crate::linked_list::MultiIndex; use crate::types::{CommitPos, OutputPos, Tip}; use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index efe10aaedf..2b46e3f790 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -42,7 +42,7 @@ fn test_store_kernel_index() { assert_eq!(index.get_list(&batch, commit), Ok(None)); assert_eq!( - index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 },), + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), Ok(()), ); @@ -59,7 +59,7 @@ fn test_store_kernel_index() { ); assert_eq!( - index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 },), + index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }), Ok(()), ); @@ -74,7 +74,37 @@ fn test_store_kernel_index() { ); assert_eq!( - index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 },), + index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }), + Ok(()), + ); + + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 3, height: 3 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })), + ); + + assert_eq!( + index.pop_pos(&batch, commit), + Ok(Some(CommitPos { pos: 3, height: 3 })), + ); + + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 2, height: 2 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }), Ok(()), ); @@ -125,9 +155,8 @@ fn test_store_kernel_index() { Ok(Some(CommitPos { pos: 1, height: 1 })), ); - assert_eq!(index.peek_pos(&batch, commit), Ok(None),); - - assert_eq!(index.get_list(&batch, commit), Ok(None),); + assert_eq!(index.peek_pos(&batch, commit), Ok(None)); + assert_eq!(index.get_list(&batch, commit), Ok(None)); // Cleanup chain directory clean_output_dir(chain_dir); From 335727365c0a64539b9c11ed105dfd700ad44447 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 18 Apr 2020 19:33:30 +0100 Subject: [PATCH 17/48] commit pos cleanup --- api/src/handlers/utils.rs | 4 +- chain/src/chain.rs | 4 +- chain/src/store.rs | 9 +++-- chain/src/txhashset/txhashset.rs | 53 +++++++++++++++------------ chain/src/types.rs | 34 +---------------- chain/tests/store_kernel_pos_index.rs | 2 +- 6 files changed, 42 insertions(+), 64 deletions(-) diff --git a/api/src/handlers/utils.rs b/api/src/handlers/utils.rs index c033fb5bba..738cf1fce6 100644 --- a/api/src/handlers/utils.rs +++ b/api/src/handlers/utils.rs @@ -13,7 +13,7 @@ // limitations under the License. 
use crate::chain; -use crate::chain::types::OutputPos; +use crate::chain::types::CommitPos; use crate::core::core::{OutputFeatures, OutputIdentifier}; use crate::rest::*; use crate::types::*; @@ -33,7 +33,7 @@ pub fn w(weak: &Weak) -> Result, Error> { fn get_unspent( chain: &Arc, id: &str, -) -> Result, Error> { +) -> Result, Error> { let c = util::from_hex(id) .map_err(|_| ErrorKind::Argument(format!("Not a valid commitment: {}", id)))?; let commit = Commitment::from_vec(c); diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 443db7864c..087c0b342b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -30,7 +30,7 @@ use crate::store; use crate::txhashset; use crate::txhashset::{PMMRHandle, TxHashSet}; use crate::types::{ - BlockStatus, ChainAdapter, NoStatus, Options, OutputPos, Tip, TxHashsetWriteStatus, + BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus, }; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::RwLock; @@ -500,7 +500,7 @@ impl Chain { /// spent. This querying is done in a way that is consistent with the /// current chain state, specifically the current winning (valid, most /// work) fork. - pub fn get_unspent(&self, output_ref: &OutputIdentifier) -> Result, Error> { + pub fn get_unspent(&self, output_ref: &OutputIdentifier) -> Result, Error> { self.txhashset.read().get_unspent(output_ref) } diff --git a/chain/src/store.rs b/chain/src/store.rs index 503bff32f6..a745702d2a 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -20,7 +20,7 @@ use crate::core::core::{Block, BlockHeader, BlockSums}; use crate::core::pow::Difficulty; use crate::core::ser::ProtocolVersion; use crate::linked_list::MultiIndex; -use crate::types::{CommitPos, OutputPos, Tip}; +use crate::types::{CommitPos, Tip}; use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; use grin_store as store; @@ -147,6 +147,7 @@ impl ChainStore { /// An atomic batch in which all changes can be committed all at once or /// discarded on error. pub struct Batch<'a> { + /// The underlying db instance. pub db: store::Batch<'a>, } @@ -209,7 +210,7 @@ impl<'a> Batch<'a> { /// We maintain a "spent" index for each full block to allow the output_pos /// to be easily reverted during rewind. - pub fn save_spent_index(&self, h: &Hash, spent: &Vec) -> Result<(), Error> { + pub fn save_spent_index(&self, h: &Hash, spent: &Vec) -> Result<(), Error> { self.db.put_ser(&to_key(BLOCK_SPENT_PREFIX, h)[..], spent)?; Ok(()) } @@ -360,7 +361,7 @@ impl<'a> Batch<'a> { /// Get the "spent index" from the db for the specified block. /// If we need to rewind a block then we use this to "unspend" the spent outputs. - pub fn get_spent_index(&self, bh: &Hash) -> Result, Error> { + pub fn get_spent_index(&self, bh: &Hash) -> Result, Error> { option_to_not_found(self.db.get_ser(&to_key(BLOCK_SPENT_PREFIX, bh)), || { format!("spent index: {}", bh) }) @@ -479,6 +480,8 @@ impl<'a> Iterator for DifficultyIter<'a> { } } +/// Init the coinbase kernel index backed by the underlying db. +/// This index supports multiple entries per key and cannot be used via db directly. 
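/// Example (sketch mirroring chain/tests/store_kernel_pos_index.rs):
///   let index = store::coinbase_kernel_index();
///   index.push_pos(&batch, commit, CommitPos { pos, height })?;
///   let newest = index.peek_pos(&batch, commit)?; // most recently pushed pos, if any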
pub fn coinbase_kernel_index() -> MultiIndex { MultiIndex::init(COINBASE_KERNEL_POS_PREFIX) } diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 99074af66a..dec544460a 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -19,13 +19,16 @@ use crate::core::core::committed::Committed; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::merkle_proof::MerkleProof; use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR}; -use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, TxKernel}; +use crate::core::core::{ + Block, BlockHeader, Input, KernelFeatures, Output, OutputIdentifier, TxKernel, +}; use crate::core::ser::{PMMRable, ProtocolVersion}; use crate::error::{Error, ErrorKind}; -use crate::store::{Batch, ChainStore}; +use crate::linked_list::ListIndex; +use crate::store::{self, Batch, ChainStore}; use crate::txhashset::bitmap_accumulator::BitmapAccumulator; use crate::txhashset::{RewindableKernelView, UTXOView}; -use crate::types::{OutputPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; +use crate::types::{CommitPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::{file, secp_static, zip}; use croaring::Bitmap; @@ -217,7 +220,7 @@ impl TxHashSet { /// Check if an output is unspent. /// We look in the index to find the output MMR pos. /// Then we check the entry in the output MMR and confirm the hash matches. - pub fn get_unspent(&self, output_id: &OutputIdentifier) -> Result, Error> { + pub fn get_unspent(&self, output_id: &OutputIdentifier) -> Result, Error> { let commit = output_id.commit; match self.commit_index.get_output_pos_height(&commit) { Ok(Some((pos, height))) => { @@ -225,11 +228,7 @@ impl TxHashSet { ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); if let Some(out) = output_pmmr.get_data(pos) { if out == *output_id { - Ok(Some(OutputPos { - pos, - height, - features: output_id.features, - })) + Ok(Some(CommitPos { pos, height })) } else { Ok(None) } @@ -457,7 +456,7 @@ impl TxHashSet { } } debug!( - "init_height_pos_index: added entries for {} utxos, took {}s", + "init_output_pos_index: added entries for {} utxos, took {}s", total_outputs, now.elapsed().as_secs(), ); @@ -937,15 +936,26 @@ impl<'a> Extension<'a> { // Remove the spent output from the output_pos index. let mut spent = vec![]; for input in b.inputs() { - let spent_pos = self.apply_input(input, batch)?; - affected_pos.push(spent_pos.pos); + let pos = self.apply_input(input, batch)?; + affected_pos.push(pos.pos); batch.delete_output_pos_height(&input.commitment())?; - spent.push(spent_pos); + spent.push(pos); } batch.save_spent_index(&b.hash(), &spent)?; + let coinbase_kernel_index = store::coinbase_kernel_index(); for kernel in b.kernels() { - self.apply_kernel(kernel)?; + let pos = self.apply_kernel(kernel)?; + if let KernelFeatures::Coinbase = kernel.features { + coinbase_kernel_index.push_pos( + batch, + kernel.excess(), + CommitPos { + pos, + height: b.header.height, + }, + )?; + } } // Update our BitmapAccumulator based on affected outputs (both spent and created). @@ -973,7 +983,7 @@ impl<'a> Extension<'a> { ) } - fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { + fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { let commit = input.commitment(); if let Some((pos, height)) = batch.get_output_pos_height(&commit)? 
{ // First check this input corresponds to an existing entry in the output MMR. @@ -991,11 +1001,7 @@ impl<'a> Extension<'a> { self.rproof_pmmr .prune(pos) .map_err(ErrorKind::TxHashSetErr)?; - Ok(OutputPos { - pos, - height, - features: input.features, - }) + Ok(CommitPos { pos, height }) } Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()), Err(e) => Err(ErrorKind::TxHashSetErr(e).into()), @@ -1046,11 +1052,12 @@ impl<'a> Extension<'a> { } /// Push kernel onto MMR (hash and data files). - fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> { - self.kernel_pmmr + fn apply_kernel(&mut self, kernel: &TxKernel) -> Result { + let pos = self + .kernel_pmmr .push(kernel) .map_err(&ErrorKind::TxHashSetErr)?; - Ok(()) + Ok(pos) } /// Build a Merkle proof for the given output and the block diff --git a/chain/src/types.rs b/chain/src/types.rs index f5e157520d..47375bf67f 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -17,7 +17,7 @@ use chrono::prelude::{DateTime, Utc}; use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; -use crate::core::core::{Block, BlockHeader, HeaderVersion, OutputFeatures}; +use crate::core::core::{Block, BlockHeader, HeaderVersion}; use crate::core::pow::Difficulty; use crate::core::ser::{self, PMMRIndexHashable, Readable, Reader, Writeable, Writer}; use crate::error::{Error, ErrorKind}; @@ -327,38 +327,6 @@ impl Writeable for CommitPos { } } -#[derive(Clone, Copy, Debug, PartialEq)] -pub struct OutputPos { - /// MMR position - pub pos: u64, - /// Block height - pub height: u64, - /// Features - pub features: OutputFeatures, -} - -impl Readable for OutputPos { - fn read(reader: &mut dyn Reader) -> Result { - let pos = reader.read_u64()?; - let height = reader.read_u64()?; - let features = OutputFeatures::read(reader)?; - Ok(OutputPos { - pos, - height, - features, - }) - } -} - -impl Writeable for OutputPos { - fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - writer.write_u64(self.pos)?; - writer.write_u64(self.height)?; - self.features.write(writer)?; - Ok(()) - } -} - /// The tip of a fork. A handle to the fork ancestry from its leaf in the /// blockchain tree. References the max height and the latest and previous /// blocks diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 2b46e3f790..c417daea9a 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -18,7 +18,7 @@ use grin_util as util; use crate::chain::linked_list::{self, ListIndex, ListWrapper}; use crate::chain::store::{self, ChainStore}; -use crate::chain::types::{CommitPos, OutputPos}; +use crate::chain::types::{CommitPos, CommitPos}; use crate::core::core::OutputFeatures; use crate::util::secp::pedersen::Commitment; mod chain_test_helper; From f3e589ad4cb86bcd2593b622e30c1bc3d48ac9b3 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 20 Apr 2020 11:49:47 +0100 Subject: [PATCH 18/48] split list and entry out into separate db prefixes --- chain/src/linked_list.rs | 12 +++++++----- chain/src/store.rs | 6 +++--- chain/tests/store_kernel_pos_index.rs | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index a259903fbb..843453a109 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -185,14 +185,16 @@ where /// Index supporting multiple duplicate entries. 
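 /// Two key shapes back each index, sketched here for orientation (the exact bytes are
 /// whatever `to_key` / `to_key_u64` produce; this is not literal db layout):
 ///
 ///   list key:  [list_prefix | commit]        -> ListWrapper (Single | Multi { head, tail })
 ///   entry key: [entry_prefix | commit | pos] -> ListEntry (Head | Tail | Middle)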
pub struct MultiIndex { phantom: PhantomData<*const T>, - prefix: u8, + list_prefix: u8, + entry_prefix: u8, } impl MultiIndex { - pub fn init(prefix: u8) -> MultiIndex { + pub fn init(list_prefix: u8, entry_prefix: u8) -> MultiIndex { MultiIndex { phantom: PhantomData, - prefix, + list_prefix, + entry_prefix, } } } @@ -205,11 +207,11 @@ where type Entry = ListEntry; fn list_key(&self, commit: Commitment) -> Vec { - to_key(self.prefix, &mut commit.as_ref().to_vec()) + to_key(self.list_prefix, &mut commit.as_ref().to_vec()) } fn entry_key(&self, commit: Commitment, pos: u64) -> Vec { - to_key_u64(self.prefix, &mut commit.as_ref().to_vec(), pos) + to_key_u64(self.entry_prefix, &mut commit.as_ref().to_vec(), pos) } fn peek_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { diff --git a/chain/src/store.rs b/chain/src/store.rs index a745702d2a..845d110dc6 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -37,8 +37,8 @@ const TAIL_PREFIX: u8 = b'T'; const HEADER_HEAD_PREFIX: u8 = b'G'; const OUTPUT_POS_PREFIX: u8 = b'p'; -// Proof of concept until we support NRD kernels. -pub const COINBASE_KERNEL_POS_PREFIX: u8 = b'K'; +pub const COINBASE_KERNEL_LIST_PREFIX: u8 = b'K'; +pub const COINBASE_KERNEL_ENTRY_PREFIX: u8 = b'k'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; @@ -483,5 +483,5 @@ impl<'a> Iterator for DifficultyIter<'a> { /// Init the coinbase kernel index backed by the underlying db. /// This index supports multiple entries per key and cannot be used via db directly. pub fn coinbase_kernel_index() -> MultiIndex { - MultiIndex::init(COINBASE_KERNEL_POS_PREFIX) + MultiIndex::init(COINBASE_KERNEL_LIST_PREFIX, COINBASE_KERNEL_ENTRY_PREFIX) } diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index c417daea9a..ee76892e01 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -18,7 +18,7 @@ use grin_util as util; use crate::chain::linked_list::{self, ListIndex, ListWrapper}; use crate::chain::store::{self, ChainStore}; -use crate::chain::types::{CommitPos, CommitPos}; +use crate::chain::types::CommitPos; use crate::core::core::OutputFeatures; use crate::util::secp::pedersen::Commitment; mod chain_test_helper; From 6e6e49893ea890a8097bf8ef39cce41aec7bfccd Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Thu, 14 May 2020 12:12:03 +0100 Subject: [PATCH 19/48] cleanup and add placeholder for pop_back --- chain/src/linked_list.rs | 123 ++++++++++++++++++++++----------------- chain/src/store.rs | 4 +- 2 files changed, 73 insertions(+), 54 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 843453a109..eb83ff68cd 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Implements storage primitives required by the chain +//! Implements "linked list" storage primitive for lmdb index supporting multiple entries. use crate::core::ser::{self, Readable, Reader, Writeable, Writer}; use crate::store::Batch; @@ -38,7 +38,7 @@ impl Writeable for ListWrapperVariant { } impl Readable for ListWrapperVariant { - fn read(reader: &mut dyn Reader) -> Result { + fn read(reader: &mut R) -> Result { ListWrapperVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) } } @@ -46,6 +46,7 @@ impl Readable for ListWrapperVariant { enum_from_primitive! 
{ #[derive(Copy, Clone, Debug, PartialEq)] enum ListEntryVariant { + // Start at 2 here to differentiate from ListWrapperVariant above. Head = 2, Tail = 3, Middle = 4, @@ -59,7 +60,7 @@ impl Writeable for ListEntryVariant { } impl Readable for ListEntryVariant { - fn read(reader: &mut dyn Reader) -> Result { + fn read(reader: &mut R) -> Result { ListEntryVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData) } } @@ -97,6 +98,13 @@ pub trait ListIndex { batch.db.get_ser(&self.entry_key(commit, pos)) } + /// Peek the head of the list for the specified commitment. + fn peek_pos( + &self, + batch: &Batch<'_>, + commit: Commitment, + ) -> Result::Pos>, Error>; + /// Push a pos onto the list for the specified commitment. fn push_pos( &self, @@ -112,8 +120,9 @@ pub trait ListIndex { commit: Commitment, ) -> Result::Pos>, Error>; - /// Peek the head of the list for the specified commitment. - fn peek_pos( + /// Pop a pos off the end of the list for the specified commitment. + /// This is used when pruning old data. + fn pop_back( &self, batch: &Batch<'_>, commit: Commitment, @@ -168,7 +177,7 @@ where T: Readable, { /// Read the first byte to determine what needs to be read beyond that. - fn read(reader: &mut dyn Reader) -> Result, ser::Error> { + fn read(reader: &mut R) -> Result, ser::Error> { let entry = match ListWrapperVariant::read(reader)? { ListWrapperVariant::Single => ListWrapper::Single { pos: T::read(reader)?, @@ -228,52 +237,6 @@ where } } - /// Pop the head of the list. - /// Returns the output_pos. - /// Returns None if list was empty. - fn pop_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { - match self.get_list(batch, commit)? { - None => Ok(None), - Some(ListWrapper::Single { pos }) => { - batch.delete(&self.list_key(commit))?; - Ok(Some(pos)) - } - Some(ListWrapper::Multi { head, tail }) => { - if let Some(ListEntry::Head { - pos: current_pos, - next: current_next, - }) = self.get_entry(batch, commit, head)? - { - match self.get_entry(batch, commit, current_next)? { - Some(ListEntry::Middle { pos, next, .. }) => { - let head = ListEntry::Head { pos, next }; - let list: ListWrapper = ListWrapper::Multi { - head: pos.pos(), - tail, - }; - batch.delete(&self.entry_key(commit, current_pos.pos()))?; - batch - .db - .put_ser(&self.entry_key(commit, pos.pos()), &head)?; - batch.db.put_ser(&self.list_key(commit), &list)?; - Ok(Some(current_pos)) - } - Some(ListEntry::Tail { pos, .. }) => { - let list = ListWrapper::Single { pos }; - batch.delete(&self.entry_key(commit, current_pos.pos()))?; - batch.db.put_ser(&self.list_key(commit), &list)?; - Ok(Some(current_pos)) - } - Some(_) => Err(Error::OtherErr("next was unexpected".into())), - None => Err(Error::OtherErr("next missing".into())), - } - } else { - Err(Error::OtherErr("expected head to be head variant".into())) - } - } - } - } - fn push_pos(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> { match self.get_list(batch, commit)? { None => { @@ -334,6 +297,60 @@ where } Ok(()) } + + /// Pop the head of the list. + /// Returns the output_pos. + /// Returns None if list was empty. + fn pop_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { + match self.get_list(batch, commit)? 
{ + None => Ok(None), + Some(ListWrapper::Single { pos }) => { + batch.delete(&self.list_key(commit))?; + Ok(Some(pos)) + } + Some(ListWrapper::Multi { head, tail }) => { + if let Some(ListEntry::Head { + pos: current_pos, + next: current_next, + }) = self.get_entry(batch, commit, head)? + { + match self.get_entry(batch, commit, current_next)? { + Some(ListEntry::Middle { pos, next, .. }) => { + let head = ListEntry::Head { pos, next }; + let list: ListWrapper = ListWrapper::Multi { + head: pos.pos(), + tail, + }; + batch.delete(&self.entry_key(commit, current_pos.pos()))?; + batch + .db + .put_ser(&self.entry_key(commit, pos.pos()), &head)?; + batch.db.put_ser(&self.list_key(commit), &list)?; + Ok(Some(current_pos)) + } + Some(ListEntry::Tail { pos, .. }) => { + let list = ListWrapper::Single { pos }; + batch.delete(&self.entry_key(commit, current_pos.pos()))?; + batch.db.put_ser(&self.list_key(commit), &list)?; + Ok(Some(current_pos)) + } + Some(_) => Err(Error::OtherErr("next was unexpected".into())), + None => Err(Error::OtherErr("next missing".into())), + } + } else { + Err(Error::OtherErr("expected head to be head variant".into())) + } + } + } + } + + fn pop_back( + &self, + batch: &Batch<'_>, + commit: Commitment, + ) -> Result::Pos>, Error> { + panic!("not yet implemented!") + } } pub trait PosEntry: Readable + Writeable + Copy { @@ -407,7 +424,7 @@ where T: Readable, { /// Read the first byte to determine what needs to be read beyond that. - fn read(reader: &mut dyn Reader) -> Result, ser::Error> { + fn read(reader: &mut R) -> Result, ser::Error> { let entry = match ListEntryVariant::read(reader)? { ListEntryVariant::Head => ListEntry::Head { pos: T::read(reader)?, diff --git a/chain/src/store.rs b/chain/src/store.rs index 845d110dc6..79adac268b 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -37,7 +37,9 @@ const TAIL_PREFIX: u8 = b'T'; const HEADER_HEAD_PREFIX: u8 = b'G'; const OUTPUT_POS_PREFIX: u8 = b'p'; +/// Prefix for kernel pos index lists. pub const COINBASE_KERNEL_LIST_PREFIX: u8 = b'K'; +/// Prefix for kernel pos index entries. pub const COINBASE_KERNEL_ENTRY_PREFIX: u8 = b'k'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; @@ -481,7 +483,7 @@ impl<'a> Iterator for DifficultyIter<'a> { } /// Init the coinbase kernel index backed by the underlying db. -/// This index supports multiple entries per key and cannot be used via db directly. +/// This index supports multiple entries per key. pub fn coinbase_kernel_index() -> MultiIndex { MultiIndex::init(COINBASE_KERNEL_LIST_PREFIX, COINBASE_KERNEL_ENTRY_PREFIX) } From 888c49bf63e0f1b0266b1d07cb9e5988491f48d3 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Thu, 14 May 2020 21:42:05 +0100 Subject: [PATCH 20/48] pop_pos_back (for popping off the back of the linked list) test coverage for pop_pos_back --- chain/src/linked_list.rs | 45 ++++++++++- chain/tests/store_kernel_pos_index.rs | 109 +++++++++++++++++++++++++- 2 files changed, 150 insertions(+), 4 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index eb83ff68cd..7f2615efee 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -122,7 +122,7 @@ pub trait ListIndex { /// Pop a pos off the end of the list for the specified commitment. /// This is used when pruning old data. 
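 	/// Worked shapes, mirroring the test coverage in this series (a sketch, not
 	/// literal db contents):
 	///   push 1, 2, 3     -> Multi { head: 3, tail: 1 }
 	///   pop_pos (front)  -> Some(3), leaving Multi { head: 2, tail: 1 }
 	///   pop off the back -> Some(1), leaving Single { pos: 2 }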
- fn pop_back( + fn pop_pos_back( &self, batch: &Batch<'_>, commit: Commitment, @@ -344,12 +344,51 @@ where } } - fn pop_back( + fn pop_pos_back( &self, batch: &Batch<'_>, commit: Commitment, ) -> Result::Pos>, Error> { - panic!("not yet implemented!") + match self.get_list(batch, commit)? { + None => Ok(None), + Some(ListWrapper::Single { pos }) => { + batch.delete(&self.list_key(commit))?; + Ok(Some(pos)) + } + Some(ListWrapper::Multi { head, tail }) => { + if let Some(ListEntry::Tail { + pos: current_pos, + prev: current_prev, + }) = self.get_entry(batch, commit, tail)? + { + match self.get_entry(batch, commit, current_prev)? { + Some(ListEntry::Middle { pos, prev, .. }) => { + let tail = ListEntry::Tail { pos, prev }; + let list: ListWrapper = ListWrapper::Multi { + head, + tail: pos.pos(), + }; + batch.delete(&self.entry_key(commit, current_pos.pos()))?; + batch + .db + .put_ser(&self.entry_key(commit, pos.pos()), &tail)?; + batch.db.put_ser(&self.list_key(commit), &list)?; + Ok(Some(current_pos)) + } + Some(ListEntry::Head { pos, .. }) => { + let list = ListWrapper::Single { pos }; + batch.delete(&self.entry_key(commit, current_pos.pos()))?; + batch.db.put_ser(&self.list_key(commit), &list)?; + Ok(Some(current_pos)) + } + Some(_) => Err(Error::OtherErr("prev was unexpected".into())), + None => Err(Error::OtherErr("prev missing".into())), + } + } else { + Err(Error::OtherErr("expected tail to be tail variant".into())) + } + } + } } } diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index ee76892e01..3d40dc8564 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -26,7 +26,7 @@ mod chain_test_helper; use self::chain_test_helper::clean_output_dir; #[test] -fn test_store_kernel_index() { +fn test_store_kernel_idx() { util::init_test_logger(); let chain_dir = ".grin_idx_1"; @@ -161,3 +161,110 @@ fn test_store_kernel_index() { // Cleanup chain directory clean_output_dir(chain_dir); } + +#[test] +fn test_store_kernel_idx_pruning() { + util::init_test_logger(); + + let chain_dir = ".grin_idx_2"; + clean_output_dir(chain_dir); + + let commit = Commitment::from_vec(vec![]); + + let store = ChainStore::new(chain_dir).unwrap(); + let batch = store.batch().unwrap(); + let index = store::coinbase_kernel_index(); + + assert_eq!(index.peek_pos(&batch, commit), Ok(None)); + assert_eq!(index.get_list(&batch, commit), Ok(None)); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), + Ok(()), + ); + + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 1, height: 1 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + assert_eq!( + index.pop_pos_back(&batch, commit), + Ok(Some(CommitPos { pos: 1, height: 1 })), + ); + + assert_eq!(index.peek_pos(&batch, commit), Ok(None)); + assert_eq!(index.get_list(&batch, commit), Ok(None)); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), + Ok(()), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }), + Ok(()), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }), + Ok(()), + ); + + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 3, height: 3 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })), + ); + + assert_eq!( + index.pop_pos_back(&batch, commit), + 
Ok(Some(CommitPos { pos: 1, height: 1 })), + ); + + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 3, height: 3 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Multi { head: 3, tail: 2 })), + ); + + assert_eq!( + index.pop_pos_back(&batch, commit), + Ok(Some(CommitPos { pos: 2, height: 2 })), + ); + + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 3, height: 3 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 3, height: 3 } + })), + ); + + assert_eq!( + index.pop_pos_back(&batch, commit), + Ok(Some(CommitPos { pos: 3, height: 3 })), + ); + + assert_eq!(index.peek_pos(&batch, commit), Ok(None)); + assert_eq!(index.get_list(&batch, commit), Ok(None)); +} From 48d0debf2cadcadd5e8f1e44d168fa5b9f13ed7c Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 15 May 2020 13:35:13 +0100 Subject: [PATCH 21/48] wip --- chain/src/linked_list.rs | 12 ++++++++++-- chain/tests/store_kernel_pos_index.rs | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 7f2615efee..5bec600611 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -120,13 +120,18 @@ pub trait ListIndex { commit: Commitment, ) -> Result::Pos>, Error>; - /// Pop a pos off the end of the list for the specified commitment. - /// This is used when pruning old data. + /// Pop a pos off the back of the list (used for pruning old data). fn pop_pos_back( &self, batch: &Batch<'_>, commit: Commitment, ) -> Result::Pos>, Error>; + + /// + /// TODO - pass a cutoff in here and conditionally prune everything prior to this. + /// Loop internally to prune everything or just one at a time? + /// + fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error>; } /// Wrapper for the list to handle either `Single` or `Multi` entries. @@ -344,6 +349,8 @@ where } } + /// Pop off the back/tail of the linked list. + /// Used when pruning old data. fn pop_pos_back( &self, batch: &Batch<'_>, @@ -392,6 +399,7 @@ where } } +/// Something that tracks pos (in an MMR). pub trait PosEntry: Readable + Writeable + Copy { fn pos(&self) -> u64; } diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 3d40dc8564..084d862855 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -163,7 +163,7 @@ fn test_store_kernel_idx() { } #[test] -fn test_store_kernel_idx_pruning() { +fn test_store_kernel_idx_pop_back() { util::init_test_logger(); let chain_dir = ".grin_idx_2"; From 3f16218eac521e4cdb57c8fa91e01b06af9ceadd Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 16 May 2020 14:47:37 +0100 Subject: [PATCH 22/48] placeholder for prune via a trait pos must always increase in the index --- chain/src/linked_list.rs | 19 +++++++++++++++++++ chain/tests/store_kernel_pos_index.rs | 21 ++++++++++++++++----- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 5bec600611..5e3f9176e3 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -126,7 +126,9 @@ pub trait ListIndex { batch: &Batch<'_>, commit: Commitment, ) -> Result::Pos>, Error>; +} +pub trait PruneableListIndex { /// /// TODO - pass a cutoff in here and conditionally prune everything prior to this. 
/// Loop internally to prune everything or just one at a time? @@ -249,6 +251,10 @@ where batch.db.put_ser(&self.list_key(commit), &list)?; } Some(ListWrapper::Single { pos: current_pos }) => { + if new_pos.pos() <= current_pos.pos() { + return Err(Error::OtherErr("pos must be increasing".into())); + } + let head = ListEntry::Head { pos: new_pos, next: current_pos.pos(), @@ -270,6 +276,10 @@ where batch.db.put_ser(&self.list_key(commit), &list)?; } Some(ListWrapper::Multi { head, tail }) => { + if new_pos.pos() <= head { + return Err(Error::OtherErr("pos must be increasing".into())); + } + if let Some(ListEntry::Head { pos: current_pos, next: current_next, @@ -399,6 +409,15 @@ where } } +impl PruneableListIndex for MultiIndex +where + T: PosEntry, +{ + fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error> { + panic!("wat"); + } +} + /// Something that tracks pos (in an MMR). pub trait PosEntry: Readable + Writeable + Copy { fn pos(&self) -> u64; diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 084d862855..0ef5283f6c 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -12,18 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -use grin_chain as chain; -use grin_core as core; -use grin_util as util; - use crate::chain::linked_list::{self, ListIndex, ListWrapper}; use crate::chain::store::{self, ChainStore}; use crate::chain::types::CommitPos; use crate::core::core::OutputFeatures; use crate::util::secp::pedersen::Commitment; +use grin_chain as chain; +use grin_core as core; +use grin_store; +use grin_util as util; mod chain_test_helper; - use self::chain_test_helper::clean_output_dir; +use crate::grin_store::Error; #[test] fn test_store_kernel_idx() { @@ -73,6 +73,17 @@ fn test_store_kernel_idx() { Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })), ); + // Pos must always increase. + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), + Err(Error::OtherErr("pos must be increasing".into())), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }), + Err(Error::OtherErr("pos must be increasing".into())), + ); + assert_eq!( index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }), Ok(()), From c311be8ddf11864bb49b5acbdd29c846b12f0a16 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 16 May 2020 16:09:11 +0100 Subject: [PATCH 23/48] rewind kernel_pos_idx when calling rewind_single_block --- chain/src/linked_list.rs | 34 +++++++++++++++++++++++++------- chain/src/txhashset/txhashset.rs | 28 ++++++++++++++++++-------- chain/tests/mine_simple_chain.rs | 10 ++++------ 3 files changed, 51 insertions(+), 21 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 5e3f9176e3..b55d1c1130 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -128,11 +128,10 @@ pub trait ListIndex { ) -> Result::Pos>, Error>; } +/// A pruneable list index supports pruning of old data from the index lists. +/// This allows us to efficiently maintain an index of "recent" kernel data. +/// We can maintain a window of 2 weeks of recent data, discarding anything older than this. pub trait PruneableListIndex { - /// - /// TODO - pass a cutoff in here and conditionally prune everything prior to this. - /// Loop internally to prune everything or just one at a time? 
-	///
 	fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error>;
 }
 
@@ -451,10 +450,31 @@ where
 	}
 }
 
+/// Head|Middle|Tail variants for the linked list entries.
 pub enum ListEntry<T> {
-	Head { pos: T, next: u64 },
-	Tail { pos: T, prev: u64 },
-	Middle { pos: T, next: u64, prev: u64 },
+	/// Head of the list.
+	Head {
+		/// The thing in the list.
+		pos: T,
+		/// The next entry in the list.
+		next: u64,
+	},
+	/// Tail of the list.
+	Tail {
+		/// The thing in the list.
+		pos: T,
+		/// The previous entry in the list.
+		prev: u64,
+	},
+	/// An entry in the middle of the list.
+	Middle {
+		/// The thing in the list.
+		pos: T,
+		/// The next entry in the list.
+		next: u64,
+		/// The previous entry in the list.
+		prev: u64,
+	},
 }
 
 impl<T> Writeable for ListEntry<T>
diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs
index dec544460a..696de9eafa 100644
--- a/chain/src/txhashset/txhashset.rs
+++ b/chain/src/txhashset/txhashset.rs
@@ -1124,7 +1124,8 @@ impl<'a> Extension<'a> {
 		let mut affected_pos = vec![];
 		let mut current = head_header;
 		while header.height < current.height {
-			let mut affected_pos_single_block = self.rewind_single_block(&current, batch)?;
+			let block = batch.get_block(&current.hash())?;
+			let mut affected_pos_single_block = self.rewind_single_block(&block, batch)?;
 			affected_pos.append(&mut affected_pos_single_block);
 			current = batch.get_previous_header(&current)?;
 		}
@@ -1141,11 +1142,9 @@ impl<'a> Extension<'a> {
 	// Rewind the MMRs and the output_pos index.
 	// Returns a vec of "affected_pos" so we can apply the necessary updates to the bitmap
 	// accumulator in a single pass for all rewound blocks.
-	fn rewind_single_block(
-		&mut self,
-		header: &BlockHeader,
-		batch: &Batch<'_>,
-	) -> Result<Vec<u64>, Error> {
+	fn rewind_single_block(&mut self, block: &Block, batch: &Batch<'_>) -> Result<Vec<u64>, Error> {
+		let header = &block.header;
+
 		// The spent index allows us to conveniently "unspend" everything in a block.
 		let spent = batch.get_spent_index(&header.hash());
 
@@ -1164,7 +1163,7 @@ impl<'a> Extension<'a> {
 		if header.height == 0 {
 			self.rewind_mmrs_to_pos(0, 0, &spent_pos)?;
 		} else {
-			let prev = batch.get_previous_header(&header)?;
+			let prev = batch.get_previous_header(header)?;
 			self.rewind_mmrs_to_pos(prev.output_mmr_size, prev.kernel_mmr_size, &spent_pos)?;
 		}
 
@@ -1175,7 +1174,6 @@ impl<'a> Extension<'a> {
 		affected_pos.push(self.output_pmmr.last_pos);
 
 		// Remove any entries from the output_pos created by the block being rewound.
-		let block = batch.get_block(&header.hash())?;
 		let mut missing_count = 0;
 		for out in block.outputs() {
 			if batch.delete_output_pos_height(&out.commitment()).is_err() {
@@ -1191,6 +1189,20 @@ impl<'a> Extension<'a> {
 			);
 		}
 
+		// Now rewind the kernel_pos index based on kernels in the block being rewound.
+		// Pop every rewound kernel off the appropriate index list.
+		let coinbase_kernel_index = store::coinbase_kernel_index();
+		for kernel in block.kernels() {
+			if let KernelFeatures::Coinbase = kernel.features {
+				let res = coinbase_kernel_index.pop_pos(batch, kernel.excess());
+				debug!(
+					"rewind_single_block: popped kernel_pos_idx: {:?}, {:?}",
+					res,
+					kernel.excess()
+				);
+			}
+		}
+
 		// Update output_pos based on "unspending" all spent pos from this block.
 		// This is necessary to ensure the output_pos index correctly reflects a
 		// reused output commitment. For example an output at pos 1, spent, reused at pos 2.
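For orientation, the "unspend" step described above re-inserts each spent entry so that a
commitment spent in the rewound block maps back to its original pos, even if the same
commitment was later reused. A hedged sketch of that step (the `save_output_pos_height`
helper name is an assumption, mirroring the `delete_output_pos_height` call above):

	for (input, pos) in block.inputs().iter().zip(spent) {
		// Restore the pre-spend output_pos entry for this input's commitment.
		batch.save_output_pos_height(&input.commitment(), pos.pos, pos.height)?;
	}
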
diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index 567f110cd4..42f5da0f85 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -530,13 +530,11 @@ fn longer_fork() { fn spend_rewind_spend() { global::set_mining_mode(ChainTypes::AutomatedTesting); util::init_test_logger(); - clean_output_dir(".grin_spend_rewind_spend"); + let chain_dir = ".grin_spend_rewind_spend"; + clean_output_dir(chain_dir); { - let chain = init_chain( - ".grin_spend_rewind_spend", - pow::mine_genesis_block().unwrap(), - ); + let chain = init_chain(chain_dir, pow::mine_genesis_block().unwrap()); let prev = chain.head_header().unwrap(); let kc = ExtKeychain::from_random_seed(false).unwrap(); let pb = ProofBuilder::new(&kc); @@ -601,7 +599,7 @@ fn spend_rewind_spend() { } } - clean_output_dir(".grin_spend_rewind_spend"); + clean_output_dir(chain_dir); } #[test] From a55fab889e9297b389f38db003e02f2415a49947 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 16 May 2020 19:43:13 +0100 Subject: [PATCH 24/48] RewindableListIndex with rewind support. --- chain/src/linked_list.rs | 24 ++++++++++++++++++++---- chain/src/txhashset/txhashset.rs | 15 +++++++-------- chain/tests/store_kernel_pos_index.rs | 10 ++++++++++ 3 files changed, 37 insertions(+), 12 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index b55d1c1130..8367c4109d 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -128,6 +128,11 @@ pub trait ListIndex { ) -> Result::Pos>, Error>; } +/// Supports "rewind" given the provided commit and a pos to rewind back to. +pub trait RewindableListIndex { + fn rewind(&self, batch: &Batch<'_>, commit: Commitment, rewind_pos: u64) -> Result<(), Error>; +} + /// A pruneable list index supports pruning of old data from the index lists. /// This allows us to efficiently maintain an index of "recent" kernel data. /// We can maintain a window of 2 weeks of recent data, discarding anything older than this. @@ -408,10 +413,21 @@ where } } -impl PruneableListIndex for MultiIndex -where - T: PosEntry, -{ +/// List index that supports rewind. +impl RewindableListIndex for MultiIndex { + fn rewind(&self, batch: &Batch<'_>, commit: Commitment, rewind_pos: u64) -> Result<(), Error> { + while self + .peek_pos(batch, commit)? + .map(|x| x.pos() > rewind_pos) + .unwrap_or(false) + { + self.pop_pos(batch, commit)?; + } + Ok(()) + } +} + +impl PruneableListIndex for MultiIndex { fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error> { panic!("wat"); } diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 696de9eafa..149739f041 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -24,7 +24,7 @@ use crate::core::core::{ }; use crate::core::ser::{PMMRable, ProtocolVersion}; use crate::error::{Error, ErrorKind}; -use crate::linked_list::ListIndex; +use crate::linked_list::{ListIndex, RewindableListIndex}; use crate::store::{self, Batch, ChainStore}; use crate::txhashset::bitmap_accumulator::BitmapAccumulator; use crate::txhashset::{RewindableKernelView, UTXOView}; @@ -1144,6 +1144,7 @@ impl<'a> Extension<'a> { // accumulator in a single pass for all rewound blocks. 
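 	// Note: rewind_single_block takes the full block rather than just its header so
 	// the kernel_pos index can be rewound from the block's kernels as well.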
fn rewind_single_block(&mut self, block: &Block, batch: &Batch<'_>) -> Result, Error> { let header = &block.header; + let prev_header = batch.get_previous_header(&header)?; // The spent index allows us to conveniently "unspend" everything in a block. let spent = batch.get_spent_index(&header.hash()); @@ -1190,16 +1191,14 @@ impl<'a> Extension<'a> { } // Now rewind the kernel_pos index based on kernels in the block being rewound. - // Pop every rewound kernel off the appropriate index list. let coinbase_kernel_index = store::coinbase_kernel_index(); for kernel in block.kernels() { if let KernelFeatures::Coinbase = kernel.features { - let res = coinbase_kernel_index.pop_pos(batch, kernel.excess()); - debug!( - "rewind_single_block: popped kernel_pos_idx: {:?}, {:?}", - res, - kernel.excess() - ); + coinbase_kernel_index.rewind( + batch, + kernel.excess(), + prev_header.kernel_mmr_size, + )?; } } diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 0ef5283f6c..2243acfa17 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -279,3 +279,13 @@ fn test_store_kernel_idx_pop_back() { assert_eq!(index.peek_pos(&batch, commit), Ok(None)); assert_eq!(index.get_list(&batch, commit), Ok(None)); } + +#[test] +fn test_store_kernel_idx_rewind() { + util::init_test_logger(); + + let chain_dir = ".grin_idx_3"; + clean_output_dir(chain_dir); + + panic!("to be continued"); +} From c830daaa2c1ca12c0efb2a72b302a6345c0e6941 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 16 May 2020 20:48:23 +0100 Subject: [PATCH 25/48] test coverage for rewindable list index --- chain/tests/store_kernel_pos_index.rs | 60 ++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 2243acfa17..3a84035a66 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::chain::linked_list::{self, ListIndex, ListWrapper}; +use crate::chain::linked_list::{self, ListIndex, ListWrapper, RewindableListIndex}; use crate::chain::store::{self, ChainStore}; use crate::chain::types::CommitPos; use crate::core::core::OutputFeatures; @@ -278,6 +278,8 @@ fn test_store_kernel_idx_pop_back() { assert_eq!(index.peek_pos(&batch, commit), Ok(None)); assert_eq!(index.get_list(&batch, commit), Ok(None)); + + clean_output_dir(chain_dir); } #[test] @@ -287,5 +289,59 @@ fn test_store_kernel_idx_rewind() { let chain_dir = ".grin_idx_3"; clean_output_dir(chain_dir); - panic!("to be continued"); + let commit = Commitment::from_vec(vec![]); + + let store = ChainStore::new(chain_dir).unwrap(); + let batch = store.batch().unwrap(); + let index = store::coinbase_kernel_index(); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), + Ok(()), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }), + Ok(()), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }), + Ok(()), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })), + ); + + assert_eq!(index.rewind(&batch, commit, 1), Ok(()),); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + // Check we can safely noop rewind. + assert_eq!(index.rewind(&batch, commit, 2), Ok(()),); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + assert_eq!(index.rewind(&batch, commit, 1), Ok(()),); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + clean_output_dir(chain_dir); } From bc26131e6e35f35f3b8ce5dbba92ec1595b348bd Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 16 May 2020 21:51:53 +0100 Subject: [PATCH 26/48] test coverage for rewind back to 0 --- chain/tests/store_kernel_pos_index.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 3a84035a66..a630cb03f2 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -343,5 +343,12 @@ fn test_store_kernel_idx_rewind() { })), ); + // Check we can rewind back to 0. + assert_eq!(index.rewind(&batch, commit, 0), Ok(()),); + + assert_eq!(index.get_list(&batch, commit), Ok(None),); + + assert_eq!(index.rewind(&batch, commit, 0), Ok(()),); + clean_output_dir(chain_dir); } From 88b6e95f0923d80969a38d93ddc8fd80b14b41a8 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 16 May 2020 22:17:23 +0100 Subject: [PATCH 27/48] rewind past end of list --- chain/tests/store_kernel_pos_index.rs | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index a630cb03f2..ad60e35842 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -350,5 +350,36 @@ fn test_store_kernel_idx_rewind() { assert_eq!(index.rewind(&batch, commit, 0), Ok(()),); + // Now check we can rewind past the end of a list safely. 
+ + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), + Ok(()), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }), + Ok(()), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }), + Ok(()), + ); + + assert_eq!( + index.pop_pos_back(&batch, commit), + Ok(Some(CommitPos { pos: 1, height: 1 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Multi { head: 3, tail: 2 })), + ); + + assert_eq!(index.rewind(&batch, commit, 1), Ok(()),); + + assert_eq!(index.get_list(&batch, commit), Ok(None),); + clean_output_dir(chain_dir); } From ae0a99ce1cdc2566752f0848e1c0774b4ae143c6 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 17 May 2020 11:20:52 +0100 Subject: [PATCH 28/48] add tests for kernel_pos_idx with multiple commits --- chain/tests/store_kernel_pos_index.rs | 89 +++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index ad60e35842..48adec0716 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -383,3 +383,92 @@ fn test_store_kernel_idx_rewind() { clean_output_dir(chain_dir); } + +#[test] +fn test_store_kernel_idx_multiple_commits() { + util::init_test_logger(); + + let chain_dir = ".grin_idx_4"; + clean_output_dir(chain_dir); + + let commit = Commitment::from_vec(vec![]); + let commit2 = Commitment::from_vec(vec![1]); + + let store = ChainStore::new(chain_dir).unwrap(); + let batch = store.batch().unwrap(); + let index = store::coinbase_kernel_index(); + + assert_eq!(index.get_list(&batch, commit), Ok(None)); + assert_eq!(index.get_list(&batch, commit2), Ok(None)); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), + Ok(()), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + assert_eq!(index.get_list(&batch, commit2), Ok(None)); + + assert_eq!( + index.push_pos(&batch, commit2, CommitPos { pos: 2, height: 2 }), + Ok(()), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + assert_eq!( + index.get_list(&batch, commit2), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 2, height: 2 } + })), + ); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }), + Ok(()), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })), + ); + + assert_eq!( + index.get_list(&batch, commit2), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 2, height: 2 } + })), + ); + + assert_eq!( + index.pop_pos(&batch, commit), + Ok(Some(CommitPos { pos: 3, height: 3 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + assert_eq!( + index.get_list(&batch, commit2), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 2, height: 2 } + })), + ); + + clean_output_dir(chain_dir); +} From 47e9093878c4991e2213853e959ed92939621b2a Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 29 May 2020 17:37:05 +0100 Subject: [PATCH 29/48] commit --- chain/src/chain.rs | 4 +++ chain/src/error.rs | 3 +++ chain/src/linked_list.rs | 1 + chain/src/store.rs | 17 +++++++------ 
chain/src/txhashset/txhashset.rs | 35 +++++++++++++++++++-------- chain/tests/store_kernel_pos_index.rs | 8 +++--- 6 files changed, 46 insertions(+), 22 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 087c0b342b..4cd4872259 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -517,6 +517,10 @@ impl Chain { pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> { let header_pmmr = self.header_pmmr.read(); let txhashset = self.txhashset.read(); + + // TODO - we need to validate the kernel here, cannot do this via utxo_view... + // txhashet::extending_readonly + txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| { utxo.validate_tx(tx, batch)?; Ok(()) diff --git a/chain/src/error.rs b/chain/src/error.rs index f1cec32d33..e4634c45d2 100644 --- a/chain/src/error.rs +++ b/chain/src/error.rs @@ -122,6 +122,9 @@ pub enum ErrorKind { /// Tx not valid based on lock_height. #[fail(display = "Transaction Lock Height")] TxLockHeight, + /// Tx is not valid due to NRD relative_height restriction. + #[fail(display = "NRD Relative Height")] + NRDRelativeHeight, /// No chain exists and genesis block is required #[fail(display = "Genesis Block Required")] GenesisBlockRequired, diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 8367c4109d..7a19274990 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -130,6 +130,7 @@ pub trait ListIndex { /// Supports "rewind" given the provided commit and a pos to rewind back to. pub trait RewindableListIndex { + /// Rewind the index for the given commitment to the specified position. fn rewind(&self, batch: &Batch<'_>, commit: Commitment, rewind_pos: u64) -> Result<(), Error>; } diff --git a/chain/src/store.rs b/chain/src/store.rs index 79adac268b..aca5e1ffee 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -37,10 +37,10 @@ const TAIL_PREFIX: u8 = b'T'; const HEADER_HEAD_PREFIX: u8 = b'G'; const OUTPUT_POS_PREFIX: u8 = b'p'; -/// Prefix for kernel pos index lists. -pub const COINBASE_KERNEL_LIST_PREFIX: u8 = b'K'; -/// Prefix for kernel pos index entries. -pub const COINBASE_KERNEL_ENTRY_PREFIX: u8 = b'k'; +/// Prefix for NRD kernel pos index lists. +pub const NRD_KERNEL_LIST_PREFIX: u8 = b'K'; +/// Prefix for NRD kernel pos index entries. +pub const NRD_KERNEL_ENTRY_PREFIX: u8 = b'k'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; @@ -482,8 +482,9 @@ impl<'a> Iterator for DifficultyIter<'a> { } } -/// Init the coinbase kernel index backed by the underlying db. -/// This index supports multiple entries per key. -pub fn coinbase_kernel_index() -> MultiIndex { - MultiIndex::init(COINBASE_KERNEL_LIST_PREFIX, COINBASE_KERNEL_ENTRY_PREFIX) +/// Init the NRD "recent history" kernel index backed by the underlying db. +/// List index supports multiple entries per key, maintaining insertion order. +/// Allows for fast lookup of the most recent entry per excess commitment. 
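+///
+/// Hedged lookup sketch (names as used elsewhere in this series):
+///
+///   let index = nrd_recent_kernel_index();
+///   if let Some(prev) = index.peek_pos(&batch, kernel.excess())? {
+///       // prev.height is the height of the most recent duplicate of this kernel
+///   }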
+pub fn nrd_recent_kernel_index() -> MultiIndex { + MultiIndex::init(NRD_KERNEL_LIST_PREFIX, NRD_KERNEL_ENTRY_PREFIX) } diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 149739f041..9fbe2f5520 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -943,18 +943,33 @@ impl<'a> Extension<'a> { } batch.save_spent_index(&b.hash(), &spent)?; - let coinbase_kernel_index = store::coinbase_kernel_index(); + let kernel_index = store::nrd_recent_kernel_index(); for kernel in b.kernels() { let pos = self.apply_kernel(kernel)?; - if let KernelFeatures::Coinbase = kernel.features { - coinbase_kernel_index.push_pos( - batch, + if let KernelFeatures::NoRecentDuplicate { + relative_height, .. + } = kernel.features + { + debug!("checking NRD index: {:?}", kernel.excess()); + if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? { + let h2 = b.header.height; + let h1 = prev.height; + let delta = h2.saturating_sub(h1); + debug!("NRD check: {}, {}, {:?}", h2, h1, relative_height); + if delta < relative_height.into() { + return Err(ErrorKind::NRDRelativeHeight.into()); + } + } + let new_pos = CommitPos { + pos, + height: b.header.height, + }; + debug!( + "pushing entry to NRD index: {:?}: {:?}", kernel.excess(), - CommitPos { - pos, - height: b.header.height, - }, - )?; + new_pos + ); + kernel_index.push_pos(batch, kernel.excess(), new_pos)?; } } @@ -1191,7 +1206,7 @@ impl<'a> Extension<'a> { } // Now rewind the kernel_pos index based on kernels in the block being rewound. - let coinbase_kernel_index = store::coinbase_kernel_index(); + let coinbase_kernel_index = store::nrd_recent_kernel_index(); for kernel in block.kernels() { if let KernelFeatures::Coinbase = kernel.features { coinbase_kernel_index.rewind( diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 48adec0716..ab3ca309ed 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -36,7 +36,7 @@ fn test_store_kernel_idx() { let store = ChainStore::new(chain_dir).unwrap(); let batch = store.batch().unwrap(); - let index = store::coinbase_kernel_index(); + let index = store::nrd_recent_kernel_index(); assert_eq!(index.peek_pos(&batch, commit), Ok(None)); assert_eq!(index.get_list(&batch, commit), Ok(None)); @@ -184,7 +184,7 @@ fn test_store_kernel_idx_pop_back() { let store = ChainStore::new(chain_dir).unwrap(); let batch = store.batch().unwrap(); - let index = store::coinbase_kernel_index(); + let index = store::nrd_recent_kernel_index(); assert_eq!(index.peek_pos(&batch, commit), Ok(None)); assert_eq!(index.get_list(&batch, commit), Ok(None)); @@ -293,7 +293,7 @@ fn test_store_kernel_idx_rewind() { let store = ChainStore::new(chain_dir).unwrap(); let batch = store.batch().unwrap(); - let index = store::coinbase_kernel_index(); + let index = store::nrd_recent_kernel_index(); assert_eq!( index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), @@ -396,7 +396,7 @@ fn test_store_kernel_idx_multiple_commits() { let store = ChainStore::new(chain_dir).unwrap(); let batch = store.batch().unwrap(); - let index = store::coinbase_kernel_index(); + let index = store::nrd_recent_kernel_index(); assert_eq!(index.get_list(&batch, commit), Ok(None)); assert_eq!(index.get_list(&batch, commit2), Ok(None)); From e60ab72f3c6ca50c56032d2ff65a81ff1174696f Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 29 May 2020 18:13:37 +0100 Subject: [PATCH 30/48] 
cleanup --- chain/tests/store_kernel_pos_index.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index ab3ca309ed..60cef0b819 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -12,13 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::chain::linked_list::{self, ListIndex, ListWrapper, RewindableListIndex}; +use crate::chain::linked_list::{ListIndex, ListWrapper, RewindableListIndex}; use crate::chain::store::{self, ChainStore}; use crate::chain::types::CommitPos; -use crate::core::core::OutputFeatures; use crate::util::secp::pedersen::Commitment; use grin_chain as chain; -use grin_core as core; use grin_store; use grin_util as util; mod chain_test_helper; From 933b39eca080899ab9110badec6a2a3437f090ec Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 29 May 2020 20:50:31 +0100 Subject: [PATCH 31/48] hook NRD relative lock height validation into block processing and tx validation --- chain/src/chain.rs | 38 +++++++++++++---- chain/src/txhashset/txhashset.rs | 70 +++++++++++++++++++------------- 2 files changed, 71 insertions(+), 37 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 4cd4872259..cea6f49a72 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -19,7 +19,8 @@ use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; use crate::core::core::merkle_proof::MerkleProof; use crate::core::core::verifier_cache::VerifierCache; use crate::core::core::{ - Block, BlockHeader, BlockSums, Committed, Output, OutputIdentifier, Transaction, TxKernel, + Block, BlockHeader, BlockSums, Committed, KernelFeatures, Output, OutputIdentifier, + Transaction, TxKernel, }; use crate::core::global; use crate::core::pow; @@ -513,17 +514,38 @@ impl Chain { }) } - /// Validate the tx against the current UTXO set. + /// Validate the tx against the current UTXO set and recent kernels (NRD relative lock heights). pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> { - let header_pmmr = self.header_pmmr.read(); - let txhashset = self.txhashset.read(); + self.validate_tx_kernels(tx)?; + self.validate_tx_against_utxo(tx)?; + Ok(()) + } - // TODO - we need to validate the kernel here, cannot do this via utxo_view... - // txhashet::extending_readonly + /// Validates NRD relative height locks against "recent" kernel history. + /// Applies the kernels to the current kernel MMR in a readonly extension. + /// The extension and the db batch are discarded. + /// The batch ensures duplicate NRD kernels within the tx are handled correctly. + fn validate_tx_kernels(&self, tx: &Transaction) -> Result<(), Error> { + let has_nrd_kernel = tx.kernels().iter().any(|k| match k.features { + KernelFeatures::NoRecentDuplicate { .. 
+			} => true,
+			_ => false,
+		});
+		if !has_nrd_kernel {
+			return Ok(());
+		}
+		let mut header_pmmr = self.header_pmmr.write();
+		let mut txhashset = self.txhashset.write();
+		txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
+			let height = self.next_block_height()?;
+			ext.extension.apply_kernels(tx.kernels(), height, batch)
+		})
+	}
+
+	fn validate_tx_against_utxo(&self, tx: &Transaction) -> Result<(), Error> {
+		let header_pmmr = self.header_pmmr.read();
+		let txhashset = self.txhashset.read();
 		txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
-			utxo.validate_tx(tx, batch)?;
-			Ok(())
+			utxo.validate_tx(tx, batch)
 		})
 	}
 
diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs
index 9fbe2f5520..b4a090a05c 100644
--- a/chain/src/txhashset/txhashset.rs
+++ b/chain/src/txhashset/txhashset.rs
@@ -943,35 +943,9 @@ impl<'a> Extension<'a> {
 		}
 		batch.save_spent_index(&b.hash(), &spent)?;
 
-		let kernel_index = store::nrd_recent_kernel_index();
-		for kernel in b.kernels() {
-			let pos = self.apply_kernel(kernel)?;
-			if let KernelFeatures::NoRecentDuplicate {
-				relative_height, ..
-			} = kernel.features
-			{
-				debug!("checking NRD index: {:?}", kernel.excess());
-				if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? {
-					let h2 = b.header.height;
-					let h1 = prev.height;
-					let delta = h2.saturating_sub(h1);
-					debug!("NRD check: {}, {}, {:?}", h2, h1, relative_height);
-					if delta < relative_height.into() {
-						return Err(ErrorKind::NRDRelativeHeight.into());
-					}
-				}
-				let new_pos = CommitPos {
-					pos,
-					height: b.header.height,
-				};
-				debug!(
-					"pushing entry to NRD index: {:?}: {:?}",
-					kernel.excess(),
-					new_pos
-				);
-				kernel_index.push_pos(batch, kernel.excess(), new_pos)?;
-			}
-		}
+		// Apply the kernels to the kernel MMR.
+		// Note: This validates any NRD relative height locks via the "recent" kernel index.
+		self.apply_kernels(b.kernels(), b.header.height, batch)?;
 
 		// Update our BitmapAccumulator based on affected outputs (both spent and created).
 		self.apply_to_bitmap_accumulator(&affected_pos)?;
@@ -1066,6 +1040,44 @@ impl<'a> Extension<'a> {
 		Ok(output_pos)
 	}
 
+	/// Apply kernels to the kernel MMR.
+	/// Validate any NRD relative height locks via the "recent" kernel index.
+	/// Note: This is used for both block processing and tx validation.
+	/// In the block processing case we use the block height.
+	/// In the tx validation case we use the "next" block height based on current chain head.
+	pub fn apply_kernels(
+		&mut self,
+		kernels: &[TxKernel],
+		height: u64,
+		batch: &Batch<'_>,
+	) -> Result<(), Error> {
+		let kernel_index = store::nrd_recent_kernel_index();
+		for kernel in kernels {
+			let pos = self.apply_kernel(kernel)?;
+			if let KernelFeatures::NoRecentDuplicate {
+				relative_height, ..
+			} = kernel.features
+			{
+				debug!("checking NRD index: {:?}", kernel.excess());
+				if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? {
+					let diff = height.saturating_sub(prev.height);
+					debug!("NRD check: {}, {:?}, {:?}", height, prev, relative_height);
+					// E.g. with relative_height = 2 and a previous matching kernel at
+					// height 8, heights 8 and 9 are rejected; height 10 is the first valid.
+					if diff < relative_height.into() {
+						return Err(ErrorKind::NRDRelativeHeight.into());
+					}
+				}
+				let new_pos = CommitPos { pos, height };
+				debug!(
+					"pushing entry to NRD index: {:?}: {:?}",
+					kernel.excess(),
+					new_pos
+				);
+				kernel_index.push_pos(batch, kernel.excess(), new_pos)?;
+			}
+		}
+		Ok(())
+	}
+
 	/// Push kernel onto MMR (hash and data files).
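 	/// Returns the MMR position of the newly pushed kernel so callers (e.g. the NRD
 	/// index handling above) can record where it landed.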
 	fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<u64, Error> {
 		let pos = self

From a0383897da4802d42b66e7df348a0a1951ca3b23 Mon Sep 17 00:00:00 2001
From: antiochp <30642645+antiochp@users.noreply.github.com>
Date: Fri, 29 May 2020 20:57:03 +0100
Subject: [PATCH 32/48] cleanup

---
 chain/src/chain.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index cea6f49a72..af708be8de 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -516,8 +516,8 @@ impl Chain {
 
 	/// Validate the tx against the current UTXO set and recent kernels (NRD relative lock heights).
 	pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
-		self.validate_tx_kernels(tx)?;
 		self.validate_tx_against_utxo(tx)?;
+		self.validate_tx_kernels(tx)?;
 		Ok(())
 	}
 

From 4b64991f9889d07bdd8006624e86af0df042a3d9 Mon Sep 17 00:00:00 2001
From: antiochp <30642645+antiochp@users.noreply.github.com>
Date: Sun, 31 May 2020 13:37:04 +0100
Subject: [PATCH 33/48] set local chain type for kernel_idx tests

---
 chain/src/linked_list.rs              |  6 ++++++
 chain/tests/store_kernel_pos_index.rs | 19 +++++++++++--------
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs
index 7a19274990..c22e9995d3 100644
--- a/chain/src/linked_list.rs
+++ b/chain/src/linked_list.rs
@@ -138,6 +138,7 @@ pub trait RewindableListIndex {
 /// This allows us to efficiently maintain an index of "recent" kernel data.
 /// We can maintain a window of 2 weeks of recent data, discarding anything older than this.
 pub trait PruneableListIndex {
+	/// Prune old data.
 	fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error>;
 }
 
@@ -211,6 +212,7 @@ pub struct MultiIndex<T> {
 }
 
 impl<T> MultiIndex<T> {
+	/// Initialize a new multi index with the specified list and entry prefixes.
 	pub fn init(list_prefix: u8, entry_prefix: u8) -> MultiIndex<T> {
 		MultiIndex {
 			phantom: PhantomData,
@@ -436,6 +438,7 @@ impl<T> PruneableListIndex for MultiIndex<T> {
 
 /// Something that tracks pos (in an MMR).
 pub trait PosEntry: Readable + Writeable + Copy {
+	/// Accessor for the underlying (MMR) pos.
 	fn pos(&self) -> u64;
 }
 
@@ -445,9 +448,12 @@ impl PosEntry for CommitPos {
 	}
 }
 
+/// Entry maintained in the list index.
 pub trait ListIndexEntry: Readable + Writeable {
+	/// Type of the underlying pos indexed in the list.
 	type Pos: PosEntry;
 
+	/// Accessor for the underlying pos.
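+	/// For `ListEntry` every variant carries one, e.g. `Head { pos, .. }` simply
+	/// returns its `pos`.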
fn get_pos(&self) -> Self::Pos; } diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 60cef0b819..5458d5d734 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -15,18 +15,24 @@ use crate::chain::linked_list::{ListIndex, ListWrapper, RewindableListIndex}; use crate::chain::store::{self, ChainStore}; use crate::chain::types::CommitPos; +use crate::core::global; use crate::util::secp::pedersen::Commitment; use grin_chain as chain; +use grin_core as core; use grin_store; use grin_util as util; mod chain_test_helper; use self::chain_test_helper::clean_output_dir; use crate::grin_store::Error; -#[test] -fn test_store_kernel_idx() { +fn setup_test() { util::init_test_logger(); + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); +} +#[test] +fn test_store_kernel_idx() { + setup_test(); let chain_dir = ".grin_idx_1"; clean_output_dir(chain_dir); @@ -173,8 +179,7 @@ fn test_store_kernel_idx() { #[test] fn test_store_kernel_idx_pop_back() { - util::init_test_logger(); - + setup_test(); let chain_dir = ".grin_idx_2"; clean_output_dir(chain_dir); @@ -282,8 +287,7 @@ fn test_store_kernel_idx_pop_back() { #[test] fn test_store_kernel_idx_rewind() { - util::init_test_logger(); - + setup_test(); let chain_dir = ".grin_idx_3"; clean_output_dir(chain_dir); @@ -384,8 +388,7 @@ fn test_store_kernel_idx_rewind() { #[test] fn test_store_kernel_idx_multiple_commits() { - util::init_test_logger(); - + setup_test(); let chain_dir = ".grin_idx_4"; clean_output_dir(chain_dir); From 8ed26c88c80d0589778735862862d98cf6fb8d6f Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 31 May 2020 14:24:11 +0100 Subject: [PATCH 34/48] add test coverage for NRD rules in block processing --- chain/src/txhashset/txhashset.rs | 6 ++ chain/tests/nrd_validation_rules.rs | 158 ++++++++++++++++++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 chain/tests/nrd_validation_rules.rs diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index b4a090a05c..2ef367b8a8 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -22,6 +22,7 @@ use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR} use crate::core::core::{ Block, BlockHeader, Input, KernelFeatures, Output, OutputIdentifier, TxKernel, }; +use crate::core::global; use crate::core::ser::{PMMRable, ProtocolVersion}; use crate::error::{Error, ErrorKind}; use crate::linked_list::{ListIndex, RewindableListIndex}; @@ -1054,6 +1055,11 @@ impl<'a> Extension<'a> { let kernel_index = store::nrd_recent_kernel_index(); for kernel in kernels { let pos = self.apply_kernel(kernel)?; + + if !global::is_nrd_enabled() { + return Ok(()); + } + if let KernelFeatures::NoRecentDuplicate { relative_height, .. } = kernel.features diff --git a/chain/tests/nrd_validation_rules.rs b/chain/tests/nrd_validation_rules.rs new file mode 100644 index 0000000000..630f4ea90e --- /dev/null +++ b/chain/tests/nrd_validation_rules.rs @@ -0,0 +1,158 @@ +// Copyright 2020 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod chain_test_helper; + +use grin_chain as chain; +use grin_core as core; +use grin_keychain as keychain; +use grin_util as util; + +use self::chain_test_helper::{clean_output_dir, genesis_block, init_chain}; +use crate::chain::{Chain, Error, Options}; +use crate::core::core::{Block, KernelFeatures, NRDRelativeHeight, Transaction, TxKernel}; +use crate::core::libtx::{aggsig, build, reward, ProofBuilder}; +use crate::core::{consensus, global, pow}; +use crate::keychain::{BlindingFactor, ExtKeychain, ExtKeychainPath, Identifier, Keychain}; +use chrono::Duration; + +fn build_block( + chain: &Chain, + keychain: &K, + key_id: &Identifier, + txs: Vec, +) -> Result +where + K: Keychain, +{ + // Tests need to build "invalid" blocks so disable NRD feature flag temprorarily. + let is_nrd_enabled = global::is_nrd_enabled(); + global::set_local_nrd_enabled(false); + + let prev = chain.head_header()?; + let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + let fee = txs.iter().map(|x| x.fee()).sum(); + let reward = + reward::output(keychain, &ProofBuilder::new(keychain), key_id, fee, false).unwrap(); + + let mut block = Block::new(&prev, txs, next_header_info.clone().difficulty, reward)?; + + block.header.timestamp = prev.timestamp + Duration::seconds(60); + block.header.pow.secondary_scaling = next_header_info.secondary_scaling; + + chain.set_txhashset_roots(&mut block)?; + + let edge_bits = global::min_edge_bits(); + block.header.pow.proof.edge_bits = edge_bits; + pow::pow_size( + &mut block.header, + next_header_info.difficulty, + global::proofsize(), + edge_bits, + ) + .unwrap(); + + // Restore NRD feature flag after building the potentially "invalid" block. + global::set_local_nrd_enabled(is_nrd_enabled); + + Ok(block) +} + +#[test] +fn process_block_nrd_validation_rules() -> Result<(), Error> { + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_nrd_enabled(true); + + util::init_test_logger(); + + let chain_dir = ".grin.nrd_kernel"; + clean_output_dir(chain_dir); + + let keychain = ExtKeychain::from_random_seed(false).unwrap(); + let builder = ProofBuilder::new(&keychain); + let genesis = genesis_block(&keychain); + let chain = init_chain(chain_dir, genesis.clone()); + + for n in 1..9 { + let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier(); + let block = build_block(&chain, &keychain, &key_id, vec![])?; + chain.process_block(block, Options::MINE).unwrap(); + } + + assert_eq!(chain.head().unwrap().height, 8); + + // TODO - build 2 "half txs" with shared NRD kernel and locked with relative_height = 2 + // Check invalid if tx1 and tx2 included in same block. + // Check invalid if tx2 included in next block. + // Check valid if tx2 included in subsequent block (height diff at least 2). + + let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { + fee: 20000, + relative_height: NRDRelativeHeight::new(2)?, + }); + + // // Construct the message to be signed. + let msg = kernel.msg_to_sign().unwrap(); + + // // Generate a kernel with public excess and associated signature. 
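+ // A note on the construction below: `excess` is a random blinding factor r,
+ // `commit(0, r)` gives the Pedersen commitment r*G (a commitment to the value
+ // zero), and `excess_sig` is an aggsig over the kernel's `msg_to_sign()`
+ // message using that same blinding factor, verifiable against r*G as a pubkey.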
+ let excess = BlindingFactor::rand(&keychain.secp()); + let skey = excess.secret_key(&keychain.secp()).unwrap(); + kernel.excess = keychain.secp().commit(0, skey).unwrap(); + let pubkey = &kernel.excess.to_pubkey(&keychain.secp()).unwrap(); + kernel.excess_sig = + aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); + kernel.verify().unwrap(); + + let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier(); + let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier(); + let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier(); + + let tx1 = build::transaction_with_kernel( + vec![ + build::coinbase_input(consensus::REWARD, key_id1.clone()), + build::output(consensus::REWARD - 20000, key_id2.clone()), + ], + kernel.clone(), + excess.clone(), + &keychain, + &builder, + ) + .unwrap(); + + let tx2 = build::transaction_with_kernel( + vec![ + build::input(consensus::REWARD - 20000, key_id2.clone()), + build::output(consensus::REWARD - 40000, key_id3.clone()), + ], + kernel.clone(), + excess.clone(), + &keychain, + &builder, + ) + .unwrap(); + + let key_id9 = ExtKeychainPath::new(1, 9, 0, 0, 0).to_identifier(); + + // Check block containing both tx1 and tx2 is invalid. + let block = build_block(&chain, &keychain, &key_id9, vec![tx1, tx2])?; + assert!(chain.process_block(block, Options::MINE).is_err()); + + panic!("tbc"); + + // chain.process_block(block, Options::MINE).unwrap(); + // chain.validate(false).unwrap(); + + clean_output_dir(chain_dir); + Ok(()) +} From fa6f1dfaf16439c4f2fc9f1817551d640a645254 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 31 May 2020 18:28:54 +0100 Subject: [PATCH 35/48] NRD test coverage and cleanup --- chain/src/chain.rs | 5 +++ chain/src/pipe.rs | 6 +-- chain/src/txhashset/txhashset.rs | 2 +- chain/tests/nrd_validation_rules.rs | 59 +++++++++++++++++++---------- 4 files changed, 49 insertions(+), 23 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index af708be8de..8523c0bd6a 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -297,6 +297,11 @@ impl Chain { /// Returns true if it has been added to the longest chain /// or false if it has added to a fork (or orphan?). fn process_block_single(&self, b: Block, opts: Options) -> Result, Error> { + // Process the header first. + // If invalid then fail early. + // If valid then continue with block processing with header_head committed to db etc. + self.process_block_header(&b.header, opts)?; + let (maybe_new_head, prev_head) = { let mut header_pmmr = self.header_pmmr.write(); let mut txhashset = self.txhashset.write(); diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index 8f6bc34337..66e660a228 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -234,9 +234,6 @@ pub fn sync_block_headers( /// Note: In contrast to processing a full block we treat "already known" as success /// to allow processing to continue (for header itself). pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> { - // Check this header is not an orphan, we must know about the previous header to continue. - let prev_header = ctx.batch.get_previous_header(&header)?; - // If we have already processed the full block for this header then done. // Note: "already known" in this context is success so subsequent processing can continue. 
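// (This check deliberately runs before the previous-header lookup below, so the
// "already known" case is handled without first requiring the previous header.)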
{ @@ -246,6 +243,9 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> } } + // Check this header is not an orphan, we must know about the previous header to continue. + let prev_header = ctx.batch.get_previous_header(&header)?; + // If we have not yet seen the full block then check if we have seen this header. // If it does not increase total_difficulty beyond our current header_head // then we can (re)accept this header and process the full block (or request it). diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 2ef367b8a8..8b31eade0d 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -1057,7 +1057,7 @@ impl<'a> Extension<'a> { let pos = self.apply_kernel(kernel)?; if !global::is_nrd_enabled() { - return Ok(()); + continue; } if let KernelFeatures::NoRecentDuplicate { diff --git a/chain/tests/nrd_validation_rules.rs b/chain/tests/nrd_validation_rules.rs index 630f4ea90e..a6a83249e0 100644 --- a/chain/tests/nrd_validation_rules.rs +++ b/chain/tests/nrd_validation_rules.rs @@ -32,13 +32,15 @@ fn build_block( keychain: &K, key_id: &Identifier, txs: Vec, + nrd_override: bool, ) -> Result where K: Keychain, { - // Tests need to build "invalid" blocks so disable NRD feature flag temprorarily. + // Override current NRD feature flag. + // This allows us to build an "invalid" block for testing the block processing pipeline. let is_nrd_enabled = global::is_nrd_enabled(); - global::set_local_nrd_enabled(false); + global::set_local_nrd_enabled(nrd_override); let prev = chain.head_header()?; let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); @@ -70,7 +72,7 @@ where } #[test] -fn process_block_nrd_validation_rules() -> Result<(), Error> { +fn process_block_nrd_validation() -> Result<(), Error> { global::set_local_chain_type(global::ChainTypes::AutomatedTesting); global::set_local_nrd_enabled(true); @@ -86,16 +88,11 @@ fn process_block_nrd_validation_rules() -> Result<(), Error> { for n in 1..9 { let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier(); - let block = build_block(&chain, &keychain, &key_id, vec![])?; - chain.process_block(block, Options::MINE).unwrap(); + let block = build_block(&chain, &keychain, &key_id, vec![], false)?; + chain.process_block(block, Options::NONE)?; } - assert_eq!(chain.head().unwrap().height, 8); - - // TODO - build 2 "half txs" with shared NRD kernel and locked with relative_height = 2 - // Check invalid if tx1 and tx2 included in same block. - // Check invalid if tx2 included in next block. - // Check valid if tx2 included in subsequent block (height diff at least 2). + assert_eq!(chain.head()?.height, 8); let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { fee: 20000, @@ -143,15 +140,39 @@ fn process_block_nrd_validation_rules() -> Result<(), Error> { .unwrap(); let key_id9 = ExtKeychainPath::new(1, 9, 0, 0, 0).to_identifier(); + let key_id10 = ExtKeychainPath::new(1, 10, 0, 0, 0).to_identifier(); + let key_id11 = ExtKeychainPath::new(1, 11, 0, 0, 0).to_identifier(); - // Check block containing both tx1 and tx2 is invalid. - let block = build_block(&chain, &keychain, &key_id9, vec![tx1, tx2])?; - assert!(chain.process_block(block, Options::MINE).is_err()); - - panic!("tbc"); - - // chain.process_block(block, Options::MINE).unwrap(); - // chain.validate(false).unwrap(); + // Block containing both tx1 and tx2 is invalid. + // Not valid for two duplicate NRD kernels to co-exist in same block. 
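+ // Within a single block both kernels are applied at the same height, so the
+ // second kernel finds the first in the recent kernel index at a height diff
+ // of 0, which is always less than the declared relative_height.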
+ let block_invalid_9 = build_block( + &chain, + &keychain, + &key_id9, + vec![tx1.clone(), tx2.clone()], + false, + )?; + assert!(chain.process_block(block_invalid_9, Options::NONE).is_err()); + + assert_eq!(chain.head()?.height, 8); + + // Block containing tx1 is valid. + let block_valid_9 = build_block(&chain, &keychain, &key_id9, vec![tx1.clone()], false)?; + chain.process_block(block_valid_9, Options::NONE)?; + + // Block at height 10 is invalid if it contains tx2 due to NRD rule (relative_height=2). + let block_invalid_10 = build_block(&chain, &keychain, &key_id10, vec![tx2.clone()], false)?; + assert!(chain + .process_block(block_invalid_10, Options::NONE) + .is_err()); + + // Block at height 10 is valid if we do not include tx2. + let block_valid_10 = build_block(&chain, &keychain, &key_id10, vec![], false)?; + chain.process_block(block_valid_10, Options::NONE)?; + + // Block at height 11 is valid with tx2 as NRD rule is met (relative_height=2). + let block_valid_11 = build_block(&chain, &keychain, &key_id11, vec![tx2.clone()], false)?; + chain.process_block(block_valid_11, Options::NONE)?; clean_output_dir(chain_dir); Ok(()) From 9c87a23e12638d4326a319fb47f8894c648087e1 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 31 May 2020 18:33:36 +0100 Subject: [PATCH 36/48] NRD relative height 1 test --- chain/tests/nrd_validation_rules.rs | 96 +++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/chain/tests/nrd_validation_rules.rs b/chain/tests/nrd_validation_rules.rs index a6a83249e0..c5f5aeea13 100644 --- a/chain/tests/nrd_validation_rules.rs +++ b/chain/tests/nrd_validation_rules.rs @@ -177,3 +177,99 @@ fn process_block_nrd_validation() -> Result<(), Error> { clean_output_dir(chain_dir); Ok(()) } + +#[test] +fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> { + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_nrd_enabled(true); + + util::init_test_logger(); + + let chain_dir = ".grin.nrd_kernel_relative_height_1"; + clean_output_dir(chain_dir); + + let keychain = ExtKeychain::from_random_seed(false).unwrap(); + let builder = ProofBuilder::new(&keychain); + let genesis = genesis_block(&keychain); + let chain = init_chain(chain_dir, genesis.clone()); + + for n in 1..9 { + let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier(); + let block = build_block(&chain, &keychain, &key_id, vec![], false)?; + chain.process_block(block, Options::NONE)?; + } + + assert_eq!(chain.head()?.height, 8); + + let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { + fee: 20000, + relative_height: NRDRelativeHeight::new(1)?, + }); + + // // Construct the message to be signed. + let msg = kernel.msg_to_sign().unwrap(); + + // // Generate a kernel with public excess and associated signature. 
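+ // With relative_height = 1 the only configuration the NRD rule can reject is
+ // two duplicate kernels within one and the same block (height diff of 0).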
+ let excess = BlindingFactor::rand(&keychain.secp());
+ let skey = excess.secret_key(&keychain.secp()).unwrap();
+ kernel.excess = keychain.secp().commit(0, skey).unwrap();
+ let pubkey = &kernel.excess.to_pubkey(&keychain.secp()).unwrap();
+ kernel.excess_sig =
+ aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
+ kernel.verify().unwrap();
+
+ let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
+ let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
+ let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
+
+ let tx1 = build::transaction_with_kernel(
+ vec![
+ build::coinbase_input(consensus::REWARD, key_id1.clone()),
+ build::output(consensus::REWARD - 20000, key_id2.clone()),
+ ],
+ kernel.clone(),
+ excess.clone(),
+ &keychain,
+ &builder,
+ )
+ .unwrap();
+
+ let tx2 = build::transaction_with_kernel(
+ vec![
+ build::input(consensus::REWARD - 20000, key_id2.clone()),
+ build::output(consensus::REWARD - 40000, key_id3.clone()),
+ ],
+ kernel.clone(),
+ excess.clone(),
+ &keychain,
+ &builder,
+ )
+ .unwrap();
+
+ let key_id9 = ExtKeychainPath::new(1, 9, 0, 0, 0).to_identifier();
+ let key_id10 = ExtKeychainPath::new(1, 10, 0, 0, 0).to_identifier();
+
+ // Block containing both tx1 and tx2 is invalid.
+ // Not valid for two duplicate NRD kernels to co-exist in same block.
+ let block_invalid_9 = build_block(
+ &chain,
+ &keychain,
+ &key_id9,
+ vec![tx1.clone(), tx2.clone()],
+ false,
+ )?;
+ assert!(chain.process_block(block_invalid_9, Options::NONE).is_err());
+
+ assert_eq!(chain.head()?.height, 8);
+
+ // Block containing tx1 is valid.
+ let block_valid_9 = build_block(&chain, &keychain, &key_id9, vec![tx1.clone()], false)?;
+ chain.process_block(block_valid_9, Options::NONE)?;
+
+ // Block at height 10 is valid with tx2 as NRD rule is met (relative_height=1).
+ let block_valid_10 = build_block(&chain, &keychain, &key_id10, vec![tx2.clone()], false)?;
+ chain.process_block(block_valid_10, Options::NONE)?;
+
+ clean_output_dir(chain_dir);
+ Ok(())
+}
From 3d4fe1fbb06f9dd5676b0cda2e466a066524004e Mon Sep 17 00:00:00 2001
From: antiochp <30642645+antiochp@users.noreply.github.com>
Date: Sun, 31 May 2020 22:20:42 +0100
Subject: [PATCH 37/48] test coverage for NRD kernels in block processing

---
 chain/src/txhashset/txhashset.rs    |  59 ++++----
 chain/tests/nrd_validation_rules.rs | 201 ++++++++++++++++++++++------
 2 files changed, 192 insertions(+), 68 deletions(-)

diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs
index 8b31eade0d..4a4b79b285 100644
--- a/chain/src/txhashset/txhashset.rs
+++ b/chain/src/txhashset/txhashset.rs
@@ -1056,29 +1056,29 @@ impl<'a> Extension<'a> {
 for kernel in kernels {
 let pos = self.apply_kernel(kernel)?;

- if !global::is_nrd_enabled() {
- continue;
- }
-
- if let KernelFeatures::NoRecentDuplicate {
- relative_height, ..
- } = kernel.features
- {
- debug!("checking NRD index: {:?}", kernel.excess());
- if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? {
- let diff = height.saturating_sub(prev.height);
- debug!("NRD check: {}, {:?}, {:?}", height, prev, relative_height);
- if diff < relative_height.into() {
- return Err(ErrorKind::NRDRelativeHeight.into());
+ // If NRD enabled then enforce NRD relative height rule.
+ // Otherwise just continue and apply the next kernel.
+ if global::is_nrd_enabled() {
+ if let KernelFeatures::NoRecentDuplicate {
+ relative_height, ..
+ } = kernel.features + { + debug!("checking NRD index: {:?}", kernel.excess()); + if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? { + let diff = height.saturating_sub(prev.height); + debug!("NRD check: {}, {:?}, {:?}", height, prev, relative_height); + if diff < relative_height.into() { + return Err(ErrorKind::NRDRelativeHeight.into()); + } } + let new_pos = CommitPos { pos, height }; + debug!( + "pushing entry to NRD index: {:?}: {:?}", + kernel.excess(), + new_pos + ); + kernel_index.push_pos(batch, kernel.excess(), new_pos)?; } - let new_pos = CommitPos { pos, height }; - debug!( - "pushing entry to NRD index: {:?}: {:?}", - kernel.excess(), - new_pos - ); - kernel_index.push_pos(batch, kernel.excess(), new_pos)?; } } Ok(()) @@ -1223,15 +1223,14 @@ impl<'a> Extension<'a> { ); } - // Now rewind the kernel_pos index based on kernels in the block being rewound. - let coinbase_kernel_index = store::nrd_recent_kernel_index(); - for kernel in block.kernels() { - if let KernelFeatures::Coinbase = kernel.features { - coinbase_kernel_index.rewind( - batch, - kernel.excess(), - prev_header.kernel_mmr_size, - )?; + // If NRD feature flag is enabled rewind the kernel_pos index + // for any NRD kernels in the block being rewound. + if global::is_nrd_enabled() { + let kernel_index = store::nrd_recent_kernel_index(); + for kernel in block.kernels() { + if let KernelFeatures::NoRecentDuplicate { .. } = kernel.features { + kernel_index.rewind(batch, kernel.excess(), prev_header.kernel_mmr_size)?; + } } } diff --git a/chain/tests/nrd_validation_rules.rs b/chain/tests/nrd_validation_rules.rs index c5f5aeea13..693e25e262 100644 --- a/chain/tests/nrd_validation_rules.rs +++ b/chain/tests/nrd_validation_rules.rs @@ -21,7 +21,9 @@ use grin_util as util; use self::chain_test_helper::{clean_output_dir, genesis_block, init_chain}; use crate::chain::{Chain, Error, Options}; -use crate::core::core::{Block, KernelFeatures, NRDRelativeHeight, Transaction, TxKernel}; +use crate::core::core::{ + Block, BlockHeader, KernelFeatures, NRDRelativeHeight, Transaction, TxKernel, +}; use crate::core::libtx::{aggsig, build, reward, ProofBuilder}; use crate::core::{consensus, global, pow}; use crate::keychain::{BlindingFactor, ExtKeychain, ExtKeychainPath, Identifier, Keychain}; @@ -32,42 +34,45 @@ fn build_block( keychain: &K, key_id: &Identifier, txs: Vec, - nrd_override: bool, ) -> Result where K: Keychain, { - // Override current NRD feature flag. - // This allows us to build an "invalid" block for testing the block processing pipeline. 
- let is_nrd_enabled = global::is_nrd_enabled(); - global::set_local_nrd_enabled(nrd_override); - let prev = chain.head_header()?; - let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap()); + build_block_from_prev(&prev, chain, keychain, key_id, txs) +} + +fn build_block_from_prev( + prev: &BlockHeader, + chain: &Chain, + keychain: &K, + key_id: &Identifier, + txs: Vec, +) -> Result +where + K: Keychain, +{ + let next_header_info = + consensus::next_difficulty(prev.height, chain.difficulty_iter().unwrap()); let fee = txs.iter().map(|x| x.fee()).sum(); let reward = reward::output(keychain, &ProofBuilder::new(keychain), key_id, fee, false).unwrap(); - let mut block = Block::new(&prev, txs, next_header_info.clone().difficulty, reward)?; + let mut block = Block::new(prev, txs, next_header_info.clone().difficulty, reward)?; block.header.timestamp = prev.timestamp + Duration::seconds(60); block.header.pow.secondary_scaling = next_header_info.secondary_scaling; chain.set_txhashset_roots(&mut block)?; - let edge_bits = global::min_edge_bits(); - block.header.pow.proof.edge_bits = edge_bits; + block.header.pow.proof.edge_bits = global::min_edge_bits(); pow::pow_size( &mut block.header, next_header_info.difficulty, global::proofsize(), - edge_bits, + global::min_edge_bits(), ) .unwrap(); - - // Restore NRD feature flag after building the potentially "invalid" block. - global::set_local_nrd_enabled(is_nrd_enabled); - Ok(block) } @@ -88,7 +93,7 @@ fn process_block_nrd_validation() -> Result<(), Error> { for n in 1..9 { let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier(); - let block = build_block(&chain, &keychain, &key_id, vec![], false)?; + let block = build_block(&chain, &keychain, &key_id, vec![])?; chain.process_block(block, Options::NONE)?; } @@ -145,33 +150,42 @@ fn process_block_nrd_validation() -> Result<(), Error> { // Block containing both tx1 and tx2 is invalid. // Not valid for two duplicate NRD kernels to co-exist in same block. - let block_invalid_9 = build_block( - &chain, - &keychain, - &key_id9, - vec![tx1.clone(), tx2.clone()], - false, - )?; + // Jump through some hoops to build an invalid block by disabling the feature flag. + // TODO - We need a good way of building invalid stuff in tests. + let block_invalid_9 = { + global::set_local_nrd_enabled(false); + let block = build_block(&chain, &keychain, &key_id9, vec![tx1.clone(), tx2.clone()])?; + global::set_local_nrd_enabled(true); + block + }; assert!(chain.process_block(block_invalid_9, Options::NONE).is_err()); assert_eq!(chain.head()?.height, 8); // Block containing tx1 is valid. - let block_valid_9 = build_block(&chain, &keychain, &key_id9, vec![tx1.clone()], false)?; + let block_valid_9 = build_block(&chain, &keychain, &key_id9, vec![tx1.clone()])?; chain.process_block(block_valid_9, Options::NONE)?; // Block at height 10 is invalid if it contains tx2 due to NRD rule (relative_height=2). - let block_invalid_10 = build_block(&chain, &keychain, &key_id10, vec![tx2.clone()], false)?; + // Jump through some hoops to build an invalid block by disabling the feature flag. + // TODO - We need a good way of building invalid stuff in tests. + let block_invalid_10 = { + global::set_local_nrd_enabled(false); + let block = build_block(&chain, &keychain, &key_id10, vec![tx2.clone()])?; + global::set_local_nrd_enabled(true); + block + }; + assert!(chain .process_block(block_invalid_10, Options::NONE) .is_err()); // Block at height 10 is valid if we do not include tx2. 
- let block_valid_10 = build_block(&chain, &keychain, &key_id10, vec![], false)?; + let block_valid_10 = build_block(&chain, &keychain, &key_id10, vec![])?; chain.process_block(block_valid_10, Options::NONE)?; // Block at height 11 is valid with tx2 as NRD rule is met (relative_height=2). - let block_valid_11 = build_block(&chain, &keychain, &key_id11, vec![tx2.clone()], false)?; + let block_valid_11 = build_block(&chain, &keychain, &key_id11, vec![tx2.clone()])?; chain.process_block(block_valid_11, Options::NONE)?; clean_output_dir(chain_dir); @@ -195,7 +209,7 @@ fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> { for n in 1..9 { let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier(); - let block = build_block(&chain, &keychain, &key_id, vec![], false)?; + let block = build_block(&chain, &keychain, &key_id, vec![])?; chain.process_block(block, Options::NONE)?; } @@ -251,25 +265,136 @@ fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> { // Block containing both tx1 and tx2 is invalid. // Not valid for two duplicate NRD kernels to co-exist in same block. - let block_invalid_9 = build_block( - &chain, - &keychain, - &key_id9, - vec![tx1.clone(), tx2.clone()], - false, - )?; + // Jump through some hoops here to build an "invalid" block. + // TODO - We need a good way of building invalid stuff for tests. + let block_invalid_9 = { + global::set_local_nrd_enabled(false); + let block = build_block(&chain, &keychain, &key_id9, vec![tx1.clone(), tx2.clone()])?; + global::set_local_nrd_enabled(true); + block + }; + assert!(chain.process_block(block_invalid_9, Options::NONE).is_err()); assert_eq!(chain.head()?.height, 8); // Block containing tx1 is valid. - let block_valid_9 = build_block(&chain, &keychain, &key_id9, vec![tx1.clone()], false)?; + let block_valid_9 = build_block(&chain, &keychain, &key_id9, vec![tx1.clone()])?; chain.process_block(block_valid_9, Options::NONE)?; // Block at height 10 is valid with tx2 as NRD rule is met (relative_height=1). - let block_valid_10 = build_block(&chain, &keychain, &key_id10, vec![tx2.clone()], false)?; + let block_valid_10 = build_block(&chain, &keychain, &key_id10, vec![tx2.clone()])?; chain.process_block(block_valid_10, Options::NONE)?; clean_output_dir(chain_dir); Ok(()) } + +#[test] +fn process_block_nrd_validation_fork() -> Result<(), Error> { + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_nrd_enabled(true); + + util::init_test_logger(); + + let chain_dir = ".grin.nrd_kernel_fork"; + clean_output_dir(chain_dir); + + let keychain = ExtKeychain::from_random_seed(false).unwrap(); + let builder = ProofBuilder::new(&keychain); + let genesis = genesis_block(&keychain); + let chain = init_chain(chain_dir, genesis.clone()); + + for n in 1..9 { + let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier(); + let block = build_block(&chain, &keychain, &key_id, vec![])?; + chain.process_block(block, Options::NONE)?; + } + + let header_8 = chain.head_header()?; + assert_eq!(header_8.height, 8); + + let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { + fee: 20000, + relative_height: NRDRelativeHeight::new(2)?, + }); + + // // Construct the message to be signed. + let msg = kernel.msg_to_sign().unwrap(); + + // // Generate a kernel with public excess and associated signature. 
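+ // The single NRD kernel built below is shared by tx1 and tx2, which are then
+ // replayed across two competing forks to exercise rewind of the kernel index.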
+ let excess = BlindingFactor::rand(&keychain.secp());
+ let skey = excess.secret_key(&keychain.secp()).unwrap();
+ kernel.excess = keychain.secp().commit(0, skey).unwrap();
+ let pubkey = &kernel.excess.to_pubkey(&keychain.secp()).unwrap();
+ kernel.excess_sig =
+ aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
+ kernel.verify().unwrap();
+
+ let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
+ let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
+ let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
+
+ let tx1 = build::transaction_with_kernel(
+ vec![
+ build::coinbase_input(consensus::REWARD, key_id1.clone()),
+ build::output(consensus::REWARD - 20000, key_id2.clone()),
+ ],
+ kernel.clone(),
+ excess.clone(),
+ &keychain,
+ &builder,
+ )
+ .unwrap();
+
+ let tx2 = build::transaction_with_kernel(
+ vec![
+ build::input(consensus::REWARD - 20000, key_id2.clone()),
+ build::output(consensus::REWARD - 40000, key_id3.clone()),
+ ],
+ kernel.clone(),
+ excess.clone(),
+ &keychain,
+ &builder,
+ )
+ .unwrap();
+
+ let key_id9 = ExtKeychainPath::new(1, 9, 0, 0, 0).to_identifier();
+ let key_id10 = ExtKeychainPath::new(1, 10, 0, 0, 0).to_identifier();
+ let key_id11 = ExtKeychainPath::new(1, 11, 0, 0, 0).to_identifier();
+
+ // Block containing tx1 is valid.
+ let block_valid_9 =
+ build_block_from_prev(&header_8, &chain, &keychain, &key_id9, vec![tx1.clone()])?;
+ chain.process_block(block_valid_9.clone(), Options::NONE)?;
+
+ // Block at height 10 is valid if we do not include tx2.
+ let block_valid_10 =
+ build_block_from_prev(&block_valid_9.header, &chain, &keychain, &key_id10, vec![])?;
+ chain.process_block(block_valid_10, Options::NONE)?;
+
+ // Process an alternative "fork" block also at height 9.
+ // The "other" block at height 9 should not affect this one in terms of NRD kernels
+ // as the recent kernel index should be rewound.
+ let block_valid_9b =
+ build_block_from_prev(&header_8, &chain, &keychain, &key_id9, vec![tx1.clone()])?;
+ chain.process_block(block_valid_9b.clone(), Options::NONE)?;
+
+ // Process an alternative block at height 10 on this same fork.
+ let block_valid_10b =
+ build_block_from_prev(&block_valid_9b.header, &chain, &keychain, &key_id10, vec![])?;
+ chain.process_block(block_valid_10b.clone(), Options::NONE)?;
+
+ // Block at height 11 is valid with tx2 as NRD rule is met (relative_height=2).
+ let block_valid_11b = build_block_from_prev(
+ &block_valid_10b.header,
+ &chain,
+ &keychain,
+ &key_id11,
+ vec![tx2.clone()],
+ )?;
+ chain.process_block(block_valid_11b, Options::NONE)?;
+
+ clean_output_dir(chain_dir);
+ Ok(())
+}
From f260771a26fff178905fe87d34a0885cc6a49a3b Mon Sep 17 00:00:00 2001
From: antiochp <30642645+antiochp@users.noreply.github.com>
Date: Sun, 31 May 2020 22:25:52 +0100
Subject: [PATCH 38/48] cleanup

---
 chain/src/txhashset/txhashset.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs
index 4a4b79b285..740d041d98 100644
--- a/chain/src/txhashset/txhashset.rs
+++ b/chain/src/txhashset/txhashset.rs
@@ -1052,13 +1052,13 @@ impl<'a> Extension<'a> {
 height: u64,
 batch: &Batch<'_>,
 ) -> Result<(), Error> {
- let kernel_index = store::nrd_recent_kernel_index();
 for kernel in kernels {
 let pos = self.apply_kernel(kernel)?;

 // If NRD enabled then enforce NRD relative height rule.
 // Otherwise just continue and apply the next kernel.
if global::is_nrd_enabled() { + let kernel_index = store::nrd_recent_kernel_index(); if let KernelFeatures::NoRecentDuplicate { relative_height, .. } = kernel.features From 188e28957cc7f927b198b70ef1f2588664085d15 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 1 Jun 2020 13:22:56 +0100 Subject: [PATCH 39/48] start of test coverage for txpool NRD kernel rules --- pool/tests/common.rs | 38 +++- ...ernels.rs => nrd_kernel_enable_disable.rs} | 0 pool/tests/nrd_kernel_relative_height_rule.rs | 162 ++++++++++++++++++ 3 files changed, 198 insertions(+), 2 deletions(-) rename pool/tests/{nrd_kernels.rs => nrd_kernel_enable_disable.rs} (100%) create mode 100644 pool/tests/nrd_kernel_relative_height_rule.rs diff --git a/pool/tests/common.rs b/pool/tests/common.rs index a5b0fc0aa5..08e66ebf01 100644 --- a/pool/tests/common.rs +++ b/pool/tests/common.rs @@ -18,9 +18,11 @@ use self::chain::store::ChainStore; use self::chain::types::Tip; use self::core::core::hash::{Hash, Hashed}; use self::core::core::verifier_cache::VerifierCache; -use self::core::core::{Block, BlockHeader, BlockSums, Committed, KernelFeatures, Transaction}; +use self::core::core::{ + Block, BlockHeader, BlockSums, Committed, KernelFeatures, Transaction, TxKernel, +}; use self::core::libtx; -use self::keychain::{ExtKeychain, Keychain}; +use self::keychain::{BlindingFactor, ExtKeychain, Keychain}; use self::pool::types::*; use self::pool::TransactionPool; use self::util::secp::pedersen::Commitment; @@ -257,6 +259,38 @@ where .unwrap() } +pub fn test_transaction_with_kernel( + keychain: &K, + input_values: Vec, + output_values: Vec, + kernel: TxKernel, + excess: BlindingFactor, +) -> Transaction +where + K: Keychain, +{ + let mut tx_elements = Vec::new(); + + for input_value in input_values { + let key_id = ExtKeychain::derive_key_id(1, input_value as u32, 0, 0, 0); + tx_elements.push(libtx::build::input(input_value, key_id)); + } + + for output_value in output_values { + let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0); + tx_elements.push(libtx::build::output(output_value, key_id)); + } + + libtx::build::transaction_with_kernel( + tx_elements, + kernel, + excess, + keychain, + &libtx::ProofBuilder::new(keychain), + ) + .unwrap() +} + pub fn test_source() -> TxSource { TxSource::Broadcast } diff --git a/pool/tests/nrd_kernels.rs b/pool/tests/nrd_kernel_enable_disable.rs similarity index 100% rename from pool/tests/nrd_kernels.rs rename to pool/tests/nrd_kernel_enable_disable.rs diff --git a/pool/tests/nrd_kernel_relative_height_rule.rs b/pool/tests/nrd_kernel_relative_height_rule.rs new file mode 100644 index 0000000000..d5202a154e --- /dev/null +++ b/pool/tests/nrd_kernel_relative_height_rule.rs @@ -0,0 +1,162 @@ +// Copyright 2020 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
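+
+//! Test coverage for the NRD kernel relative height rule as enforced by the
+//! transaction pool: duplicate NRD kernels may never co-exist in the pool,
+//! since everything in the pool is a candidate for the same block.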
+ +pub mod common; + +use self::core::core::hash::Hashed; +use self::core::core::verifier_cache::LruVerifierCache; +use self::core::core::{ + Block, BlockHeader, HeaderVersion, KernelFeatures, NRDRelativeHeight, Transaction, TxKernel, +}; +use self::core::global; +use self::core::libtx::aggsig; +use self::core::pow::Difficulty; +use self::core::{consensus, libtx}; +use self::keychain::{BlindingFactor, ExtKeychain, Keychain}; +use self::pool::types::PoolError; +use self::util::RwLock; +use crate::common::*; +use grin_core as core; +use grin_keychain as keychain; +use grin_pool as pool; +use grin_util as util; +use std::sync::Arc; + +// TODO - +// 1) check we do not allow a tx with 2 duplicate NRD kernels +// 2) check we allow back to back txs if relative_height=1 (tx in pool and tx incoming) +// 3) check we do not allow back to back if relative_height=2 (tx in pool and tx incoming) + +// relative_height=1 +// what about stempool and txpool? +// 1 in txpool, do not allow it in the stempool +// 1 in stempool, do not allow in stempool +// 1 in txpool do not allow in stempool + +#[test] +fn test_nrd_kernel_relative_height_rule_1() -> Result<(), PoolError> { + util::init_test_logger(); + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_nrd_enabled(true); + + let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); + + let db_root = ".grin_nrd_kernel_relative_height_rule_1"; + clean_output_dir(db_root.into()); + + let mut chain = ChainAdapter::init(db_root.into()).unwrap(); + + let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); + + // Initialize the chain/txhashset with an initial block + // so we have a non-empty UTXO set. + let add_block = |prev_header: BlockHeader, txs: Vec, chain: &mut ChainAdapter| { + let height = prev_header.height + 1; + let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0); + let fee = txs.iter().map(|x| x.fee()).sum(); + let reward = libtx::reward::output( + &keychain, + &libtx::ProofBuilder::new(&keychain), + &key_id, + fee, + false, + ) + .unwrap(); + let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap(); + + // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from). + block.header.prev_root = prev_header.hash(); + + chain.update_db_for_block(&block); + block + }; + + let block = add_block(BlockHeader::default(), vec![], &mut chain); + let header = block.header; + + // Now create tx to spend that first coinbase (now matured). + // Provides us with some useful outputs to test with. + let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]); + + // Mine that initial tx so we can spend it with multiple txs + let mut block = add_block(header, vec![initial_tx], &mut chain); + let mut header = block.header; + + // Initialize a new pool with our chain adapter. + let mut pool = test_setup(Arc::new(chain.clone()), verifier_cache); + + // Now mine several more blocks out to HF3 + for _ in 0..7 { + block = add_block(header, vec![], &mut chain); + header = block.header; + } + + assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK); + assert_eq!(header.version, HeaderVersion(4)); + + let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate { + fee: 6, + relative_height: NRDRelativeHeight::new(1)?, + }); + let msg = kernel.msg_to_sign().unwrap(); + + // Generate a kernel with public excess and associated signature. 
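+ // relative_height = 1 is the minimum NRD lock. Even at the minimum, two txs
+ // sharing this kernel can never sit in the pool together, as they would be
+ // candidates for the same block (height diff of 0).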
+ let excess = BlindingFactor::rand(&keychain.secp()); + let skey = excess.secret_key(&keychain.secp()).unwrap(); + kernel.excess = keychain.secp().commit(0, skey).unwrap(); + let pubkey = &kernel.excess.to_pubkey(&keychain.secp()).unwrap(); + kernel.excess_sig = + aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); + kernel.verify().unwrap(); + + let tx1 = test_transaction_with_kernel( + &keychain, + vec![10, 20], + vec![24], + kernel.clone(), + excess.clone(), + ); + + let tx2 = test_transaction_with_kernel( + &keychain, + vec![24], + vec![18], + kernel.clone(), + excess.clone(), + ); + + // Confirm we can successfully add tx1 with NRD kernel to txpool. + assert_eq!( + pool.add_to_pool(test_source(), tx1.clone(), false, &header), + Ok(()), + ); + + // Confirm we cannot add tx2 to txpool while tx1 is in there (pair of duplicate NRD kernels). + assert_eq!( + pool.add_to_pool(test_source(), tx2.clone(), false, &header), + Ok(()), + ); + + // assert_eq!(pool.total_size(), 1); + + // let txs = pool.prepare_mineable_transactions().unwrap(); + // assert_eq!(txs.len(), 1); + + // Cleanup db directory + clean_output_dir(db_root.into()); + + panic!("wip"); + + Ok(()) +} From 580b414ba5891e203663a8f3893f00f9808e4ad0 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Tue, 2 Jun 2020 16:52:32 +0100 Subject: [PATCH 40/48] wip --- core/src/core/transaction.rs | 33 ++++++++++++++++- pool/src/pool.rs | 19 ++++++++++ pool/src/transaction_pool.rs | 19 ++++++++-- pool/tests/nrd_kernel_relative_height_rule.rs | 37 ++++++++++++++----- 4 files changed, 94 insertions(+), 14 deletions(-) diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index b37cee0edc..a21c779c78 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -909,6 +909,36 @@ impl TransactionBody { Ok(()) } + // It is never valid to have multiple duplicate NRD kernels (by public excess) + // in the same transaction or block. We check this here. + // We skip this check if NRD feature is not enabled. + fn verify_no_nrd_duplicates(&self) -> Result<(), Error> { + if !global::is_nrd_enabled() { + return Ok(()); + } + + let mut nrd_excess: Vec = self + .kernels + .iter() + .filter(|x| match x.features { + KernelFeatures::NoRecentDuplicate { .. } => true, + _ => false, + }) + .map(|x| x.excess()) + .collect(); + + // Sort and dedup and compare length to look for duplicates. + nrd_excess.sort(); + let original_count = nrd_excess.len(); + nrd_excess.dedup(); + let dedup_count = nrd_excess.len(); + if original_count == dedup_count { + Ok(()) + } else { + Err(Error::InvalidNRDRelativeHeight) + } + } + // Verify that inputs|outputs|kernels are sorted in lexicographical order // and that there are no duplicates (they are all unique within this transaction). 
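// Note: fully identical duplicate kernels are rejected by this uniqueness check;
// NRD kernels sharing an excess but differing in fee or relative_height are
// instead caught by verify_no_nrd_duplicates above.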
fn verify_sorted(&self) -> Result<(), Error> { @@ -970,6 +1000,7 @@ impl TransactionBody { /// * kernel signature verification pub fn validate_read(&self, weighting: Weighting) -> Result<(), Error> { self.verify_weight(weighting)?; + self.verify_no_nrd_duplicates()?; self.verify_sorted()?; self.verify_cut_through()?; Ok(()) @@ -1227,8 +1258,8 @@ impl Transaction { weighting: Weighting, verifier: Arc>, ) -> Result<(), Error> { - self.body.validate(weighting, verifier)?; self.body.verify_features()?; + self.body.validate(weighting, verifier)?; self.verify_kernel_sums(self.overage(), self.offset.clone())?; Ok(()) } diff --git a/pool/src/pool.rs b/pool/src/pool.rs index eaa894d8d2..e09d310eb5 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -29,6 +29,7 @@ use grin_util as util; use std::cmp::Reverse; use std::collections::{HashMap, HashSet}; use std::sync::Arc; +use util::secp::pedersen::Commitment; use util::static_secp_instance; pub struct Pool @@ -70,6 +71,19 @@ where .map(|x| x.tx.clone()) } + /// Query the tx pool for an individual tx matching the given public excess. + /// Used for checking for duplicate NRD kernels in the txpool. + pub fn retrieve_tx_by_kernel_excess(&self, excess: Commitment) -> Option { + for x in &self.entries { + for k in x.tx.kernels() { + if k.excess() == excess { + return Some(x.tx.clone()); + } + } + } + None + } + /// Query the tx pool for an individual tx matching the given kernel hash. pub fn retrieve_tx_by_kernel_hash(&self, hash: Hash) -> Option { for x in &self.entries { @@ -298,6 +312,8 @@ where extra_tx: Option, header: &BlockHeader, ) -> Result<(), PoolError> { + error!("***** TODO - reconcile needs to be NRD aware (0 height and >0 height"); + let existing_entries = self.entries.clone(); self.entries.clear(); @@ -425,6 +441,7 @@ where tx_buckets.into_iter().flat_map(|x| x.raw_txs).collect() } + /// TODO - This is kernel based. How does this interact with NRD? pub fn find_matching_transactions(&self, kernels: &[TxKernel]) -> Vec { // While the inputs outputs can be cut-through the kernel will stay intact // In order to deaggregate tx we look for tx with the same kernel @@ -446,6 +463,8 @@ where /// Quick reconciliation step - we can evict any txs in the pool where /// inputs or kernels intersect with the block. pub fn reconcile_block(&mut self, block: &Block) { + error!("***** TODO - reconcile_block needs to be NRD aware (0 height and >0 height"); + // Filter txs in the pool based on the latest block. // Reject any txs where we see a matching tx kernel in the block. // Also reject any txs where we see a conflicting tx, diff --git a/pool/src/transaction_pool.rs b/pool/src/transaction_pool.rs index 8f330b9548..b87ddc85e4 100644 --- a/pool/src/transaction_pool.rs +++ b/pool/src/transaction_pool.rs @@ -160,15 +160,28 @@ where stem: bool, header: &BlockHeader, ) -> Result<(), PoolError> { - // Quick check to deal with common case of seeing the *same* tx - // broadcast from multiple peers simultaneously. - if !stem && self.txpool.contains_tx(tx.hash()) { + // Quick check for duplicate txs. + // Our stempool is private and we do not want to reveal anything about the txs contained. + // If this is a stem tx and we have already seen it in the stempool quietly drop it. + // Otherwise if we have seen it in the txpool then "duplicate tx" error. + if stem && self.stempool.contains_tx(tx.hash()) { + return Ok(()); + } else if self.txpool.contains_tx(tx.hash()) { return Err(PoolError::DuplicateTx); } // Check this tx is valid based on current header version. 
+ // NRD kernels only valid post HF3 and if NRD feature enabled. self.verify_kernel_variants(&tx, header)?; + // Reject any NRD kernels if duplicate kernels already present in the stempool/txpool. + // It is never valid for duplicates to co-exist in the stempool/txpool (min relative_height = 1) + if stem { + self.stempool.verify_nrd_height_0(&tx)?; + } else { + self.txpool + } + // Do we have the capacity to accept this transaction? let acceptability = self.is_acceptable(&tx, stem); let mut evict = false; diff --git a/pool/tests/nrd_kernel_relative_height_rule.rs b/pool/tests/nrd_kernel_relative_height_rule.rs index d5202a154e..4d3db123ce 100644 --- a/pool/tests/nrd_kernel_relative_height_rule.rs +++ b/pool/tests/nrd_kernel_relative_height_rule.rs @@ -15,6 +15,7 @@ pub mod common; use self::core::core::hash::Hashed; +use self::core::core::transaction; use self::core::core::verifier_cache::LruVerifierCache; use self::core::core::{ Block, BlockHeader, HeaderVersion, KernelFeatures, NRDRelativeHeight, Transaction, TxKernel, @@ -50,13 +51,11 @@ fn test_nrd_kernel_relative_height_rule_1() -> Result<(), PoolError> { global::set_local_chain_type(global::ChainTypes::AutomatedTesting); global::set_local_nrd_enabled(true); - let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); - let db_root = ".grin_nrd_kernel_relative_height_rule_1"; clean_output_dir(db_root.into()); let mut chain = ChainAdapter::init(db_root.into()).unwrap(); - + let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize the chain/txhashset with an initial block @@ -136,18 +135,38 @@ fn test_nrd_kernel_relative_height_rule_1() -> Result<(), PoolError> { excess.clone(), ); - // Confirm we can successfully add tx1 with NRD kernel to txpool. + // Confirm we can successfully add tx1 with NRD kernel to stempool. assert_eq!( - pool.add_to_pool(test_source(), tx1.clone(), false, &header), + pool.add_to_pool(test_source(), tx1.clone(), true, &header), Ok(()), ); - // Confirm we cannot add tx2 to txpool while tx1 is in there (pair of duplicate NRD kernels). + // Confirm we cannot add tx2 to stempool while tx1 is in there (duplicate NRD kernels). assert_eq!( - pool.add_to_pool(test_source(), tx2.clone(), false, &header), - Ok(()), + pool.add_to_pool(test_source(), tx2.clone(), true, &header), + Err(PoolError::InvalidTx( + transaction::Error::InvalidNRDRelativeHeight + )) ); + // // Confirm we can successfully add tx1 with NRD kernel to txpool. + // assert_eq!( + // pool.add_to_pool(test_source(), tx1.clone(), false, &header), + // Ok(()), + // ); + + // // Confirm we cannot add tx2 to txpool while tx1 is in there (duplicate NRD kernels). + // assert_eq!( + // pool.add_to_pool(test_source(), tx2.clone(), false, &header), + // Err(PoolError::InvalidTx(transaction::Error::InvalidNRDRelativeHeight)) + // ); + + // // Confirm we cannot add tx2 to stempool while tx1 is in txpool (duplicate NRD kernels). 
+ // assert_eq!( + // pool.add_to_pool(test_source(), tx2.clone(), true, &header), + // Err(PoolError::InvalidTx(transaction::Error::InvalidNRDRelativeHeight)) + // ); + // assert_eq!(pool.total_size(), 1); // let txs = pool.prepare_mineable_transactions().unwrap(); @@ -156,7 +175,5 @@ fn test_nrd_kernel_relative_height_rule_1() -> Result<(), PoolError> { // Cleanup db directory clean_output_dir(db_root.into()); - panic!("wip"); - Ok(()) } From cb885a030cbdc7e1d86d283668d13d6e0765e55a Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Tue, 2 Jun 2020 22:02:22 +0100 Subject: [PATCH 41/48] rework pool tests to use real chain (was mock chain) to better reflect reality (tx/block validation rules etc.) --- core/src/global.rs | 3 +- pool/src/types.rs | 4 +- pool/tests/block_building.rs | 182 ++++++------- pool/tests/block_max_weight.rs | 221 +++++++--------- pool/tests/block_reconciliation.rs | 293 +++++++++------------ pool/tests/coinbase_maturity.rs | 88 +++---- pool/tests/common.rs | 276 ++++++++++++-------- pool/tests/nrd_kernels.rs | 216 ---------------- pool/tests/nrd_kernels_disabled.rs | 98 +++++++ pool/tests/nrd_kernels_enabled.rs | 98 +++++++ pool/tests/transaction_pool.rs | 393 +++++------------------------ 11 files changed, 758 insertions(+), 1114 deletions(-) delete mode 100644 pool/tests/nrd_kernels.rs create mode 100644 pool/tests/nrd_kernels_disabled.rs create mode 100644 pool/tests/nrd_kernels_enabled.rs diff --git a/core/src/global.rs b/core/src/global.rs index 6377c1e8c6..386c3abba2 100644 --- a/core/src/global.rs +++ b/core/src/global.rs @@ -78,7 +78,8 @@ pub const TESTING_INITIAL_GRAPH_WEIGHT: u32 = 1; pub const TESTING_INITIAL_DIFFICULTY: u64 = 1; /// Testing max_block_weight (artifically low, just enough to support a few txs). -pub const TESTING_MAX_BLOCK_WEIGHT: usize = 150; +// pub const TESTING_MAX_BLOCK_WEIGHT: usize = 150; +pub const TESTING_MAX_BLOCK_WEIGHT: usize = 250; /// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours, /// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers. 
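The TESTING_MAX_BLOCK_WEIGHT bump from 150 to 250 above makes room for the larger
transactions introduced in the block_max_weight test below. A minimal sketch of the
arithmetic those assertions rely on (formulas inferred from the asserted figures;
the canonical definitions live in grin core):

fn tx_weight(inputs: u64, outputs: u64, kernels: u64) -> u64 {
    // 4 per output plus 1 per kernel, less 1 per input, floored at 1.
    (outputs * 4 + kernels).saturating_sub(inputs).max(1)
}

fn fee_to_weight(fee: u64, weight: u64) -> u64 {
    // Fee per unit of weight, scaled by 1000 for integer precision.
    fee * 1000 / weight
}

fn main() {
    // e.g. the new 1-input, 4-output tx below: weight 16, fee 250, rate 15625.
    assert_eq!(tx_weight(1, 4, 1), 16);
    assert_eq!(fee_to_weight(250, 16), 15_625);
}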
diff --git a/pool/src/types.rs b/pool/src/types.rs index 131b5fed0e..c2580e74c2 100644 --- a/pool/src/types.rs +++ b/pool/src/types.rs @@ -287,9 +287,9 @@ pub trait PoolAdapter: Send + Sync { /// Dummy adapter used as a placeholder for real implementations #[allow(dead_code)] -pub struct NoopAdapter {} +pub struct NoopPoolAdapter {} -impl PoolAdapter for NoopAdapter { +impl PoolAdapter for NoopPoolAdapter { fn tx_accepted(&self, _entry: &PoolEntry) {} fn stem_tx_accepted(&self, _entry: &PoolEntry) -> Result<(), PoolError> { Ok(()) diff --git a/pool/tests/block_building.rs b/pool/tests/block_building.rs index 0fd144fff9..02c9404f3c 100644 --- a/pool/tests/block_building.rs +++ b/pool/tests/block_building.rs @@ -16,126 +16,98 @@ pub mod common; use self::core::core::hash::Hashed; use self::core::core::verifier_cache::LruVerifierCache; -use self::core::core::{Block, BlockHeader, Transaction}; -use self::core::pow::Difficulty; -use self::core::{global, libtx}; +use self::core::global; use self::keychain::{ExtKeychain, Keychain}; +use self::pool::PoolError; use self::util::RwLock; use crate::common::*; use grin_core as core; use grin_keychain as keychain; +use grin_pool as pool; use grin_util as util; use std::sync::Arc; #[test] -fn test_transaction_pool_block_building() { +fn test_transaction_pool_block_building() -> Result<(), PoolError> { util::init_test_logger(); global::set_local_chain_type(global::ChainTypes::AutomatedTesting); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); - let db_root = ".grin_block_building".to_string(); - clean_output_dir(db_root.clone()); + let db_root = "target/.block_building"; + clean_output_dir(db_root.into()); + + let genesis = genesis_block(&keychain); + let chain = Arc::new(init_chain(db_root, genesis)); + let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); + + // Initialize a new pool with our chain adapter. + let mut pool = init_transaction_pool( + Arc::new(ChainAdapter { + chain: chain.clone(), + }), + verifier_cache, + ); + + add_some_blocks(&chain, 3, &keychain); + + let header_1 = chain.get_header_by_height(1).unwrap(); + + // Now create tx to spend an early coinbase (now matured). + // Provides us with some useful outputs to test with. + let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]); + + // Mine that initial tx so we can spend it with multiple txs. + add_block(&chain, vec![initial_tx], &keychain); + + let header = chain.head_header().unwrap(); + + let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]); + let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]); + let root_tx_3 = test_transaction(&keychain, vec![40], vec![38]); + + let child_tx_1 = test_transaction(&keychain, vec![24], vec![22]); + let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]); { - let mut chain = ChainAdapter::init(db_root.clone()).unwrap(); - - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - - // Initialize the chain/txhashset with an initial block - // so we have a non-empty UTXO set. 
- let add_block = - |prev_header: BlockHeader, txs: Vec, chain: &mut ChainAdapter| { - let height = prev_header.height + 1; - let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0); - let fee = txs.iter().map(|x| x.fee()).sum(); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - fee, - false, - ) - .unwrap(); - let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap(); - - // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from). - block.header.prev_root = prev_header.hash(); - - chain.update_db_for_block(&block); - block - }; - - let block = add_block(BlockHeader::default(), vec![], &mut chain); - let header = block.header; - - // Now create tx to spend that first coinbase (now matured). - // Provides us with some useful outputs to test with. - let initial_tx = - test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]); - - // Mine that initial tx so we can spend it with multiple txs - let block = add_block(header, vec![initial_tx], &mut chain); - let header = block.header; - - // Initialize a new pool with our chain adapter. - let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache)); - - let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]); - let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]); - let root_tx_3 = test_transaction(&keychain, vec![40], vec![38]); - - let child_tx_1 = test_transaction(&keychain, vec![24], vec![22]); - let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]); - - { - let mut write_pool = pool.write(); - - // Add the three root txs to the pool. - write_pool - .add_to_pool(test_source(), root_tx_1.clone(), false, &header) - .unwrap(); - write_pool - .add_to_pool(test_source(), root_tx_2.clone(), false, &header) - .unwrap(); - write_pool - .add_to_pool(test_source(), root_tx_3.clone(), false, &header) - .unwrap(); - - // Now add the two child txs to the pool. - write_pool - .add_to_pool(test_source(), child_tx_1.clone(), false, &header) - .unwrap(); - write_pool - .add_to_pool(test_source(), child_tx_2.clone(), false, &header) - .unwrap(); - - assert_eq!(write_pool.total_size(), 5); - } - - let txs = pool.read().prepare_mineable_transactions().unwrap(); - - let block = add_block(header, txs, &mut chain); - - // Check the block contains what we expect. - assert_eq!(block.inputs().len(), 4); - assert_eq!(block.outputs().len(), 4); - assert_eq!(block.kernels().len(), 6); - - assert!(block.kernels().contains(&root_tx_1.kernels()[0])); - assert!(block.kernels().contains(&root_tx_2.kernels()[0])); - assert!(block.kernels().contains(&root_tx_3.kernels()[0])); - assert!(block.kernels().contains(&child_tx_1.kernels()[0])); - assert!(block.kernels().contains(&child_tx_1.kernels()[0])); - - // Now reconcile the transaction pool with the new block - // and check the resulting contents of the pool are what we expect. - { - let mut write_pool = pool.write(); - write_pool.reconcile_block(&block).unwrap(); - - assert_eq!(write_pool.total_size(), 0); - } + // Add the three root txs to the pool. + pool.add_to_pool(test_source(), root_tx_1.clone(), false, &header)?; + pool.add_to_pool(test_source(), root_tx_2.clone(), false, &header)?; + pool.add_to_pool(test_source(), root_tx_3.clone(), false, &header)?; + + // Now add the two child txs to the pool. 
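+ // (Each child tx spends an output created by one of the root txs above, so
+ // the pool has to resolve these dependencies when assembling a block.)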
+ pool.add_to_pool(test_source(), child_tx_1.clone(), false, &header)?; + pool.add_to_pool(test_source(), child_tx_2.clone(), false, &header)?; + + assert_eq!(pool.total_size(), 5); } + + let txs = pool.prepare_mineable_transactions()?; + + add_block(&chain, txs, &keychain); + + // Get full block from head of the chain (block we just processed). + let block = chain.get_block(&chain.head().unwrap().hash()).unwrap(); + + // Check the block contains what we expect. + assert_eq!(block.inputs().len(), 4); + assert_eq!(block.outputs().len(), 4); + assert_eq!(block.kernels().len(), 6); + + assert!(block.kernels().contains(&root_tx_1.kernels()[0])); + assert!(block.kernels().contains(&root_tx_2.kernels()[0])); + assert!(block.kernels().contains(&root_tx_3.kernels()[0])); + assert!(block.kernels().contains(&child_tx_1.kernels()[0])); + assert!(block.kernels().contains(&child_tx_1.kernels()[0])); + + // Now reconcile the transaction pool with the new block + // and check the resulting contents of the pool are what we expect. + { + pool.reconcile_block(&block)?; + assert_eq!(pool.total_size(), 0); + } + // Cleanup db directory - clean_output_dir(db_root.clone()); + clean_output_dir(db_root.into()); + + Ok(()) } diff --git a/pool/tests/block_max_weight.rs b/pool/tests/block_max_weight.rs index fec5dcfccd..5bcff08f32 100644 --- a/pool/tests/block_max_weight.rs +++ b/pool/tests/block_max_weight.rs @@ -15,13 +15,9 @@ //! Test coverage for block building at the limit of max_block_weight. pub mod common; - use self::core::core::hash::Hashed; use self::core::core::verifier_cache::LruVerifierCache; -use self::core::core::{Block, BlockHeader, Transaction}; use self::core::global; -use self::core::libtx; -use self::core::pow::Difficulty; use self::keychain::{ExtKeychain, Keychain}; use self::util::RwLock; use crate::common::*; @@ -37,126 +33,103 @@ fn test_block_building_max_weight() { let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); - let db_root = ".grin_block_building_max_weight".to_string(); - clean_output_dir(db_root.clone()); - - { - let mut chain = ChainAdapter::init(db_root.clone()).unwrap(); - - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - - // Convenient was to add a new block to the chain. - let add_block = - |prev_header: BlockHeader, txs: Vec, chain: &mut ChainAdapter| { - let height = prev_header.height + 1; - let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0); - let fee = txs.iter().map(|x| x.fee()).sum(); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - fee, - false, - ) - .unwrap(); - let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap(); - - // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from). - block.header.prev_root = prev_header.hash(); - - chain.update_db_for_block(&block); - block - }; - - // Initialize the chain/txhashset with an initial block - // so we have a non-empty UTXO set. - let block = add_block(BlockHeader::default(), vec![], &mut chain); - let header = block.header; - - // Now create tx to spend that first coinbase (now matured). - // Provides us with some useful outputs to test with. - let initial_tx = - test_transaction_spending_coinbase(&keychain, &header, vec![100, 200, 300]); - - // Mine that initial tx so we can spend it with multiple txs - let block = add_block(header, vec![initial_tx], &mut chain); - let header = block.header; - - // Initialize a new pool with our chain adapter. 
- let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache)); - - // Build some dependent txs to add to the txpool. - // We will build a block from a subset of these. - let txs = vec![ - test_transaction(&keychain, vec![100], vec![90, 1]), - test_transaction(&keychain, vec![90], vec![80, 2]), - test_transaction(&keychain, vec![200], vec![199]), - test_transaction(&keychain, vec![300], vec![290, 3]), - test_transaction(&keychain, vec![290], vec![280, 4]), - ]; - - // Fees and weights of our original txs in insert order. - assert_eq!( - txs.iter().map(|x| x.fee()).collect::>(), - [9, 8, 1, 7, 6] - ); - assert_eq!( - txs.iter().map(|x| x.tx_weight()).collect::>(), - [8, 8, 4, 8, 8] - ); - assert_eq!( - txs.iter().map(|x| x.fee_to_weight()).collect::>(), - [1125, 1000, 250, 875, 750] - ); - - // Populate our txpool with the txs. - { - let mut write_pool = pool.write(); - for tx in txs { - println!("***** {}", tx.fee_to_weight()); - write_pool - .add_to_pool(test_source(), tx, false, &header) - .unwrap(); - } - } - - // Check we added them all to the txpool successfully. - assert_eq!(pool.read().total_size(), 5); - - // Prepare some "mineable" txs from the txpool. - // Note: We cannot fit all the txs from the txpool into a block. - let txs = pool.read().prepare_mineable_transactions().unwrap(); - - // Fees and weights of the "mineable" txs. - assert_eq!(txs.iter().map(|x| x.fee()).collect::>(), [9, 8, 7]); - assert_eq!( - txs.iter().map(|x| x.tx_weight()).collect::>(), - [8, 8, 8] - ); - assert_eq!( - txs.iter().map(|x| x.fee_to_weight()).collect::>(), - [1125, 1000, 875] - ); - - let block = add_block(header, txs, &mut chain); - - // Check contents of the block itself (including coinbase reward). - assert_eq!(block.inputs().len(), 2); - assert_eq!(block.outputs().len(), 6); - assert_eq!(block.kernels().len(), 4); - - // Now reconcile the transaction pool with the new block - // and check the resulting contents of the pool are what we expect. - { - let mut write_pool = pool.write(); - write_pool.reconcile_block(&block).unwrap(); - - // We should still have 2 tx in the pool after accepting the new block. - // This one exceeded the max block weight when building the block so - // remained in the txpool. - assert_eq!(write_pool.total_size(), 2); - } + let db_root = "target/.block_max_weight"; + clean_output_dir(db_root.into()); + + let genesis = genesis_block(&keychain); + let chain = Arc::new(init_chain(db_root, genesis)); + let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); + + // Initialize a new pool with our chain adapter. + let mut pool = init_transaction_pool( + Arc::new(ChainAdapter { + chain: chain.clone(), + }), + verifier_cache, + ); + + add_some_blocks(&chain, 3, &keychain); + + let header_1 = chain.get_header_by_height(1).unwrap(); + + // Now create tx to spend an early coinbase (now matured). + // Provides us with some useful outputs to test with. + let initial_tx = + test_transaction_spending_coinbase(&keychain, &header_1, vec![100, 200, 300, 1000]); + + // Mine that initial tx so we can spend it with multiple txs. + add_block(&chain, vec![initial_tx], &keychain); + + let header = chain.head_header().unwrap(); + + // Build some dependent txs to add to the txpool. + // We will build a block from a subset of these. 
+	let txs = vec![
+		test_transaction(&keychain, vec![1000], vec![390, 130, 120, 110]),
+		test_transaction(&keychain, vec![100], vec![90, 1]),
+		test_transaction(&keychain, vec![90], vec![80, 2]),
+		test_transaction(&keychain, vec![200], vec![199]),
+		test_transaction(&keychain, vec![300], vec![290, 3]),
+		test_transaction(&keychain, vec![290], vec![280, 4]),
+	];
+
+	// Fees and weights of our original txs in insert order.
+	assert_eq!(
+		txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
+		[250, 9, 8, 1, 7, 6]
+	);
+	assert_eq!(
+		txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
+		[16, 8, 8, 4, 8, 8]
+	);
+	assert_eq!(
+		txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
+		[15625, 1125, 1000, 250, 875, 750]
+	);
+
+	// Populate our txpool with the txs.
+	for tx in txs {
+		pool.add_to_pool(test_source(), tx, false, &header).unwrap();
 	}
+
+	// Check we added them all to the txpool successfully.
+	assert_eq!(pool.total_size(), 6);
+
+	// Prepare some "mineable" txs from the txpool.
+	// Note: We cannot fit all the txs from the txpool into a block.
+	let txs = pool.prepare_mineable_transactions().unwrap();
+
+	// Fees and weights of the "mineable" txs.
+	assert_eq!(
+		txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
+		[250, 9, 8, 7]
+	);
+	assert_eq!(
+		txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
+		[16, 8, 8, 8]
+	);
+	assert_eq!(
+		txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
+		[15625, 1125, 1000, 875]
+	);
+
+	add_block(&chain, txs, &keychain);
+	let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();
+
+	// Check contents of the block itself (including coinbase reward).
+	assert_eq!(block.inputs().len(), 3);
+	assert_eq!(block.outputs().len(), 10);
+	assert_eq!(block.kernels().len(), 5);
+
+	// Now reconcile the transaction pool with the new block
+	// and check the resulting contents of the pool are what we expect.
+	pool.reconcile_block(&block).unwrap();
+
+	// We should still have 2 txs in the pool after accepting the new block.
+	// These exceeded the max block weight when building the block and so
+	// remained in the txpool.
+	assert_eq!(pool.total_size(), 2);
+
 	// Cleanup db directory
-	clean_output_dir(db_root.clone());
+	clean_output_dir(db_root.into());
 }
diff --git a/pool/tests/block_reconciliation.rs b/pool/tests/block_reconciliation.rs
index 804f6a219e..0ec47fe6af 100644
--- a/pool/tests/block_reconciliation.rs
+++ b/pool/tests/block_reconciliation.rs
@@ -16,9 +16,7 @@ pub mod common;
 
 use self::core::core::hash::Hashed;
 use self::core::core::verifier_cache::LruVerifierCache;
-use self::core::core::{Block, BlockHeader};
-use self::core::pow::Difficulty;
-use self::core::{global, libtx};
+use self::core::global;
 use self::keychain::{ExtKeychain, Keychain};
 use self::util::RwLock;
 use crate::common::ChainAdapter;
@@ -30,185 +28,124 @@ use std::sync::Arc;
 
 #[test]
 fn test_transaction_pool_block_reconciliation() {
+	util::init_test_logger();
 	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
 
 	let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
 
-	let db_root = ".grin_block_reconciliation".to_string();
-	clean_output_dir(db_root.clone());
-	{
-		let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
-
-		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
-
-		// Initialize a new pool with our chain adapter.
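The fee and fee-to-weight figures asserted in the max-weight test above follow from simple arithmetic: each `test_transaction` helper appears to set the fee to the input total minus the output total, and `fee_to_weight` scales the fee by 1000 per unit of transaction weight. A self-contained sketch of that arithmetic (an inference from the asserted values, e.g. 250 * 1000 / 16 == 15625, not a verbatim excerpt):

fn fee(inputs: &[u64], outputs: &[u64]) -> u64 {
	inputs.iter().sum::<u64>() - outputs.iter().sum::<u64>()
}

fn fee_to_weight(fee: u64, tx_weight: u64) -> u64 {
	fee * 1_000 / tx_weight
}

fn main() {
	// vec![1000] -> vec![390, 130, 120, 110] leaves a fee of 250.
	assert_eq!(fee(&[1000], &[390, 130, 120, 110]), 250);
	// Weight 16 at fee 250 gives the asserted 15625.
	assert_eq!(fee_to_weight(250, 16), 15_625);
	assert_eq!(fee_to_weight(9, 8), 1_125);
}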
- let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone())); - - let header = { - let height = 1; - let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - 0, - false, - ) + let db_root = "target/.block_reconciliation"; + clean_output_dir(db_root.into()); + + let genesis = genesis_block(&keychain); + let chain = Arc::new(init_chain(db_root, genesis)); + let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); + + // Initialize a new pool with our chain adapter. + let mut pool = init_transaction_pool( + Arc::new(ChainAdapter { + chain: chain.clone(), + }), + verifier_cache, + ); + + add_some_blocks(&chain, 3, &keychain); + + let header_1 = chain.get_header_by_height(1).unwrap(); + + // Now create tx to spend an early coinbase (now matured). + // Provides us with some useful outputs to test with. + let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]); + + // Mine that initial tx so we can spend it with multiple txs. + add_block(&chain, vec![initial_tx], &keychain); + + let header = chain.head_header().unwrap(); + + // Preparation: We will introduce three root pool transactions. + // 1. A transaction that should be invalidated because it is exactly + // contained in the block. + // 2. A transaction that should be invalidated because the input is + // consumed in the block, although it is not exactly consumed. + // 3. A transaction that should remain after block reconciliation. + let block_transaction = test_transaction(&keychain, vec![10], vec![8]); + let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]); + let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]); + + // We will also introduce a few children: + // 4. A transaction that descends from transaction 1, that is in + // turn exactly contained in the block. + let block_child = test_transaction(&keychain, vec![8], vec![5, 1]); + // 5. A transaction that descends from transaction 4, that is not + // contained in the block at all and should be valid after + // reconciliation. + let pool_child = test_transaction(&keychain, vec![5], vec![3]); + // 6. A transaction that descends from transaction 2 that does not + // conflict with anything in the block in any way, but should be + // invalidated (orphaned). + let conflict_child = test_transaction(&keychain, vec![12], vec![2]); + // 7. A transaction that descends from transaction 2 that should be + // valid due to its inputs being satisfied by the block. + let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]); + // 8. A transaction that descends from transaction 3 that should be + // invalidated due to an output conflict. + let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]); + // 9. A transaction that descends from transaction 3 that should remain + // valid after reconciliation. + let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]); + // 10. A transaction that descends from both transaction 6 and + // transaction 9 + let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]); + + let txs_to_add = vec![ + block_transaction, + conflict_transaction, + valid_transaction.clone(), + block_child, + pool_child.clone(), + conflict_child, + conflict_valid_child.clone(), + valid_child_conflict.clone(), + valid_child_valid.clone(), + mixed_child, + ]; + + // First we add the above transactions to the pool. 
+ // All should be accepted. + assert_eq!(pool.total_size(), 0); + + for tx in &txs_to_add { + pool.add_to_pool(test_source(), tx.clone(), false, &header) .unwrap(); - let genesis = BlockHeader::default(); - let mut block = Block::new(&genesis, vec![], Difficulty::min(), reward).unwrap(); - - // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from). - block.header.prev_root = genesis.hash(); - - chain.update_db_for_block(&block); - - block.header - }; - - // Now create tx to spend that first coinbase (now matured). - // Provides us with some useful outputs to test with. - let initial_tx = - test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]); - - let block = { - let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0); - let fees = initial_tx.fee(); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - fees, - false, - ) - .unwrap(); - let mut block = - Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap(); - - // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from). - block.header.prev_root = header.hash(); - - chain.update_db_for_block(&block); - - block - }; - - let header = block.header; - - // Preparation: We will introduce three root pool transactions. - // 1. A transaction that should be invalidated because it is exactly - // contained in the block. - // 2. A transaction that should be invalidated because the input is - // consumed in the block, although it is not exactly consumed. - // 3. A transaction that should remain after block reconciliation. - let block_transaction = test_transaction(&keychain, vec![10], vec![8]); - let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]); - let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]); - - // We will also introduce a few children: - // 4. A transaction that descends from transaction 1, that is in - // turn exactly contained in the block. - let block_child = test_transaction(&keychain, vec![8], vec![5, 1]); - // 5. A transaction that descends from transaction 4, that is not - // contained in the block at all and should be valid after - // reconciliation. - let pool_child = test_transaction(&keychain, vec![5], vec![3]); - // 6. A transaction that descends from transaction 2 that does not - // conflict with anything in the block in any way, but should be - // invalidated (orphaned). - let conflict_child = test_transaction(&keychain, vec![12], vec![2]); - // 7. A transaction that descends from transaction 2 that should be - // valid due to its inputs being satisfied by the block. - let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]); - // 8. A transaction that descends from transaction 3 that should be - // invalidated due to an output conflict. - let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]); - // 9. A transaction that descends from transaction 3 that should remain - // valid after reconciliation. - let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]); - // 10. 
A transaction that descends from both transaction 6 and - // transaction 9 - let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]); - - let txs_to_add = vec![ - block_transaction, - conflict_transaction, - valid_transaction.clone(), - block_child, - pool_child.clone(), - conflict_child, - conflict_valid_child.clone(), - valid_child_conflict.clone(), - valid_child_valid.clone(), - mixed_child, - ]; - - // First we add the above transactions to the pool. - // All should be accepted. - { - let mut write_pool = pool.write(); - assert_eq!(write_pool.total_size(), 0); - - for tx in &txs_to_add { - write_pool - .add_to_pool(test_source(), tx.clone(), false, &header) - .unwrap(); - } - - assert_eq!(write_pool.total_size(), txs_to_add.len()); - } - - // Now we prepare the block that will cause the above conditions to be met. - // First, the transactions we want in the block: - // - Copy of 1 - let block_tx_1 = test_transaction(&keychain, vec![10], vec![8]); - // - Conflict w/ 2, satisfies 7 - let block_tx_2 = test_transaction(&keychain, vec![20], vec![6]); - // - Copy of 4 - let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]); - // - Output conflict w/ 8 - let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]); - - let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4]; - - // Now apply this block. - let block = { - let key_id = ExtKeychain::derive_key_id(1, 3, 0, 0, 0); - let fees = block_txs.iter().map(|tx| tx.fee()).sum(); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - fees, - false, - ) - .unwrap(); - let mut block = Block::new(&header, block_txs, Difficulty::min(), reward).unwrap(); - - // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from). - block.header.prev_root = header.hash(); - - chain.update_db_for_block(&block); - block - }; - - // Check the pool still contains everything we expect at this point. - { - let write_pool = pool.write(); - assert_eq!(write_pool.total_size(), txs_to_add.len()); - } - - // And reconcile the pool with this latest block. - { - let mut write_pool = pool.write(); - write_pool.reconcile_block(&block).unwrap(); - - assert_eq!(write_pool.total_size(), 4); - assert_eq!(write_pool.txpool.entries[0].tx, valid_transaction); - assert_eq!(write_pool.txpool.entries[1].tx, pool_child); - assert_eq!(write_pool.txpool.entries[2].tx, conflict_valid_child); - assert_eq!(write_pool.txpool.entries[3].tx, valid_child_valid); - } } + + assert_eq!(pool.total_size(), txs_to_add.len()); + + // Now we prepare the block that will cause the above conditions to be met. + // First, the transactions we want in the block: + // - Copy of 1 + let block_tx_1 = test_transaction(&keychain, vec![10], vec![8]); + // - Conflict w/ 2, satisfies 7 + let block_tx_2 = test_transaction(&keychain, vec![20], vec![6]); + // - Copy of 4 + let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]); + // - Output conflict w/ 8 + let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]); + + let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4]; + add_block(&chain, block_txs, &keychain); + let block = chain.get_block(&chain.head().unwrap().hash()).unwrap(); + + // Check the pool still contains everything we expect at this point. + assert_eq!(pool.total_size(), txs_to_add.len()); + + // And reconcile the pool with this latest block. 
+ pool.reconcile_block(&block).unwrap(); + + assert_eq!(pool.total_size(), 4); + assert_eq!(pool.txpool.entries[0].tx, valid_transaction); + assert_eq!(pool.txpool.entries[1].tx, pool_child); + assert_eq!(pool.txpool.entries[2].tx, conflict_valid_child); + assert_eq!(pool.txpool.entries[3].tx, valid_child_valid); + // Cleanup db directory - clean_output_dir(db_root.clone()); + clean_output_dir(db_root.into()); } diff --git a/pool/tests/coinbase_maturity.rs b/pool/tests/coinbase_maturity.rs index 53e9d03d1b..f0952e757a 100644 --- a/pool/tests/coinbase_maturity.rs +++ b/pool/tests/coinbase_maturity.rs @@ -14,12 +14,10 @@ pub mod common; -use self::core::core::hash::Hash; use self::core::core::verifier_cache::LruVerifierCache; -use self::core::core::{BlockHeader, BlockSums, Transaction}; use self::core::global; use self::keychain::{ExtKeychain, Keychain}; -use self::pool::types::{BlockChain, PoolError}; +use self::pool::types::PoolError; use self::util::RwLock; use crate::common::*; use grin_core as core; @@ -28,61 +26,49 @@ use grin_pool as pool; use grin_util as util; use std::sync::Arc; -#[derive(Clone)] -pub struct CoinbaseMaturityErrorChainAdapter {} +/// Test we correctly verify coinbase maturity when adding txs to the pool. +#[test] +fn test_coinbase_maturity() { + util::init_test_logger(); + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); -impl CoinbaseMaturityErrorChainAdapter { - pub fn new() -> CoinbaseMaturityErrorChainAdapter { - CoinbaseMaturityErrorChainAdapter {} - } -} + let db_root = "target/.coinbase_maturity"; + clean_output_dir(db_root.into()); -impl BlockChain for CoinbaseMaturityErrorChainAdapter { - fn chain_head(&self) -> Result { - unimplemented!(); - } + let genesis = genesis_block(&keychain); + let chain = Arc::new(init_chain(db_root, genesis)); + let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - fn get_block_header(&self, _hash: &Hash) -> Result { - unimplemented!(); - } + // Initialize a new pool with our chain adapter. + let mut pool = init_transaction_pool( + Arc::new(ChainAdapter { + chain: chain.clone(), + }), + verifier_cache, + ); - fn get_block_sums(&self, _hash: &Hash) -> Result { - unimplemented!(); - } + // Add a single block, introducing coinbase output to be spent later. + add_block(&chain, vec![], &keychain); - fn validate_tx(&self, _tx: &Transaction) -> Result<(), PoolError> { - unimplemented!(); - } + let header_1 = chain.get_header_by_height(1).unwrap(); + let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![100]); - // Returns an ImmatureCoinbase for every tx we pass in. - fn verify_coinbase_maturity(&self, _tx: &Transaction) -> Result<(), PoolError> { - Err(PoolError::ImmatureCoinbase) - } + // Coinbase is not yet matured and cannot be spent. + let header = chain.head_header().unwrap(); + assert_eq!( + pool.add_to_pool(test_source(), tx.clone(), true, &header) + .err(), + Some(PoolError::ImmatureCoinbase) + ); - // Mocking this out for these tests. - fn verify_tx_lock_height(&self, _tx: &Transaction) -> Result<(), PoolError> { + // Add 2 more blocks. Original coinbase output is now matured and can be spent. + add_some_blocks(&chain, 2, &keychain); + let header = chain.head_header().unwrap(); + assert_eq!( + pool.add_to_pool(test_source(), tx.clone(), true, &header), Ok(()) - } -} - -/// Test we correctly verify coinbase maturity when adding txs to the pool. 
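The maturity rule the rewritten coinbase test pins down (previously mocked by the adapter being deleted here) reduces to a height comparison. A sketch, assuming a coinbase maturity of 3 blocks under AutomatedTesting, which is consistent with the two extra blocks the new test must mine before the spend is accepted:

fn coinbase_is_mature(coinbase_height: u64, spend_height: u64, maturity: u64) -> bool {
	spend_height >= coinbase_height + maturity
}

fn main() {
	let maturity = 3;
	// Coinbase minted at height 1; a spend in block 2 is immature.
	assert!(!coinbase_is_mature(1, 2, maturity));
	// Two blocks later a spend lands at height 4 and is mature.
	assert!(coinbase_is_mature(1, 4, maturity));
}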
-#[test] -fn test_coinbase_maturity() { - global::set_local_chain_type(global::ChainTypes::AutomatedTesting); - let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); - - // Mocking this up with an adapter that will raise an error for coinbase - // maturity. - let chain = Arc::new(CoinbaseMaturityErrorChainAdapter::new()); - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - let pool = RwLock::new(test_setup(chain, verifier_cache)); + ); - { - let mut write_pool = pool.write(); - let tx = test_transaction(&keychain, vec![50], vec![49]); - match write_pool.add_to_pool(test_source(), tx.clone(), true, &BlockHeader::default()) { - Err(PoolError::ImmatureCoinbase) => {} - _ => panic!("Expected an immature coinbase error here."), - } - } + clean_output_dir(db_root.into()); } diff --git a/pool/tests/common.rs b/pool/tests/common.rs index a5b0fc0aa5..3b602abd8e 100644 --- a/pool/tests/common.rs +++ b/pool/tests/common.rs @@ -14,144 +14,200 @@ //! Common test functions -use self::chain::store::ChainStore; -use self::chain::types::Tip; -use self::core::core::hash::{Hash, Hashed}; -use self::core::core::verifier_cache::VerifierCache; -use self::core::core::{Block, BlockHeader, BlockSums, Committed, KernelFeatures, Transaction}; -use self::core::libtx; -use self::keychain::{ExtKeychain, Keychain}; +use self::chain::types::{NoopAdapter, Options}; +use self::chain::Chain; +use self::core::consensus; +use self::core::core::hash::Hash; +use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; +use self::core::core::{Block, BlockHeader, BlockSums, KernelFeatures, Transaction}; +use self::core::genesis; +use self::core::global; +use self::core::libtx::{build, reward, ProofBuilder}; +use self::core::pow; +use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain}; use self::pool::types::*; use self::pool::TransactionPool; -use self::util::secp::pedersen::Commitment; use self::util::RwLock; +use chrono::Duration; use grin_chain as chain; use grin_core as core; use grin_keychain as keychain; use grin_pool as pool; use grin_util as util; -use std::collections::HashSet; use std::fs; use std::sync::Arc; -#[derive(Clone)] -pub struct ChainAdapter { - pub store: Arc>, - pub utxo: Arc>>, +/// Build genesis block with reward (non-empty, like we have in mainnet). 
+pub fn genesis_block<K>(keychain: &K) -> Block
+where
+	K: Keychain,
+{
+	let key_id = keychain::ExtKeychain::derive_key_id(1, 0, 0, 0, 0);
+	let reward = reward::output(keychain, &ProofBuilder::new(keychain), &key_id, 0, false).unwrap();
+
+	genesis::genesis_dev().with_reward(reward.0, reward.1)
 }
 
-impl ChainAdapter {
-	pub fn init(db_root: String) -> Result<ChainAdapter, String> {
-		let target_dir = format!("target/{}", db_root);
-		let chain_store = ChainStore::new(&target_dir)
-			.map_err(|e| format!("failed to init chain_store, {:?}", e))?;
-		let store = Arc::new(RwLock::new(chain_store));
-		let utxo = Arc::new(RwLock::new(HashSet::new()));
+pub fn init_chain(dir_name: &str, genesis: Block) -> Chain {
+	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
+	Chain::init(
+		dir_name.to_string(),
+		Arc::new(NoopAdapter {}),
+		genesis,
+		pow::verify_size,
+		verifier_cache,
+		false,
+	)
+	.unwrap()
+}
 
-		Ok(ChainAdapter { store, utxo })
+pub fn add_some_blocks<K>(chain: &Chain, count: u64, keychain: &K)
+where
+	K: Keychain,
+{
+	for _ in 0..count {
+		add_block(chain, vec![], keychain);
 	}
+}
 
-	pub fn update_db_for_block(&self, block: &Block) {
-		let header = &block.header;
-		let tip = Tip::from_header(header);
-		let s = self.store.write();
-		let batch = s.batch().unwrap();
-
-		batch.save_block_header(header).unwrap();
-		batch.save_body_head(&tip).unwrap();
-
-		// Retrieve previous block_sums from the db.
-		let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&tip.prev_block_h) {
-			prev_sums
-		} else {
-			BlockSums::default()
-		};
-
-		// Overage is based purely on the new block.
-		// Previous block_sums have taken all previous overage into account.
-		let overage = header.overage();
-
-		// Offset on the other hand is the total kernel offset from the new block.
-		let offset = header.total_kernel_offset();
-
-		// Verify the kernel sums for the block_sums with the new block applied.
- let (utxo_sum, kernel_sum) = (prev_sums, block as &dyn Committed) - .verify_kernel_sums(overage, offset) - .unwrap(); - - let block_sums = BlockSums { - utxo_sum, - kernel_sum, - }; - batch.save_block_sums(&header.hash(), block_sums).unwrap(); - - batch.commit().unwrap(); - - { - let mut utxo = self.utxo.write(); - for x in block.inputs() { - utxo.remove(&x.commitment()); - } - for x in block.outputs() { - utxo.insert(x.commitment()); - } - } - } +pub fn add_block(chain: &Chain, txs: Vec, keychain: &K) +where + K: Keychain, +{ + let prev = chain.head_header().unwrap(); + let height = prev.height + 1; + let next_header_info = consensus::next_difficulty(height, chain.difficulty_iter().unwrap()); + let fee = txs.iter().map(|x| x.fee()).sum(); + let key_id = ExtKeychainPath::new(1, height as u32, 0, 0, 0).to_identifier(); + let reward = + reward::output(keychain, &ProofBuilder::new(keychain), &key_id, fee, false).unwrap(); + + let mut block = Block::new(&prev, txs, next_header_info.clone().difficulty, reward).unwrap(); + + block.header.timestamp = prev.timestamp + Duration::seconds(60); + block.header.pow.secondary_scaling = next_header_info.secondary_scaling; + + chain.set_txhashset_roots(&mut block).unwrap(); + + let edge_bits = global::min_edge_bits(); + block.header.pow.proof.edge_bits = edge_bits; + pow::pow_size( + &mut block.header, + next_header_info.difficulty, + global::proofsize(), + edge_bits, + ) + .unwrap(); + + chain.process_block(block, Options::NONE).unwrap(); +} + +#[derive(Clone)] +pub struct ChainAdapter { + pub chain: Arc, } +// impl ChainAdapter { +// pub fn init(db_root: String) -> Result { +// let target_dir = format!("target/{}", db_root); +// let chain_store = ChainStore::new(&target_dir) +// .map_err(|e| format!("failed to init chain_store, {:?}", e))?; +// let store = Arc::new(RwLock::new(chain_store)); +// let utxo = Arc::new(RwLock::new(HashSet::new())); + +// Ok(ChainAdapter { store, utxo }) +// } + +// pub fn update_db_for_block(&self, block: &Block) { +// let header = &block.header; +// let tip = Tip::from_header(header); +// let s = self.store.write(); +// let batch = s.batch().unwrap(); + +// batch.save_block_header(header).unwrap(); +// batch.save_body_head(&tip).unwrap(); + +// // Retrieve previous block_sums from the db. +// let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&tip.prev_block_h) { +// prev_sums +// } else { +// BlockSums::default() +// }; + +// // Overage is based purely on the new block. +// // Previous block_sums have taken all previous overage into account. +// let overage = header.overage(); + +// // Offset on the other hand is the total kernel offset from the new block. +// let offset = header.total_kernel_offset(); + +// // Verify the kernel sums for the block_sums with the new block applied. 
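Together, `genesis_block`, `init_chain`, `add_some_blocks` and `add_block` above give every pool test the same compact setup against a real `Chain`. A condensed sketch of that flow (hypothetical directory name, and assuming this module's imports are in scope):

fn setup_sketch() {
	let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
	let genesis = genesis_block(&keychain);
	let chain = Arc::new(init_chain("target/.sketch", genesis));

	// A few empty blocks so the UTXO set is non-trivial...
	add_some_blocks(&chain, 3, &keychain);

	// ...then one block carrying specific txs (none here).
	add_block(&chain, vec![], &keychain);
	assert_eq!(chain.head_header().unwrap().height, 4);
}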
+// let (utxo_sum, kernel_sum) = (prev_sums, block as &dyn Committed) +// .verify_kernel_sums(overage, offset) +// .unwrap(); + +// let block_sums = BlockSums { +// utxo_sum, +// kernel_sum, +// }; +// batch.save_block_sums(&header.hash(), block_sums).unwrap(); + +// batch.commit().unwrap(); + +// { +// let mut utxo = self.utxo.write(); +// for x in block.inputs() { +// utxo.remove(&x.commitment()); +// } +// for x in block.outputs() { +// utxo.insert(x.commitment()); +// } +// } +// } +// } + impl BlockChain for ChainAdapter { fn chain_head(&self) -> Result { - let s = self.store.read(); - s.head_header() - .map_err(|_| PoolError::Other(format!("failed to get chain head"))) + self.chain + .head_header() + .map_err(|_| PoolError::Other("failed to get chain head".into())) } fn get_block_header(&self, hash: &Hash) -> Result { - let s = self.store.read(); - s.get_block_header(hash) - .map_err(|_| PoolError::Other(format!("failed to get block header"))) + self.chain + .get_block_header(hash) + .map_err(|_| PoolError::Other("failed to get block header".into())) } fn get_block_sums(&self, hash: &Hash) -> Result { - let s = self.store.read(); - s.get_block_sums(hash) - .map_err(|_| PoolError::Other(format!("failed to get block sums"))) + self.chain + .get_block_sums(hash) + .map_err(|_| PoolError::Other("failed to get block sums".into())) } fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> { - let utxo = self.utxo.read(); - - for x in tx.outputs() { - if utxo.contains(&x.commitment()) { - return Err(PoolError::Other(format!("output commitment not unique"))); - } - } - - for x in tx.inputs() { - if !utxo.contains(&x.commitment()) { - return Err(PoolError::Other(format!("not in utxo set"))); - } - } - - Ok(()) + self.chain + .validate_tx(tx) + .map_err(|_| PoolError::Other("failed to validate tx".into())) } - // Mocking this check out for these tests. - // We will test the Merkle proof verification logic elsewhere. - fn verify_coinbase_maturity(&self, _tx: &Transaction) -> Result<(), PoolError> { - Ok(()) + fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), PoolError> { + self.chain + .verify_coinbase_maturity(tx) + .map_err(|_| PoolError::ImmatureCoinbase) } - // Mocking this out for these tests. 
- fn verify_tx_lock_height(&self, _tx: &Transaction) -> Result<(), PoolError> { - Ok(()) + fn verify_tx_lock_height(&self, tx: &Transaction) -> Result<(), PoolError> { + self.chain + .verify_tx_lock_height(tx) + .map_err(|_| PoolError::ImmatureTransaction) } } -pub fn test_setup( +pub fn init_transaction_pool( chain: Arc, verifier_cache: Arc>, -) -> TransactionPool +) -> TransactionPool where B: BlockChain, V: VerifierCache + 'static, @@ -165,7 +221,7 @@ where }, chain.clone(), verifier_cache.clone(), - Arc::new(NoopAdapter {}), + Arc::new(NoopPoolAdapter {}), ) } @@ -189,19 +245,19 @@ where // single input spending a single coinbase (deterministic key_id aka height) { let key_id = ExtKeychain::derive_key_id(1, header.height as u32, 0, 0, 0); - tx_elements.push(libtx::build::coinbase_input(coinbase_reward, key_id)); + tx_elements.push(build::coinbase_input(coinbase_reward, key_id)); } for output_value in output_values { let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0); - tx_elements.push(libtx::build::output(output_value, key_id)); + tx_elements.push(build::output(output_value, key_id)); } - libtx::build::transaction( + build::transaction( KernelFeatures::Plain { fee: fees as u64 }, tx_elements, keychain, - &libtx::ProofBuilder::new(keychain), + &ProofBuilder::new(keychain), ) .unwrap() } @@ -240,19 +296,19 @@ where for input_value in input_values { let key_id = ExtKeychain::derive_key_id(1, input_value as u32, 0, 0, 0); - tx_elements.push(libtx::build::input(input_value, key_id)); + tx_elements.push(build::input(input_value, key_id)); } for output_value in output_values { let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0); - tx_elements.push(libtx::build::output(output_value, key_id)); + tx_elements.push(build::output(output_value, key_id)); } - libtx::build::transaction( + build::transaction( kernel_features, tx_elements, keychain, - &libtx::ProofBuilder::new(keychain), + &ProofBuilder::new(keychain), ) .unwrap() } @@ -262,7 +318,7 @@ pub fn test_source() -> TxSource { } pub fn clean_output_dir(db_root: String) { - if let Err(e) = fs::remove_dir_all(format!("target/{}", db_root)) { + if let Err(e) = fs::remove_dir_all(db_root) { println!("cleaning output dir failed - {:?}", e) } } diff --git a/pool/tests/nrd_kernels.rs b/pool/tests/nrd_kernels.rs deleted file mode 100644 index 75839d31a5..0000000000 --- a/pool/tests/nrd_kernels.rs +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2020 The Grin Developers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -pub mod common; - -use self::core::core::hash::Hashed; -use self::core::core::verifier_cache::LruVerifierCache; -use self::core::core::{ - Block, BlockHeader, HeaderVersion, KernelFeatures, NRDRelativeHeight, Transaction, -}; -use self::core::global; -use self::core::pow::Difficulty; -use self::core::{consensus, libtx}; -use self::keychain::{ExtKeychain, Keychain}; -use self::pool::types::PoolError; -use self::util::RwLock; -use crate::common::*; -use grin_core as core; -use grin_keychain as keychain; -use grin_pool as pool; -use grin_util as util; -use std::sync::Arc; - -#[test] -fn test_nrd_kernel_verification_block_version() { - util::init_test_logger(); - global::set_local_chain_type(global::ChainTypes::AutomatedTesting); - global::set_local_nrd_enabled(true); - - let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); - - let db_root = ".grin_nrd_kernels"; - clean_output_dir(db_root.into()); - - let mut chain = ChainAdapter::init(db_root.into()).unwrap(); - - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - - // Initialize the chain/txhashset with an initial block - // so we have a non-empty UTXO set. - let add_block = |prev_header: BlockHeader, txs: Vec, chain: &mut ChainAdapter| { - let height = prev_header.height + 1; - let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0); - let fee = txs.iter().map(|x| x.fee()).sum(); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - fee, - false, - ) - .unwrap(); - let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap(); - - // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from). - block.header.prev_root = prev_header.hash(); - - chain.update_db_for_block(&block); - block - }; - - let block = add_block(BlockHeader::default(), vec![], &mut chain); - let header = block.header; - - // Now create tx to spend that first coinbase (now matured). - // Provides us with some useful outputs to test with. - let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]); - - // Mine that initial tx so we can spend it with multiple txs - let mut block = add_block(header, vec![initial_tx], &mut chain); - let mut header = block.header; - - // Initialize a new pool with our chain adapter. - let mut pool = test_setup(Arc::new(chain.clone()), verifier_cache); - - let tx_1 = test_transaction_with_kernel_features( - &keychain, - vec![10, 20], - vec![24], - KernelFeatures::NoRecentDuplicate { - fee: 6, - relative_height: NRDRelativeHeight::new(1440).unwrap(), - }, - ); - - assert!(header.version < HeaderVersion(4)); - - assert_eq!( - pool.add_to_pool(test_source(), tx_1.clone(), false, &header), - Err(PoolError::NRDKernelPreHF3) - ); - - // Now mine several more blocks out to HF3 - for _ in 0..7 { - block = add_block(header, vec![], &mut chain); - header = block.header; - } - assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK); - assert_eq!(header.version, HeaderVersion(4)); - - // Now confirm we can successfully add transaction with NRD kernel to txpool. 
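For reference while reading the replacement tests below, the kernel variant at the centre of all the NRD cases, built from types that appear in this diff; a sketch, with the 1440-block window taken from the tests themselves:

use grin_core::core::{KernelFeatures, NRDRelativeHeight};

fn nrd_features(fee: u64) -> KernelFeatures {
	KernelFeatures::NoRecentDuplicate {
		fee,
		// A duplicate of this kernel is rejected within 1440 blocks.
		relative_height: NRDRelativeHeight::new(1440).unwrap(),
	}
}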
- assert_eq!( - pool.add_to_pool(test_source(), tx_1.clone(), false, &header), - Ok(()), - ); - - assert_eq!(pool.total_size(), 1); - - let txs = pool.prepare_mineable_transactions().unwrap(); - assert_eq!(txs.len(), 1); - - // Cleanup db directory - clean_output_dir(db_root.into()); -} - -#[test] -fn test_nrd_kernel_verification_nrd_disabled() { - util::init_test_logger(); - global::set_local_chain_type(global::ChainTypes::AutomatedTesting); - - let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); - - let db_root = ".grin_nrd_kernel_disabled"; - clean_output_dir(db_root.into()); - - let mut chain = ChainAdapter::init(db_root.into()).unwrap(); - - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - - // Initialize the chain/txhashset with an initial block - // so we have a non-empty UTXO set. - let add_block = |prev_header: BlockHeader, txs: Vec, chain: &mut ChainAdapter| { - let height = prev_header.height + 1; - let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0); - let fee = txs.iter().map(|x| x.fee()).sum(); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - fee, - false, - ) - .unwrap(); - let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap(); - - // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from). - block.header.prev_root = prev_header.hash(); - - chain.update_db_for_block(&block); - block - }; - - let block = add_block(BlockHeader::default(), vec![], &mut chain); - let header = block.header; - - // Now create tx to spend that first coinbase (now matured). - // Provides us with some useful outputs to test with. - let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]); - - // Mine that initial tx so we can spend it with multiple txs - let mut block = add_block(header, vec![initial_tx], &mut chain); - let mut header = block.header; - - // Initialize a new pool with our chain adapter. - let mut pool = test_setup(Arc::new(chain.clone()), verifier_cache); - - let tx_1 = test_transaction_with_kernel_features( - &keychain, - vec![10, 20], - vec![24], - KernelFeatures::NoRecentDuplicate { - fee: 6, - relative_height: NRDRelativeHeight::new(1440).unwrap(), - }, - ); - - assert!(header.version < HeaderVersion(4)); - - assert_eq!( - pool.add_to_pool(test_source(), tx_1.clone(), false, &header), - Err(PoolError::NRDKernelNotEnabled) - ); - - // Now mine several more blocks out to HF3 - for _ in 0..7 { - block = add_block(header, vec![], &mut chain); - header = block.header; - } - assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK); - assert_eq!(header.version, HeaderVersion(4)); - - // NRD kernel support not enabled via feature flag, so not valid. - assert_eq!( - pool.add_to_pool(test_source(), tx_1.clone(), false, &header), - Err(PoolError::NRDKernelNotEnabled) - ); - - assert_eq!(pool.total_size(), 0); - - let txs = pool.prepare_mineable_transactions().unwrap(); - assert_eq!(txs.len(), 0); - - // Cleanup db directory - clean_output_dir(db_root.into()); -} diff --git a/pool/tests/nrd_kernels_disabled.rs b/pool/tests/nrd_kernels_disabled.rs new file mode 100644 index 0000000000..1966812ac0 --- /dev/null +++ b/pool/tests/nrd_kernels_disabled.rs @@ -0,0 +1,98 @@ +// Copyright 2020 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod common; + +use self::core::consensus; +use self::core::core::verifier_cache::LruVerifierCache; +use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight}; +use self::core::global; +use self::keychain::{ExtKeychain, Keychain}; +use self::pool::types::PoolError; +use self::util::RwLock; +use crate::common::*; +use grin_core as core; +use grin_keychain as keychain; +use grin_pool as pool; +use grin_util as util; +use std::sync::Arc; + +#[test] +fn test_nrd_kernels_disabled() { + util::init_test_logger(); + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); + global::set_local_nrd_enabled(false); + + let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); + + let db_root = "target/.nrd_kernels_disabled"; + clean_output_dir(db_root.into()); + + let genesis = genesis_block(&keychain); + let chain = Arc::new(init_chain(db_root, genesis)); + let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); + + // Initialize a new pool with our chain adapter. + let mut pool = init_transaction_pool( + Arc::new(ChainAdapter { + chain: chain.clone(), + }), + verifier_cache, + ); + + // Add some blocks. + add_some_blocks(&chain, 3, &keychain); + + // Spend the initial coinbase. + let header_1 = chain.get_header_by_height(1).unwrap(); + let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]); + add_block(&chain, vec![tx], &keychain); + + let tx_1 = test_transaction_with_kernel_features( + &keychain, + vec![10, 20], + vec![24], + KernelFeatures::NoRecentDuplicate { + fee: 6, + relative_height: NRDRelativeHeight::new(1440).unwrap(), + }, + ); + + let header = chain.head_header().unwrap(); + assert!(header.version < HeaderVersion(4)); + + assert_eq!( + pool.add_to_pool(test_source(), tx_1.clone(), false, &header), + Err(PoolError::NRDKernelNotEnabled) + ); + + // Now mine several more blocks out to HF3 + add_some_blocks(&chain, 5, &keychain); + let header = chain.head_header().unwrap(); + assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK); + assert_eq!(header.version, HeaderVersion(4)); + + // NRD kernel support not enabled via feature flag, so not valid. + assert_eq!( + pool.add_to_pool(test_source(), tx_1.clone(), false, &header), + Err(PoolError::NRDKernelNotEnabled) + ); + + assert_eq!(pool.total_size(), 0); + let txs = pool.prepare_mineable_transactions().unwrap(); + assert_eq!(txs.len(), 0); + + // Cleanup db directory + clean_output_dir(db_root.into()); +} diff --git a/pool/tests/nrd_kernels_enabled.rs b/pool/tests/nrd_kernels_enabled.rs new file mode 100644 index 0000000000..3524eaa41c --- /dev/null +++ b/pool/tests/nrd_kernels_enabled.rs @@ -0,0 +1,98 @@ +// Copyright 2020 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod common;
+
+use self::core::consensus;
+use self::core::core::verifier_cache::LruVerifierCache;
+use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight};
+use self::core::global;
+use self::keychain::{ExtKeychain, Keychain};
+use self::pool::types::PoolError;
+use self::util::RwLock;
+use crate::common::*;
+use grin_core as core;
+use grin_keychain as keychain;
+use grin_pool as pool;
+use grin_util as util;
+use std::sync::Arc;
+
+#[test]
+fn test_nrd_kernels_enabled() {
+	util::init_test_logger();
+	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
+	global::set_local_nrd_enabled(true);
+
+	let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
+
+	let db_root = "target/.nrd_kernels_enabled";
+	clean_output_dir(db_root.into());
+
+	let genesis = genesis_block(&keychain);
+	let chain = Arc::new(init_chain(db_root, genesis));
+	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
+
+	// Initialize a new pool with our chain adapter.
+	let mut pool = init_transaction_pool(
+		Arc::new(ChainAdapter {
+			chain: chain.clone(),
+		}),
+		verifier_cache,
+	);
+
+	// Add some blocks.
+	add_some_blocks(&chain, 3, &keychain);
+
+	// Spend the initial coinbase.
+	let header_1 = chain.get_header_by_height(1).unwrap();
+	let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
+	add_block(&chain, vec![tx], &keychain);
+
+	let tx_1 = test_transaction_with_kernel_features(
+		&keychain,
+		vec![10, 20],
+		vec![24],
+		KernelFeatures::NoRecentDuplicate {
+			fee: 6,
+			relative_height: NRDRelativeHeight::new(1440).unwrap(),
+		},
+	);
+
+	let header = chain.head_header().unwrap();
+	assert!(header.version < HeaderVersion(4));
+
+	assert_eq!(
+		pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
+		Err(PoolError::NRDKernelPreHF3)
+	);
+
+	// Now mine several more blocks out to HF3
+	add_some_blocks(&chain, 5, &keychain);
+	let header = chain.head_header().unwrap();
+	assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
+	assert_eq!(header.version, HeaderVersion(4));
+
+	// NRD kernel support enabled via feature flag, and we are now at HF3, so valid.
+ assert_eq!( + pool.add_to_pool(test_source(), tx_1.clone(), false, &header), + Ok(()) + ); + + assert_eq!(pool.total_size(), 1); + let txs = pool.prepare_mineable_transactions().unwrap(); + assert_eq!(txs.len(), 1); + + // Cleanup db directory + clean_output_dir(db_root.into()); +} diff --git a/pool/tests/transaction_pool.rs b/pool/tests/transaction_pool.rs index ee9457952b..a8e3dd4612 100644 --- a/pool/tests/transaction_pool.rs +++ b/pool/tests/transaction_pool.rs @@ -15,9 +15,8 @@ pub mod common; use self::core::core::verifier_cache::LruVerifierCache; -use self::core::core::{transaction, Block, BlockHeader, Weighting}; -use self::core::pow::Difficulty; -use self::core::{global, libtx}; +use self::core::core::{transaction, Weighting}; +use self::core::global; use self::keychain::{ExtKeychain, Keychain}; use self::pool::TxSource; use self::util::RwLock; @@ -31,65 +30,47 @@ use std::sync::Arc; /// Test we can add some txs to the pool (both stempool and txpool). #[test] fn test_the_transaction_pool() { - // Use mainnet config to allow for reasonably large block weights. - global::set_local_chain_type(global::ChainTypes::Mainnet); + util::init_test_logger(); + global::set_local_chain_type(global::ChainTypes::AutomatedTesting); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); - let db_root = ".grin_transaction_pool".to_string(); - clean_output_dir(db_root.clone()); - - let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap()); + let db_root = "target/.transaction_pool"; + clean_output_dir(db_root.into()); + let genesis = genesis_block(&keychain); + let chain = Arc::new(init_chain(db_root, genesis)); let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); // Initialize a new pool with our chain adapter. - let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone())); - - let header = { - let height = 1; - let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - 0, - false, - ) - .unwrap(); - let block = Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap(); - - chain.update_db_for_block(&block); - - block.header - }; - - // Now create tx to spend a coinbase, giving us some useful outputs for testing - // with. - let initial_tx = { - test_transaction_spending_coinbase( - &keychain, - &header, - vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400], - ) - }; + let mut pool = init_transaction_pool( + Arc::new(ChainAdapter { + chain: chain.clone(), + }), + verifier_cache.clone(), + ); + + add_some_blocks(&chain, 3, &keychain); + let header = chain.head_header().unwrap(); + + let header_1 = chain.get_header_by_height(1).unwrap(); + let initial_tx = test_transaction_spending_coinbase( + &keychain, + &header_1, + vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400], + ); // Add this tx to the pool (stem=false, direct to txpool). { - let mut write_pool = pool.write(); - write_pool - .add_to_pool(test_source(), initial_tx, false, &header) + pool.add_to_pool(test_source(), initial_tx, false, &header) .unwrap(); - assert_eq!(write_pool.total_size(), 1); + assert_eq!(pool.total_size(), 1); } // Test adding a tx that "double spends" an output currently spent by a tx // already in the txpool. In this case we attempt to spend the original coinbase twice. 
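Between them, the enabled and disabled tests pin down a small validation matrix: the NRD feature flag is checked before the header version (disabled plus pre-HF3 still yields `NRDKernelNotEnabled`), and `HeaderVersion(4)` activates at `consensus::TESTING_THIRD_HARD_FORK` (reached here after 3 + 1 + 5 blocks). A hypothetical helper capturing that ordering, not the pool's actual code path:

use grin_core::core::HeaderVersion;
use grin_pool::types::PoolError;

fn check_nrd(nrd_enabled: bool, version: HeaderVersion) -> Result<(), PoolError> {
	if !nrd_enabled {
		// The feature flag wins regardless of header version.
		return Err(PoolError::NRDKernelNotEnabled);
	}
	if version < HeaderVersion(4) {
		return Err(PoolError::NRDKernelPreHF3);
	}
	Ok(())
}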
{ let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]); - let mut write_pool = pool.write(); - assert!(write_pool - .add_to_pool(test_source(), tx, false, &header) - .is_err()); + assert!(pool.add_to_pool(test_source(), tx, false, &header).is_err()); } // tx1 spends some outputs from the initial test tx. @@ -97,32 +78,26 @@ fn test_the_transaction_pool() { // tx2 spends some outputs from both tx1 and the initial test tx. let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]); - // Take a write lock and add a couple of tx entries to the pool. { - let mut write_pool = pool.write(); - // Check we have a single initial tx in the pool. - assert_eq!(write_pool.total_size(), 1); + assert_eq!(pool.total_size(), 1); // First, add a simple tx directly to the txpool (stem = false). - write_pool - .add_to_pool(test_source(), tx1.clone(), false, &header) + pool.add_to_pool(test_source(), tx1.clone(), false, &header) .unwrap(); - assert_eq!(write_pool.total_size(), 2); + assert_eq!(pool.total_size(), 2); // Add another tx spending outputs from the previous tx. - write_pool - .add_to_pool(test_source(), tx2.clone(), false, &header) + pool.add_to_pool(test_source(), tx2.clone(), false, &header) .unwrap(); - assert_eq!(write_pool.total_size(), 3); + assert_eq!(pool.total_size(), 3); } // Test adding the exact same tx multiple times (same kernel signature). // This will fail for stem=false during tx aggregation due to duplicate // outputs and duplicate kernels. { - let mut write_pool = pool.write(); - assert!(write_pool + assert!(pool .add_to_pool(test_source(), tx1.clone(), false, &header) .is_err()); } @@ -131,8 +106,7 @@ fn test_the_transaction_pool() { // Note: not the *same* tx, just same underlying inputs/outputs. { let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]); - let mut write_pool = pool.write(); - assert!(write_pool + assert!(pool .add_to_pool(test_source(), tx1a, false, &header) .is_err()); } @@ -140,8 +114,7 @@ fn test_the_transaction_pool() { // Test adding a tx attempting to spend a non-existent output. { let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]); - let mut write_pool = pool.write(); - assert!(write_pool + assert!(pool .add_to_pool(test_source(), bad_tx, false, &header) .is_err()); } @@ -152,71 +125,53 @@ fn test_the_transaction_pool() { // to be immediately stolen via a "replay" tx. { let tx = test_transaction(&keychain, vec![900], vec![498]); - let mut write_pool = pool.write(); - assert!(write_pool - .add_to_pool(test_source(), tx, false, &header) - .is_err()); + assert!(pool.add_to_pool(test_source(), tx, false, &header).is_err()); } // Confirm the tx pool correctly identifies an invalid tx (already spent). { - let mut write_pool = pool.write(); let tx3 = test_transaction(&keychain, vec![500], vec![497]); - assert!(write_pool + assert!(pool .add_to_pool(test_source(), tx3, false, &header) .is_err()); - assert_eq!(write_pool.total_size(), 3); + assert_eq!(pool.total_size(), 3); } // Now add a couple of txs to the stempool (stem = true). 
{ - let mut write_pool = pool.write(); let tx = test_transaction(&keychain, vec![599], vec![598]); - write_pool - .add_to_pool(test_source(), tx, true, &header) - .unwrap(); + pool.add_to_pool(test_source(), tx, true, &header).unwrap(); let tx2 = test_transaction(&keychain, vec![598], vec![597]); - write_pool - .add_to_pool(test_source(), tx2, true, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 3); - assert_eq!(write_pool.stempool.size(), 2); + pool.add_to_pool(test_source(), tx2, true, &header).unwrap(); + assert_eq!(pool.total_size(), 3); + assert_eq!(pool.stempool.size(), 2); } // Check we can take some entries from the stempool and "fluff" them into the // txpool. This also exercises multi-kernel txs. { - let mut write_pool = pool.write(); - let agg_tx = write_pool - .stempool - .all_transactions_aggregate() - .unwrap() - .unwrap(); + let agg_tx = pool.stempool.all_transactions_aggregate().unwrap().unwrap(); assert_eq!(agg_tx.kernels().len(), 2); - write_pool - .add_to_pool(test_source(), agg_tx, false, &header) + pool.add_to_pool(test_source(), agg_tx, false, &header) .unwrap(); - assert_eq!(write_pool.total_size(), 4); - assert!(write_pool.stempool.is_empty()); + assert_eq!(pool.total_size(), 4); + assert!(pool.stempool.is_empty()); } // Adding a duplicate tx to the stempool will result in it being fluffed. // This handles the case of the stem path having a cycle in it. { - let mut write_pool = pool.write(); let tx = test_transaction(&keychain, vec![597], vec![596]); - write_pool - .add_to_pool(test_source(), tx.clone(), true, &header) + pool.add_to_pool(test_source(), tx.clone(), true, &header) .unwrap(); - assert_eq!(write_pool.total_size(), 4); - assert_eq!(write_pool.stempool.size(), 1); + assert_eq!(pool.total_size(), 4); + assert_eq!(pool.stempool.size(), 1); // Duplicate stem tx so fluff, adding it to txpool and removing it from stempool. - write_pool - .add_to_pool(test_source(), tx.clone(), true, &header) + pool.add_to_pool(test_source(), tx.clone(), true, &header) .unwrap(); - assert_eq!(write_pool.total_size(), 5); - assert!(write_pool.stempool.is_empty()); + assert_eq!(pool.total_size(), 5); + assert!(pool.stempool.is_empty()); } // Now check we can correctly deaggregate a multi-kernel tx based on current @@ -224,8 +179,6 @@ fn test_the_transaction_pool() { // We will do this be adding a new tx to the pool // that is a superset of a tx already in the pool. { - let mut write_pool = pool.write(); - let tx4 = test_transaction(&keychain, vec![800], vec![799]); // tx1 and tx2 are already in the txpool (in aggregated form) // tx4 is the "new" part of this aggregated tx that we care about @@ -235,11 +188,10 @@ fn test_the_transaction_pool() { .validate(Weighting::AsTransaction, verifier_cache.clone()) .unwrap(); - write_pool - .add_to_pool(test_source(), agg_tx, false, &header) + pool.add_to_pool(test_source(), agg_tx, false, &header) .unwrap(); - assert_eq!(write_pool.total_size(), 6); - let entry = write_pool.txpool.entries.last().unwrap(); + assert_eq!(pool.total_size(), 6); + let entry = pool.txpool.entries.last().unwrap(); assert_eq!(entry.tx.kernels().len(), 1); assert_eq!(entry.src, TxSource::Deaggregate); } @@ -247,232 +199,19 @@ fn test_the_transaction_pool() { // Check we cannot "double spend" an output spent in a previous block. // We use the initial coinbase output here for convenience. 
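Deaggregation, exercised just above, is conceptually a set difference over kernels: tx1 and tx2 already sit in the txpool, so only the tx4 remainder of the aggregate counts as new. A toy illustration with kernel ids as plain integers (hedged heavily; the real pool subtracts whole transactions, not bare ids):

use std::collections::HashSet;

fn deaggregate_kernels(agg: &HashSet<u64>, known: &HashSet<u64>) -> HashSet<u64> {
	agg.difference(known).copied().collect()
}

fn main() {
	let agg: HashSet<u64> = [1, 2, 4].into_iter().collect();
	let known: HashSet<u64> = [1, 2].into_iter().collect();
	// Only kernel 4 is genuinely new.
	let expected: HashSet<u64> = [4].into_iter().collect();
	assert_eq!(deaggregate_kernels(&agg, &known), expected);
}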
{ - let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap()); - - let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); - - // Initialize a new pool with our chain adapter. - let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone())); - - let header = { - let height = 1; - let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0); - let reward = libtx::reward::output( - &keychain, - &libtx::ProofBuilder::new(&keychain), - &key_id, - 0, - false, - ) - .unwrap(); - let block = - Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap(); - - chain.update_db_for_block(&block); - - block.header - }; - - // Now create tx to spend a coinbase, giving us some useful outputs for testing - // with. - let initial_tx = { - test_transaction_spending_coinbase( - &keychain, - &header, - vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400], - ) - }; - - // Add this tx to the pool (stem=false, direct to txpool). - { - let mut write_pool = pool.write(); - write_pool - .add_to_pool(test_source(), initial_tx, false, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 1); - } - - // Test adding a tx that "double spends" an output currently spent by a tx - // already in the txpool. In this case we attempt to spend the original coinbase twice. - { - let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]); - let mut write_pool = pool.write(); - assert!(write_pool - .add_to_pool(test_source(), tx, false, &header) - .is_err()); - } - - // tx1 spends some outputs from the initial test tx. - let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]); - // tx2 spends some outputs from both tx1 and the initial test tx. - let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]); + let double_spend_tx = test_transaction_spending_coinbase(&keychain, &header, vec![1000]); - // Take a write lock and add a couple of tx entries to the pool. - { - let mut write_pool = pool.write(); - - // Check we have a single initial tx in the pool. - assert_eq!(write_pool.total_size(), 1); - - // First, add a simple tx directly to the txpool (stem = false). - write_pool - .add_to_pool(test_source(), tx1.clone(), false, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 2); - - // Add another tx spending outputs from the previous tx. - write_pool - .add_to_pool(test_source(), tx2.clone(), false, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 3); - } - - // Test adding the exact same tx multiple times (same kernel signature). - // This will fail for stem=false during tx aggregation due to duplicate - // outputs and duplicate kernels. - { - let mut write_pool = pool.write(); - assert!(write_pool - .add_to_pool(test_source(), tx1.clone(), false, &header) - .is_err()); - } - - // Test adding a duplicate tx with the same input and outputs. - // Note: not the *same* tx, just same underlying inputs/outputs. - { - let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]); - let mut write_pool = pool.write(); - assert!(write_pool - .add_to_pool(test_source(), tx1a, false, &header) - .is_err()); - } - - // Test adding a tx attempting to spend a non-existent output. - { - let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]); - let mut write_pool = pool.write(); - assert!(write_pool - .add_to_pool(test_source(), bad_tx, false, &header) - .is_err()); - } - - // Test adding a tx that would result in a duplicate output (conflicts with - // output from tx2). 
For reasons of security all outputs in the UTXO set must - // be unique. Otherwise spending one will almost certainly cause the other - // to be immediately stolen via a "replay" tx. - { - let tx = test_transaction(&keychain, vec![900], vec![498]); - let mut write_pool = pool.write(); - assert!(write_pool - .add_to_pool(test_source(), tx, false, &header) - .is_err()); - } - - // Confirm the tx pool correctly identifies an invalid tx (already spent). - { - let mut write_pool = pool.write(); - let tx3 = test_transaction(&keychain, vec![500], vec![497]); - assert!(write_pool - .add_to_pool(test_source(), tx3, false, &header) - .is_err()); - assert_eq!(write_pool.total_size(), 3); - } - - // Now add a couple of txs to the stempool (stem = true). - { - let mut write_pool = pool.write(); - let tx = test_transaction(&keychain, vec![599], vec![598]); - write_pool - .add_to_pool(test_source(), tx, true, &header) - .unwrap(); - let tx2 = test_transaction(&keychain, vec![598], vec![597]); - write_pool - .add_to_pool(test_source(), tx2, true, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 3); - assert_eq!(write_pool.stempool.size(), 2); - } - - // Check we can take some entries from the stempool and "fluff" them into the - // txpool. This also exercises multi-kernel txs. - { - let mut write_pool = pool.write(); - let agg_tx = write_pool - .stempool - .all_transactions_aggregate() - .unwrap() - .unwrap(); - assert_eq!(agg_tx.kernels().len(), 2); - write_pool - .add_to_pool(test_source(), agg_tx, false, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 4); - assert!(write_pool.stempool.is_empty()); - } - - // Adding a duplicate tx to the stempool will result in it being fluffed. - // This handles the case of the stem path having a cycle in it. - { - let mut write_pool = pool.write(); - let tx = test_transaction(&keychain, vec![597], vec![596]); - write_pool - .add_to_pool(test_source(), tx.clone(), true, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 4); - assert_eq!(write_pool.stempool.size(), 1); - - // Duplicate stem tx so fluff, adding it to txpool and removing it from stempool. - write_pool - .add_to_pool(test_source(), tx.clone(), true, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 5); - assert!(write_pool.stempool.is_empty()); - } - - // Now check we can correctly deaggregate a multi-kernel tx based on current - // contents of the txpool. - // We will do this be adding a new tx to the pool - // that is a superset of a tx already in the pool. - { - let mut write_pool = pool.write(); - - let tx4 = test_transaction(&keychain, vec![800], vec![799]); - // tx1 and tx2 are already in the txpool (in aggregated form) - // tx4 is the "new" part of this aggregated tx that we care about - let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap(); - - agg_tx - .validate(Weighting::AsTransaction, verifier_cache.clone()) - .unwrap(); - - write_pool - .add_to_pool(test_source(), agg_tx, false, &header) - .unwrap(); - assert_eq!(write_pool.total_size(), 6); - let entry = write_pool.txpool.entries.last().unwrap(); - assert_eq!(entry.tx.kernels().len(), 1); - assert_eq!(entry.src, TxSource::Deaggregate); - } - - // Check we cannot "double spend" an output spent in a previous block. - // We use the initial coinbase output here for convenience. 
- { - let mut write_pool = pool.write(); - - let double_spend_tx = - { test_transaction_spending_coinbase(&keychain, &header, vec![1000]) }; - - // check we cannot add a double spend to the stempool - assert!(write_pool - .add_to_pool(test_source(), double_spend_tx.clone(), true, &header) - .is_err()); + // check we cannot add a double spend to the stempool + assert!(pool + .add_to_pool(test_source(), double_spend_tx.clone(), true, &header) + .is_err()); - // check we cannot add a double spend to the txpool - assert!(write_pool - .add_to_pool(test_source(), double_spend_tx.clone(), false, &header) - .is_err()); - } + // check we cannot add a double spend to the txpool + assert!(pool + .add_to_pool(test_source(), double_spend_tx.clone(), false, &header) + .is_err()); } + // Cleanup db directory - clean_output_dir(db_root.clone()); + clean_output_dir(db_root.into()); } From 3f1defecc3ad8ea40893de3d8fe085d6a43596dd Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 5 Jun 2020 20:15:34 +0100 Subject: [PATCH 42/48] cleanup --- core/src/global.rs | 1 - pool/tests/common.rs | 59 -------------------------------------------- 2 files changed, 60 deletions(-) diff --git a/core/src/global.rs b/core/src/global.rs index 386c3abba2..4589e12703 100644 --- a/core/src/global.rs +++ b/core/src/global.rs @@ -78,7 +78,6 @@ pub const TESTING_INITIAL_GRAPH_WEIGHT: u32 = 1; pub const TESTING_INITIAL_DIFFICULTY: u64 = 1; /// Testing max_block_weight (artifically low, just enough to support a few txs). -// pub const TESTING_MAX_BLOCK_WEIGHT: usize = 150; pub const TESTING_MAX_BLOCK_WEIGHT: usize = 250; /// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours, diff --git a/pool/tests/common.rs b/pool/tests/common.rs index 3b602abd8e..43a601d572 100644 --- a/pool/tests/common.rs +++ b/pool/tests/common.rs @@ -107,65 +107,6 @@ pub struct ChainAdapter { pub chain: Arc, } -// impl ChainAdapter { -// pub fn init(db_root: String) -> Result { -// let target_dir = format!("target/{}", db_root); -// let chain_store = ChainStore::new(&target_dir) -// .map_err(|e| format!("failed to init chain_store, {:?}", e))?; -// let store = Arc::new(RwLock::new(chain_store)); -// let utxo = Arc::new(RwLock::new(HashSet::new())); - -// Ok(ChainAdapter { store, utxo }) -// } - -// pub fn update_db_for_block(&self, block: &Block) { -// let header = &block.header; -// let tip = Tip::from_header(header); -// let s = self.store.write(); -// let batch = s.batch().unwrap(); - -// batch.save_block_header(header).unwrap(); -// batch.save_body_head(&tip).unwrap(); - -// // Retrieve previous block_sums from the db. -// let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&tip.prev_block_h) { -// prev_sums -// } else { -// BlockSums::default() -// }; - -// // Overage is based purely on the new block. -// // Previous block_sums have taken all previous overage into account. -// let overage = header.overage(); - -// // Offset on the other hand is the total kernel offset from the new block. -// let offset = header.total_kernel_offset(); - -// // Verify the kernel sums for the block_sums with the new block applied. 
-// let (utxo_sum, kernel_sum) = (prev_sums, block as &dyn Committed) -// .verify_kernel_sums(overage, offset) -// .unwrap(); - -// let block_sums = BlockSums { -// utxo_sum, -// kernel_sum, -// }; -// batch.save_block_sums(&header.hash(), block_sums).unwrap(); - -// batch.commit().unwrap(); - -// { -// let mut utxo = self.utxo.write(); -// for x in block.inputs() { -// utxo.remove(&x.commitment()); -// } -// for x in block.outputs() { -// utxo.insert(x.commitment()); -// } -// } -// } -// } - impl BlockChain for ChainAdapter { fn chain_head(&self) -> Result { self.chain From fb11b5dfd021ddfb20456b417c1b18ce7e842366 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 7 Jun 2020 15:32:15 +0100 Subject: [PATCH 43/48] cleanup pruneable trait for kernel pos index --- chain/src/linked_list.rs | 62 +++++++++++++-------------- chain/tests/store_kernel_pos_index.rs | 2 +- 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index c22e9995d3..92f3d3416d 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -119,13 +119,6 @@ pub trait ListIndex { batch: &Batch<'_>, commit: Commitment, ) -> Result::Pos>, Error>; - - /// Pop a pos off the back of the list (used for pruning old data). - fn pop_pos_back( - &self, - batch: &Batch<'_>, - commit: Commitment, - ) -> Result::Pos>, Error>; } /// Supports "rewind" given the provided commit and a pos to rewind back to. @@ -137,9 +130,16 @@ pub trait RewindableListIndex { /// A pruneable list index supports pruning of old data from the index lists. /// This allows us to efficiently maintain an index of "recent" kernel data. /// We can maintain a window of 2 weeks of recent data, discarding anything older than this. -pub trait PruneableListIndex { +pub trait PruneableListIndex: ListIndex { /// Prune old data. fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error>; + + /// Pop a pos off the back of the list (used for pruning old data). + fn pop_pos_back( + &self, + batch: &Batch<'_>, + commit: Commitment, + ) -> Result::Pos>, Error>; } /// Wrapper for the list to handle either `Single` or `Multi` entries. @@ -365,14 +365,30 @@ where } } } +} + +/// List index that supports rewind. +impl RewindableListIndex for MultiIndex { + fn rewind(&self, batch: &Batch<'_>, commit: Commitment, rewind_pos: u64) -> Result<(), Error> { + while self + .peek_pos(batch, commit)? + .map(|x| x.pos() > rewind_pos) + .unwrap_or(false) + { + self.pop_pos(batch, commit)?; + } + Ok(()) + } +} + +impl PruneableListIndex for MultiIndex { + fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error> { + panic!("wat"); + } /// Pop off the back/tail of the linked list. /// Used when pruning old data. - fn pop_pos_back( - &self, - batch: &Batch<'_>, - commit: Commitment, - ) -> Result::Pos>, Error> { + fn pop_pos_back(&self, batch: &Batch<'_>, commit: Commitment) -> Result, Error> { match self.get_list(batch, commit)? { None => Ok(None), Some(ListWrapper::Single { pos }) => { @@ -416,26 +432,6 @@ where } } -/// List index that supports rewind. -impl RewindableListIndex for MultiIndex { - fn rewind(&self, batch: &Batch<'_>, commit: Commitment, rewind_pos: u64) -> Result<(), Error> { - while self - .peek_pos(batch, commit)? 
- .map(|x| x.pos() > rewind_pos) - .unwrap_or(false) - { - self.pop_pos(batch, commit)?; - } - Ok(()) - } -} - -impl PruneableListIndex for MultiIndex { - fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error> { - panic!("wat"); - } -} - /// Something that tracks pos (in an MMR). pub trait PosEntry: Readable + Writeable + Copy { /// Accessor for the underlying (MMR) pos. diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 5458d5d734..0d69675579 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::chain::linked_list::{ListIndex, ListWrapper, RewindableListIndex}; +use crate::chain::linked_list::{ListIndex, ListWrapper, PruneableListIndex, RewindableListIndex}; use crate::chain::store::{self, ChainStore}; use crate::chain::types::CommitPos; use crate::core::global; From f73688b276b670d1eb5da0975502a6c3670c2a52 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 8 Jun 2020 14:00:58 +0100 Subject: [PATCH 44/48] add clear() to kernel_pos idx and test coverage --- chain/src/linked_list.rs | 26 +++++- chain/tests/store_kernel_pos_index.rs | 114 ++++++++++++++++++++++++++ 2 files changed, 139 insertions(+), 1 deletion(-) diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 92f3d3416d..6c53a5c527 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -21,7 +21,7 @@ use crate::util::secp::pedersen::Commitment; use enum_primitive::FromPrimitive; use grin_store as store; use std::marker::PhantomData; -use store::{to_key, to_key_u64, Error}; +use store::{to_key, to_key_u64, Error, SerIterator}; enum_from_primitive! { #[derive(Copy, Clone, Debug, PartialEq)] @@ -131,6 +131,10 @@ pub trait RewindableListIndex { /// This allows us to efficiently maintain an index of "recent" kernel data. /// We can maintain a window of 2 weeks of recent data, discarding anything older than this. pub trait PruneableListIndex: ListIndex { + /// Clear all data from the index. + /// Used when rebuilding the index. + fn clear(&self, batch: &Batch<'_>) -> Result<(), Error>; + /// Prune old data. fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error>; @@ -382,6 +386,26 @@ impl RewindableListIndex for MultiIndex { } impl PruneableListIndex for MultiIndex { + fn clear(&self, batch: &Batch<'_>) -> Result<(), Error> { + let mut list_count = 0; + let mut entry_count = 0; + let prefix = to_key(self.list_prefix, ""); + for (key, _) in batch.db.iter::>(&prefix)? { + let _ = batch.delete(&key); + list_count += 1; + } + let prefix = to_key(self.entry_prefix, ""); + for (key, _) in batch.db.iter::>(&prefix)? 
{ + let _ = batch.delete(&key); + entry_count += 1; + } + debug!( + "clear: lists deleted: {}, entries deleted: {}", + list_count, entry_count + ); + Ok(()) + } + fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error> { panic!("wat"); } diff --git a/chain/tests/store_kernel_pos_index.rs b/chain/tests/store_kernel_pos_index.rs index 0d69675579..1b74502c52 100644 --- a/chain/tests/store_kernel_pos_index.rs +++ b/chain/tests/store_kernel_pos_index.rs @@ -473,3 +473,117 @@ fn test_store_kernel_idx_multiple_commits() { clean_output_dir(chain_dir); } + +#[test] +fn test_store_kernel_idx_clear() -> Result<(), Error> { + setup_test(); + let chain_dir = ".grin_idx_clear"; + clean_output_dir(chain_dir); + + let commit = Commitment::from_vec(vec![]); + let commit2 = Commitment::from_vec(vec![1]); + + let store = ChainStore::new(chain_dir)?; + let index = store::nrd_recent_kernel_index(); + + // Add a couple of single entries to the index and commit the batch. + { + let batch = store.batch()?; + assert_eq!(index.peek_pos(&batch, commit), Ok(None)); + assert_eq!(index.get_list(&batch, commit), Ok(None)); + + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), + Ok(()), + ); + + assert_eq!( + index.push_pos( + &batch, + commit2, + CommitPos { + pos: 10, + height: 10 + } + ), + Ok(()), + ); + + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 1, height: 1 })), + ); + + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Single { + pos: CommitPos { pos: 1, height: 1 } + })), + ); + + assert_eq!( + index.peek_pos(&batch, commit2), + Ok(Some(CommitPos { + pos: 10, + height: 10 + })), + ); + + assert_eq!( + index.get_list(&batch, commit2), + Ok(Some(ListWrapper::Single { + pos: CommitPos { + pos: 10, + height: 10 + } + })), + ); + + batch.commit()?; + } + + // Clear the index and confirm everything was deleted as expected. + { + let batch = store.batch()?; + assert_eq!(index.clear(&batch), Ok(())); + assert_eq!(index.peek_pos(&batch, commit), Ok(None)); + assert_eq!(index.get_list(&batch, commit), Ok(None)); + assert_eq!(index.peek_pos(&batch, commit2), Ok(None)); + assert_eq!(index.get_list(&batch, commit2), Ok(None)); + batch.commit()?; + } + + // Add multiple entries to the index, commit the batch. + { + let batch = store.batch()?; + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }), + Ok(()), + ); + assert_eq!( + index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }), + Ok(()), + ); + assert_eq!( + index.peek_pos(&batch, commit), + Ok(Some(CommitPos { pos: 2, height: 2 })), + ); + assert_eq!( + index.get_list(&batch, commit), + Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })), + ); + batch.commit()?; + } + + // Clear the index and confirm everything was deleted as expected. 
+ { + let batch = store.batch()?; + assert_eq!(index.clear(&batch), Ok(())); + assert_eq!(index.peek_pos(&batch, commit), Ok(None)); + assert_eq!(index.get_list(&batch, commit), Ok(None)); + batch.commit()?; + } + + clean_output_dir(chain_dir); + Ok(()) +} From de8ba232941d7865f0e34a5716b3fadf95fd2a26 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 8 Jun 2020 17:50:11 +0100 Subject: [PATCH 45/48] hook kernel_pos rebuild into node startup, compaction and fast sync --- chain/src/chain.rs | 12 +++-- chain/src/linked_list.rs | 14 ++++-- chain/src/txhashset/txhashset.rs | 80 +++++++++++++++++++++++++++++++- 3 files changed, 99 insertions(+), 7 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 8523c0bd6a..394567758f 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -196,12 +196,12 @@ impl Chain { &mut txhashset, )?; - // Initialize the output_pos index based on UTXO set. - // This is fast as we only look for stale and missing entries - // and do not need to rebuild the entire index. + // Initialize the output_pos index based on UTXO set + // and NRD kernel_pos index based recent kernel history. { let batch = store.batch()?; txhashset.init_output_pos_index(&header_pmmr, &batch)?; + txhashset.init_kernel_pos_index(&header_pmmr, &batch)?; batch.commit()?; } @@ -1010,6 +1010,9 @@ impl Chain { // Rebuild our output_pos index in the db based on fresh UTXO set. txhashset.init_output_pos_index(&header_pmmr, &batch)?; + // Rebuild our NRD kernel_pos index based on recent kernel history. + txhashset.init_kernel_pos_index(&header_pmmr, &batch)?; + // Commit all the changes to the db. batch.commit()?; @@ -1146,6 +1149,9 @@ impl Chain { // Make sure our output_pos index is consistent with the UTXO set. txhashset.init_output_pos_index(&header_pmmr, &batch)?; + // Rebuild our NRD kernel_pos index based on recent kernel history. + txhashset.init_kernel_pos_index(&header_pmmr, &batch)?; + // Commit all the above db changes. batch.commit()?; diff --git a/chain/src/linked_list.rs b/chain/src/linked_list.rs index 6c53a5c527..cb9822e0ac 100644 --- a/chain/src/linked_list.rs +++ b/chain/src/linked_list.rs @@ -21,7 +21,7 @@ use crate::util::secp::pedersen::Commitment; use enum_primitive::FromPrimitive; use grin_store as store; use std::marker::PhantomData; -use store::{to_key, to_key_u64, Error, SerIterator}; +use store::{to_key, to_key_u64, Error}; enum_from_primitive! { #[derive(Copy, Clone, Debug, PartialEq)] @@ -406,8 +406,16 @@ impl PruneableListIndex for MultiIndex { Ok(()) } - fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error> { - panic!("wat"); + /// Pruning will be more performant than full rebuild but not yet necessary. + fn prune( + &self, + _batch: &Batch<'_>, + _commit: Commitment, + _cutoff_pos: u64, + ) -> Result<(), Error> { + unimplemented!( + "we currently rebuild index on startup/compaction, pruning not yet implemented" + ); } /// Pop off the back/tail of the linked list. diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 740d041d98..3ab064674c 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -15,6 +15,7 @@ //! Utility structs to handle the 3 MMRs (output, rangeproof, //! kernel) along the overall header MMR conveniently and transactionally. 
+use crate::core::consensus::WEEK_HEIGHT; use crate::core::core::committed::Committed; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::merkle_proof::MerkleProof; @@ -25,7 +26,7 @@ use crate::core::core::{ use crate::core::global; use crate::core::ser::{PMMRable, ProtocolVersion}; use crate::error::{Error, ErrorKind}; -use crate::linked_list::{ListIndex, RewindableListIndex}; +use crate::linked_list::{ListIndex, PruneableListIndex, RewindableListIndex}; use crate::store::{self, Batch, ChainStore}; use crate::txhashset::bitmap_accumulator::BitmapAccumulator; use crate::txhashset::{RewindableKernelView, UTXOView}; @@ -379,6 +380,83 @@ impl TxHashSet { Ok(()) } + /// (Re)build thge NRD kernel_pos based on 2 weeks of recent kernel history. + pub fn init_kernel_pos_index( + &self, + header_pmmr: &PMMRHandle, + batch: &Batch<'_>, + ) -> Result<(), Error> { + // if !global::is_nrd_enabled() { + // return Ok(()) + // } + + let now = Instant::now(); + let kernel_index = store::nrd_recent_kernel_index(); + kernel_index.clear(batch)?; + + let head = batch.head()?; + let cutoff = head.height.saturating_sub(WEEK_HEIGHT * 2); + let cutoff_hash = header_pmmr.get_header_hash_by_height(cutoff)?; + let cutoff_header = batch.get_block_header(&cutoff_hash)?; + + let prev_size = if cutoff == 0 { + 0 + } else { + let prev_header = batch.get_previous_header(&cutoff_header)?; + prev_header.kernel_mmr_size + }; + + debug!( + "init_kernel_pos_index: cutoff_header: {} at {}, prev kernel_mmr_size: {}", + cutoff_header.hash(), + cutoff_header.height, + prev_size, + ); + + let kernel_pmmr = + ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); + + let mut current_pos = prev_size + 1; + let mut current_header = cutoff_header; + let mut count = 0; + while current_pos <= self.kernel_pmmr_h.last_pos { + if pmmr::is_leaf(current_pos) { + if let Some(kernel) = kernel_pmmr.get_data(current_pos) { + match kernel.features { + KernelFeatures::NoRecentDuplicate { .. } => { + while current_pos > current_header.kernel_mmr_size { + let hash = header_pmmr + .get_header_hash_by_height(current_header.height + 1)?; + current_header = batch.get_block_header(&hash)?; + } + + let new_pos = CommitPos { + pos: current_pos, + height: current_header.height, + }; + debug!( + "pushing entry to NRD index: {:?}: {:?}", + kernel.excess(), + new_pos + ); + kernel_index.push_pos(&batch, kernel.excess(), new_pos)?; + count += 1; + } + _ => {} + } + } + } + current_pos += 1; + } + + debug!( + "init_kernel_pos_index: pushed {} entries to the index, took {}s", + count, + now.elapsed().as_secs(), + ); + Ok(()) + } + /// (Re)build the output_pos index to be consistent with the current UTXO set. /// Remove any "stale" index entries that do not correspond to outputs in the UTXO set. /// Add any missing index entries based on UTXO set. 
From 7df028bc863edaf32c6c759133505fd3484adf73 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 8 Jun 2020 23:12:28 +0100 Subject: [PATCH 46/48] verify full NRD history on fast sync --- chain/src/chain.rs | 18 ++++-- chain/src/txhashset/txhashset.rs | 106 +++++++++++++++++-------------- 2 files changed, 72 insertions(+), 52 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 394567758f..35a97571e8 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -201,7 +201,7 @@ impl Chain { { let batch = store.batch()?; txhashset.init_output_pos_index(&header_pmmr, &batch)?; - txhashset.init_kernel_pos_index(&header_pmmr, &batch)?; + txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?; batch.commit()?; } @@ -960,8 +960,16 @@ impl Chain { Some(&header), )?; - // Validate the full kernel history (kernel MMR root for every block header). - self.validate_kernel_history(&header, &txhashset)?; + // Validate the full kernel history. + // Check kernel MMR root for every block header. + // Check NRD relative height rules for full kernel history. + { + self.validate_kernel_history(&header, &txhashset)?; + + let header_pmmr = self.header_pmmr.read(); + let batch = self.store.batch()?; + txhashset.verify_kernel_pos_index(&self.genesis, &header_pmmr, &batch)?; + } // all good, prepare a new batch and update all the required records debug!("txhashset_write: rewinding a 2nd time (writeable)"); @@ -1011,7 +1019,7 @@ impl Chain { txhashset.init_output_pos_index(&header_pmmr, &batch)?; // Rebuild our NRD kernel_pos index based on recent kernel history. - txhashset.init_kernel_pos_index(&header_pmmr, &batch)?; + txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?; // Commit all the changes to the db. batch.commit()?; @@ -1150,7 +1158,7 @@ impl Chain { txhashset.init_output_pos_index(&header_pmmr, &batch)?; // Rebuild our NRD kernel_pos index based on recent kernel history. - txhashset.init_kernel_pos_index(&header_pmmr, &batch)?; + txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?; // Commit all the above db changes. batch.commit()?; diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 3ab064674c..4ab0cbec77 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -380,11 +380,25 @@ impl TxHashSet { Ok(()) } - /// (Re)build thge NRD kernel_pos based on 2 weeks of recent kernel history. - pub fn init_kernel_pos_index( + /// (Re)build the NRD kernel_pos index based on 2 weeks of recent kernel history. + pub fn init_recent_kernel_pos_index( &self, header_pmmr: &PMMRHandle, batch: &Batch<'_>, + ) -> Result<(), Error> { + let head = batch.head()?; + let cutoff = head.height.saturating_sub(WEEK_HEIGHT * 2); + let cutoff_hash = header_pmmr.get_header_hash_by_height(cutoff)?; + let cutoff_header = batch.get_block_header(&cutoff_hash)?; + self.verify_kernel_pos_index(&cutoff_header, header_pmmr, batch) + } + + /// Verify and (re)build the NRD kernel_pos index from the provided header onwards. 
+ pub fn verify_kernel_pos_index( + &self, + from_header: &BlockHeader, + header_pmmr: &PMMRHandle, + batch: &Batch<'_>, ) -> Result<(), Error> { // if !global::is_nrd_enabled() { // return Ok(()) @@ -394,22 +408,17 @@ impl TxHashSet { let kernel_index = store::nrd_recent_kernel_index(); kernel_index.clear(batch)?; - let head = batch.head()?; - let cutoff = head.height.saturating_sub(WEEK_HEIGHT * 2); - let cutoff_hash = header_pmmr.get_header_hash_by_height(cutoff)?; - let cutoff_header = batch.get_block_header(&cutoff_hash)?; - - let prev_size = if cutoff == 0 { + let prev_size = if from_header.height == 0 { 0 } else { - let prev_header = batch.get_previous_header(&cutoff_header)?; + let prev_header = batch.get_previous_header(&from_header)?; prev_header.kernel_mmr_size }; debug!( - "init_kernel_pos_index: cutoff_header: {} at {}, prev kernel_mmr_size: {}", - cutoff_header.hash(), - cutoff_header.height, + "verify_kernel_pos_index: header: {} at {}, prev kernel_mmr_size: {}", + from_header.hash(), + from_header.height, prev_size, ); @@ -417,7 +426,7 @@ impl TxHashSet { ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); let mut current_pos = prev_size + 1; - let mut current_header = cutoff_header; + let mut current_header = from_header.clone(); let mut count = 0; while current_pos <= self.kernel_pmmr_h.last_pos { if pmmr::is_leaf(current_pos) { @@ -429,17 +438,11 @@ impl TxHashSet { .get_header_hash_by_height(current_header.height + 1)?; current_header = batch.get_block_header(&hash)?; } - let new_pos = CommitPos { pos: current_pos, height: current_header.height, }; - debug!( - "pushing entry to NRD index: {:?}: {:?}", - kernel.excess(), - new_pos - ); - kernel_index.push_pos(&batch, kernel.excess(), new_pos)?; + apply_kernel_rules(&kernel, new_pos, batch)?; count += 1; } _ => {} @@ -450,7 +453,7 @@ impl TxHashSet { } debug!( - "init_kernel_pos_index: pushed {} entries to the index, took {}s", + "verify_kernel_pos_index: pushed {} entries to the index, took {}s", count, now.elapsed().as_secs(), ); @@ -1132,32 +1135,8 @@ impl<'a> Extension<'a> { ) -> Result<(), Error> { for kernel in kernels { let pos = self.apply_kernel(kernel)?; - - // If NRD enabled then enforce NRD relative height rule. - // Otherwise just conntinue and apply the next kernel. - if global::is_nrd_enabled() { - let kernel_index = store::nrd_recent_kernel_index(); - if let KernelFeatures::NoRecentDuplicate { - relative_height, .. - } = kernel.features - { - debug!("checking NRD index: {:?}", kernel.excess()); - if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? { - let diff = height.saturating_sub(prev.height); - debug!("NRD check: {}, {:?}, {:?}", height, prev, relative_height); - if diff < relative_height.into() { - return Err(ErrorKind::NRDRelativeHeight.into()); - } - } - let new_pos = CommitPos { pos, height }; - debug!( - "pushing entry to NRD index: {:?}: {:?}", - kernel.excess(), - new_pos - ); - kernel_index.push_pos(batch, kernel.excess(), new_pos)?; - } - } + let commit_pos = CommitPos { pos, height }; + apply_kernel_rules(kernel, commit_pos, batch)?; } Ok(()) } @@ -1785,3 +1764,36 @@ fn input_pos_to_rewind( } Ok(bitmap) } + +/// If NRD enabled then enforce NRD relative height rules. +fn apply_kernel_rules(kernel: &TxKernel, pos: CommitPos, batch: &Batch<'_>) -> Result<(), Error> { + if !global::is_nrd_enabled() { + return Ok(()); + } + match kernel.features { + KernelFeatures::NoRecentDuplicate { + relative_height, .. 
+ } => { + let kernel_index = store::nrd_recent_kernel_index(); + debug!("checking NRD index: {:?}", kernel.excess()); + if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? { + let diff = pos.height.saturating_sub(prev.height); + debug!( + "NRD check: {}, {:?}, {:?}", + pos.height, prev, relative_height + ); + if diff < relative_height.into() { + return Err(ErrorKind::NRDRelativeHeight.into()); + } + } + debug!( + "pushing entry to NRD index: {:?}: {:?}", + kernel.excess(), + pos, + ); + kernel_index.push_pos(batch, kernel.excess(), pos)?; + } + _ => {} + } + Ok(()) +} From adca6b0976ce90eca1103e4f6ab91e52a3837f89 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Tue, 9 Jun 2020 14:23:26 +0100 Subject: [PATCH 47/48] return early if nrd disabled --- chain/src/txhashset/txhashset.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 4ab0cbec77..5485628225 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -400,9 +400,9 @@ impl TxHashSet { header_pmmr: &PMMRHandle, batch: &Batch<'_>, ) -> Result<(), Error> { - // if !global::is_nrd_enabled() { - // return Ok(()) - // } + if !global::is_nrd_enabled() { + return Ok(()); + } let now = Instant::now(); let kernel_index = store::nrd_recent_kernel_index(); From 4acbf8c8c0c864336ab11a7f462ff9d50008e5e4 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Tue, 9 Jun 2020 22:18:51 +0100 Subject: [PATCH 48/48] fix header sync issue --- chain/src/pipe.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index 66e660a228..2db6b1bf21 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -219,14 +219,7 @@ pub fn sync_block_headers( txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| { rewind_and_apply_header_fork(&last_header, ext, batch)?; Ok(()) - })?; - - let header_head = ctx.batch.header_head()?; - if has_more_work(last_header, &header_head) { - update_header_head(&Tip::from_header(last_header), &mut ctx.batch)?; - } - - Ok(()) + }) } /// Process a block header. Update the header MMR and corresponding header_head if this header