From 3d0e82b52d041aeb71494bb0573111516689c911 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 16 Feb 2020 21:27:36 +0000 Subject: [PATCH 1/9] first pass at rewind_single_block and reworking rewind to simply iterate over blocks, rewinding each incrementally --- chain/src/store.rs | 34 ++++++---------- chain/src/txhashset/txhashset.rs | 67 +++++++++++++++++++++++++------- 2 files changed, 63 insertions(+), 38 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index 5c38ff15b0..fb088c2694 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -182,7 +182,8 @@ impl<'a> Batch<'a> { /// Note: the block header is not saved to the db here, assumes this has already been done. pub fn save_block(&self, b: &Block) -> Result<(), Error> { // Build the "input bitmap" for this new block and store it in the db. - self.build_and_store_block_input_bitmap(&b)?; + let bitmap = self.build_block_input_bitmap(b); + self.save_block_input_bitmap(&b.hash(), &bitmap)?; // Save the block itself to the db. self.db @@ -315,27 +316,20 @@ impl<'a> Batch<'a> { } /// Build the input bitmap for the given block. - fn build_block_input_bitmap(&self, block: &Block) -> Result { - let bitmap = block + fn build_block_input_bitmap(&self, block: &Block) -> Bitmap { + block .inputs() .iter() .filter_map(|x| self.get_output_pos(&x.commitment()).ok()) .map(|x| x as u32) - .collect(); - Ok(bitmap) - } - - /// Build and store the input bitmap for the given block. - fn build_and_store_block_input_bitmap(&self, block: &Block) -> Result { - // Build the bitmap. - let bitmap = self.build_block_input_bitmap(block)?; - - // Save the bitmap to the db (via the batch). - self.save_block_input_bitmap(&block.hash(), &bitmap)?; - - Ok(bitmap) + .collect() } + /// TODO - This should return a Vec<(u64, u64, Commitment)> for spent outputs. + /// TODO - So we can "undo" a block easily in the output_pos. 
+ /// Index needs pos and height for each commitment and we need to add these back in + /// as necessary. + /// /// Get the block input bitmap from the db or build the bitmap from /// the full block from the db (if the block is found). pub fn get_block_input_bitmap(&self, bh: &Hash) -> Result { @@ -345,13 +339,7 @@ impl<'a> Batch<'a> { { Ok(Bitmap::deserialize(&bytes)) } else { - match self.get_block(bh) { - Ok(block) => { - let bitmap = self.build_and_store_block_input_bitmap(&block)?; - Ok(bitmap) - } - Err(e) => Err(e), - } + panic!("should never happen, but fixme"); } } diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 7710e183a2..8cea9a22c6 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -366,6 +366,8 @@ impl TxHashSet { debug!("txhashset: starting compaction..."); let head_header = batch.head_header()?; + + // TODO - What to do with this? let rewind_rm_pos = input_pos_to_rewind(&horizon_header, &head_header, batch)?; debug!("txhashset: check_compact output mmr backend..."); @@ -1103,13 +1105,22 @@ impl<'a> Extension<'a> { // Rewound output pos will be removed from the MMR. // Rewound input (spent) pos will be added back to the MMR. let head_header = batch.get_block_header(&self.head.hash())?; - let rewind_rm_pos = input_pos_to_rewind(header, &head_header, batch)?; - self.rewind_to_pos( - header.output_mmr_size, - header.kernel_mmr_size, - &rewind_rm_pos, - )?; + if head_header.height <= header.height { + // Nothing to rewind but we do want to truncate the MMRs at header for consistency. + self.rewind_mmrs_to_pos( + header.output_mmr_size, + header.kernel_mmr_size, + &Bitmap::create(), + )?; + self.apply_to_bitmap_accumulator(&[header.output_mmr_size])?; + } else { + let mut current = head_header; + while header.height < current.height { + self.rewind_single_block(¤t, batch)?; + current = batch.get_previous_header(¤t)?; + } + } // Update our head to reflect the header we rewound to. 
self.head = Tip::from_header(header); @@ -1117,9 +1128,40 @@ impl<'a> Extension<'a> { Ok(()) } + // TODO - We need to "undo" the output_pos index changes here for each block + // that we rewind. + // If an output becomes unspent we must ensure it is back in the output_pos index + // with the correct pos. + // This is crucial for output commitments that have been reused. + fn rewind_single_block( + &mut self, + header: &BlockHeader, + batch: &Batch<'_>, + ) -> Result<(), Error> { + let input_bitmap = batch.get_block_input_bitmap(&header.hash())?; + + if header.height == 0 { + self.rewind_mmrs_to_pos(0, 0, &Bitmap::create())?; + } else { + let prev = batch.get_previous_header(&header)?; + self.rewind_mmrs_to_pos(prev.output_mmr_size, prev.kernel_mmr_size, &input_bitmap)?; + } + + // Update our BitmapAccumulator based on affected outputs. + // We want to "unspend" every rewound spent output. + // Treat last_pos as an affected output to ensure we rebuild far enough back. + let mut affected_pos: Vec<_> = input_bitmap.iter().map(|x| x.into()).collect(); + affected_pos.push(self.output_pmmr.last_pos); + self.apply_to_bitmap_accumulator(&affected_pos)?; + + // TODO - we need to update the output_pos index here to reflect rewound state. + + Ok(()) + } + /// Rewinds the MMRs to the provided positions, given the output and - /// kernel we want to rewind to. - fn rewind_to_pos( + /// kernel pos we want to rewind to. + fn rewind_mmrs_to_pos( &mut self, output_pos: u64, kernel_pos: u64, @@ -1134,13 +1176,6 @@ impl<'a> Extension<'a> { self.kernel_pmmr .rewind(kernel_pos, &Bitmap::create()) .map_err(&ErrorKind::TxHashSetErr)?; - - // Update our BitmapAccumulator based on affected outputs. - // We want to "unspend" every rewound spent output. - // Treat output_pos as an affected output to ensure we rebuild far enough back. 
- let mut affected_pos: Vec<_> = rewind_rm_pos.iter().map(|x| x as u64).collect(); - affected_pos.push(output_pos); - self.apply_to_bitmap_accumulator(&affected_pos)?; Ok(()) } @@ -1563,6 +1598,8 @@ pub fn clean_txhashset_folder(root_dir: &PathBuf) { } } +/// TODO - suspect this can be simplified *significantly*. +/// /// Given a block header to rewind to and the block header at the /// head of the current chain state, we need to calculate the positions /// of all inputs (spent outputs) we need to "undo" during a rewind. From e2ff20ab9bfcf3056d03550635c618666e0e46f2 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 17 Feb 2020 15:10:52 +0000 Subject: [PATCH 2/9] commit --- api/src/handlers/utils.rs | 4 +- chain/src/chain.rs | 8 +- chain/src/pipe.rs | 33 ++++--- chain/src/store.rs | 84 ++++++++++------- chain/src/txhashset/txhashset.rs | 157 +++++++++++++------------------ chain/src/types.rs | 37 ++++++-- chain/tests/mine_simple_chain.rs | 7 +- 7 files changed, 175 insertions(+), 155 deletions(-) diff --git a/api/src/handlers/utils.rs b/api/src/handlers/utils.rs index 905c915f7c..0770c5e031 100644 --- a/api/src/handlers/utils.rs +++ b/api/src/handlers/utils.rs @@ -56,7 +56,7 @@ pub fn get_output( match res { Ok(output_pos) => { return Ok(( - Output::new(&commit, output_pos.height, output_pos.position), + Output::new(&commit, output_pos.height, output_pos.pos), x.clone(), )); } @@ -100,7 +100,7 @@ pub fn get_output_v2( for x in outputs.iter() { let res = chain.is_unspent(x); match res { - Ok(output_pos) => match chain.get_unspent_output_at(output_pos.position) { + Ok(output_pos) => match chain.get_unspent_output_at(output_pos.pos) { Ok(output) => { let header = if include_merkle_proof && output.is_coinbase() { chain.get_header_by_height(output_pos.height).ok() diff --git a/chain/src/chain.rs b/chain/src/chain.rs index b8ba87accb..e4fceda3c0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -30,7 +30,7 @@ use 
crate::store; use crate::txhashset; use crate::txhashset::{PMMRHandle, TxHashSet}; use crate::types::{ - BlockStatus, ChainAdapter, NoStatus, Options, OutputMMRPosition, Tip, TxHashsetWriteStatus, + BlockStatus, ChainAdapter, NoStatus, Options, OutputPos, Tip, TxHashsetWriteStatus, }; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::RwLock; @@ -495,9 +495,8 @@ impl Chain { /// spent. This querying is done in a way that is consistent with the /// current chain state, specifically the current winning (valid, most /// work) fork. - pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result { - let txhashset = self.txhashset.read(); - txhashset.is_unspent(output_ref) + pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result { + self.txhashset.read().is_unspent(output_ref) } /// Retrieves an unspent output using its PMMR position @@ -1510,6 +1509,7 @@ fn setup_head( // We will update this later once we have the correct header_root. batch.save_block_header(&genesis.header)?; batch.save_block(&genesis)?; + batch.save_spent_index(&genesis.hash(), &vec![])?; batch.save_body_head(&Tip::from_header(&genesis.header))?; if !genesis.kernels().is_empty() { diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index dc37bf6f7a..b8351a2701 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -23,7 +23,7 @@ use crate::core::pow; use crate::error::{Error, ErrorKind}; use crate::store; use crate::txhashset; -use crate::types::{Options, Tip}; +use crate::types::{Options, OutputPos, Tip}; use crate::util::RwLock; use grin_store; use std::sync::Arc; @@ -121,7 +121,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result) -> Result, batch: &store::Batch<'_>, -) -> Result<(), Error> { - ext.extension.apply_block(block, batch)?; +) -> Result, Error> { + let spent = ext.extension.apply_block(block, batch)?; ext.extension.validate_roots(&block.header)?; ext.extension.validate_sizes(&block.header)?; - Ok(()) + Ok(spent) } -/// 
Officially adds the block to our chain. +/// Officially adds the block to our chain (possibly on a losing fork). +/// Adds the associated block_sums and spent_index as well. /// Header must be added separately (assume this has been done previously). -fn add_block(b: &Block, block_sums: &BlockSums, batch: &store::Batch<'_>) -> Result<(), Error> { - batch - .save_block(b) - .map_err(|e| ErrorKind::StoreErr(e, "pipe save block".to_owned()))?; +fn add_block( + b: &Block, + block_sums: &BlockSums, + spent: &Vec, + batch: &store::Batch<'_>, +) -> Result<(), Error> { + batch.save_block(b)?; batch.save_block_sums(&b.hash(), block_sums)?; + batch.save_spent_index(&b.hash(), spent)?; Ok(()) } diff --git a/chain/src/store.rs b/chain/src/store.rs index fb088c2694..9abe73e7e5 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -19,11 +19,12 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader, BlockSums}; use crate::core::pow::Difficulty; use crate::core::ser::ProtocolVersion; -use crate::types::Tip; +use crate::types::{OutputPos, Tip}; use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; use grin_store as store; use grin_store::{option_to_not_found, to_key, Error, SerIterator}; +use std::convert::TryInto; use std::sync::Arc; const STORE_SUBPATH: &str = "chain"; @@ -35,6 +36,7 @@ const TAIL_PREFIX: u8 = b'T'; const OUTPUT_POS_PREFIX: u8 = b'p'; const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B'; const BLOCK_SUMS_PREFIX: u8 = b'M'; +const BLOCK_SPENT_PREFIX: u8 = b'S'; /// All chain-related database operations pub struct ChainStore { @@ -178,17 +180,19 @@ impl<'a> Batch<'a> { self.db.exists(&to_key(BLOCK_PREFIX, &mut h.to_vec())) } - /// Save the block and the associated input bitmap. + /// Save the block to the db. /// Note: the block header is not saved to the db here, assumes this has already been done. 
pub fn save_block(&self, b: &Block) -> Result<(), Error> { - // Build the "input bitmap" for this new block and store it in the db. - let bitmap = self.build_block_input_bitmap(b); - self.save_block_input_bitmap(&b.hash(), &bitmap)?; - - // Save the block itself to the db. self.db .put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)?; + Ok(()) + } + /// We maintain a "spent" index for each full block to allow the output_pos + /// to be easily reverted during rewind. + pub fn save_spent_index(&self, h: &Hash, spent: &Vec) -> Result<(), Error> { + self.db + .put_ser(&to_key(BLOCK_SPENT_PREFIX, &mut h.to_vec())[..], spent)?; Ok(()) } @@ -218,7 +222,7 @@ impl<'a> Batch<'a> { // Not an error if these fail. { let _ = self.delete_block_sums(bh); - let _ = self.delete_block_input_bitmap(bh); + let _ = self.delete_spent_index(bh); } Ok(()) @@ -248,6 +252,12 @@ impl<'a> Batch<'a> { ) } + /// Delete the output_pos index entry for a spent output. + pub fn delete_output_pos_height(&self, commit: &Commitment) -> Result<(), Error> { + self.db + .delete(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())) + } + /// Iterator over the output_pos index. pub fn output_pos_iter(&self) -> Result, Error> { let key = to_key(OUTPUT_POS_PREFIX, &mut "".to_string().into_bytes()); @@ -282,18 +292,15 @@ impl<'a> Batch<'a> { ) } - /// Save the input bitmap for the block. - fn save_block_input_bitmap(&self, bh: &Hash, bm: &Bitmap) -> Result<(), Error> { - self.db.put( - &to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec())[..], - &bm.serialize(), - ) - } + /// Delete the block spent index. + fn delete_spent_index(&self, bh: &Hash) -> Result<(), Error> { + // Clean up the legacy input bitmap as well. + let _ = self + .db + .delete(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec())); - /// Delete the block input bitmap. 
- fn delete_block_input_bitmap(&self, bh: &Hash) -> Result<(), Error> { self.db - .delete(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec())) + .delete(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec())) } /// Save block_sums for the block. @@ -315,34 +322,41 @@ impl<'a> Batch<'a> { self.db.delete(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec())) } - /// Build the input bitmap for the given block. - fn build_block_input_bitmap(&self, block: &Block) -> Bitmap { - block - .inputs() - .iter() - .filter_map(|x| self.get_output_pos(&x.commitment()).ok()) - .map(|x| x as u32) - .collect() + /// Get the block input bitmap based on our spent index. + /// Fallback to legacy block input bitmap from the db. + pub fn get_block_input_bitmap(&self, bh: &Hash) -> Result { + if let Ok(spent) = self.get_spent_index(bh) { + let bitmap = spent + .into_iter() + .map(|x| x.pos.try_into().unwrap()) + .collect(); + Ok(bitmap) + } else { + self.get_legacy_input_bitmap(bh) + } } - /// TODO - This should return a Vec<(u64, u64, Commitment)> for spent outputs. - /// TODO - So we can "undo" a block easily in the output_pos. - /// Index needs pos and height for each commitment and we need to add these back in - /// as necessary. - /// - /// Get the block input bitmap from the db or build the bitmap from - /// the full block from the db (if the block is found). - pub fn get_block_input_bitmap(&self, bh: &Hash) -> Result { + fn get_legacy_input_bitmap(&self, bh: &Hash) -> Result { if let Ok(Some(bytes)) = self .db .get(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec())) { Ok(Bitmap::deserialize(&bytes)) } else { - panic!("should never happen, but fixme"); + Err(Error::NotFoundErr("legacy block input bitmap".to_string()).into()) } } + /// Get the "spent index" from the db for the specified block. + /// If we need to rewind a block then we use this to "unspend" the spent outputs. 
+ pub fn get_spent_index(&self, bh: &Hash) -> Result, Error> { + option_to_not_found( + self.db + .get_ser(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec())), + || format!("spent index: {}", bh), + ) + } + /// Commits this batch. If it's a child batch, it will be merged with the /// parent, otherwise the batch is written to db. pub fn commit(self) -> Result<(), Error> { diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 8cea9a22c6..23393feb6a 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -25,7 +25,7 @@ use crate::error::{Error, ErrorKind}; use crate::store::{Batch, ChainStore}; use crate::txhashset::bitmap_accumulator::BitmapAccumulator; use crate::txhashset::{RewindableKernelView, UTXOView}; -use crate::types::{OutputMMRPosition, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; +use crate::types::{OutputPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::{file, secp_static, zip}; use croaring::Bitmap; @@ -223,17 +223,18 @@ impl TxHashSet { /// Check if an output is unspent. /// We look in the index to find the output MMR pos. /// Then we check the entry in the output MMR and confirm the hash matches. 
- pub fn is_unspent(&self, output_id: &OutputIdentifier) -> Result { - match self.commit_index.get_output_pos_height(&output_id.commit) { - Ok((pos, block_height)) => { + pub fn is_unspent(&self, output_id: &OutputIdentifier) -> Result { + let commit = output_id.commit; + match self.commit_index.get_output_pos_height(&commit) { + Ok((pos, height)) => { let output_pmmr: ReadonlyPMMR<'_, Output, _> = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); if let Some(hash) = output_pmmr.get_hash(pos) { if hash == output_id.hash_with_index(pos - 1) { - Ok(OutputMMRPosition { - output_mmr_hash: hash, - position: pos, - height: block_height, + Ok(OutputPos { + pos, + height, + commit, }) } else { Err(ErrorKind::TxHashSetErr("txhashset hash mismatch".to_string()).into()) @@ -367,7 +368,6 @@ impl TxHashSet { let head_header = batch.head_header()?; - // TODO - What to do with this? let rewind_rm_pos = input_pos_to_rewind(&horizon_header, &head_header, batch)?; debug!("txhashset: check_compact output mmr backend..."); @@ -380,9 +380,6 @@ impl TxHashSet { .backend .check_compact(horizon_header.output_mmr_size, &rewind_rm_pos)?; - debug!("txhashset: compact height pos index..."); - self.compact_height_pos_index(batch)?; - debug!("txhashset: ... compaction finished"); Ok(()) @@ -433,32 +430,6 @@ impl TxHashSet { ); Ok(()) } - - fn compact_height_pos_index(&self, batch: &Batch<'_>) -> Result<(), Error> { - let now = Instant::now(); - let output_pmmr = - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); - let last_pos = output_pmmr.unpruned_size(); - - let deleted = batch - .output_pos_iter()? - .filter(|(_, (pos, _))| { - // Note we use get_from_file() here as we want to ensure we have an entry - // in the index for *every* output still in the file, not just the "unspent" - // outputs. This is because we need to support rewind to handle fork/reorg. 
- // Rewind may "unspend" recently spent, but not yet pruned outputs, and the - // index must be consistent in this situation. - *pos <= last_pos && output_pmmr.get_from_file(*pos).is_none() - }) - .map(|(key, _)| batch.delete(&key)) - .count(); - debug!( - "compact_output_pos_index: deleted {} entries from the index, took {}s", - deleted, - now.elapsed().as_secs(), - ); - Ok(()) - } } /// Starts a new unit of work to extend (or rewind) the chain with additional @@ -925,18 +896,28 @@ impl<'a> Extension<'a> { } /// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs). - pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result<(), Error> { + pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result, Error> { let mut affected_pos = vec![]; + let mut spent = vec![]; for out in b.outputs() { let pos = self.apply_output(out, batch)?; affected_pos.push(pos); + + // TODO - we do *not* want to actually add to batch here? + // if we are processing a fork we want this batch to rollback. batch.save_output_pos_height(&out.commitment(), pos, b.header.height)?; } for input in b.inputs() { - let pos = self.apply_input(input, batch)?; - affected_pos.push(pos); + let spent_pos = self.apply_input(input, batch)?; + affected_pos.push(spent_pos.pos); + + // TODO - we do *not* want to actually add to batch here? + // if processing a fork we want this batch to rollback. + batch.delete_output_pos_height(&spent_pos.commit)?; + + spent.push(spent_pos); } for kernel in b.kernels() { @@ -949,7 +930,7 @@ impl<'a> Extension<'a> { // Update the head of the extension to reflect the block we just applied. 
self.head = Tip::from_header(&b.header); - Ok(()) + Ok(spent) } fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> { @@ -971,10 +952,9 @@ impl<'a> Extension<'a> { ) } - fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { + fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { let commit = input.commitment(); - let pos_res = batch.get_output_pos(&commit); - if let Ok(pos) = pos_res { + if let Ok((pos, height)) = batch.get_output_pos_height(&commit) { // First check this input corresponds to an existing entry in the output MMR. if let Some(hash) = self.output_pmmr.get_hash(pos) { if hash != input.hash_with_index(pos - 1) { @@ -992,7 +972,11 @@ impl<'a> Extension<'a> { self.rproof_pmmr .prune(pos) .map_err(ErrorKind::TxHashSetErr)?; - Ok(pos) + Ok(OutputPos { + pos, + height, + commit, + }) } Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()), Err(e) => Err(ErrorKind::TxHashSetErr(e).into()), @@ -1108,11 +1092,7 @@ impl<'a> Extension<'a> { if head_header.height <= header.height { // Nothing to rewind but we do want to truncate the MMRs at header for consistency. 
- self.rewind_mmrs_to_pos( - header.output_mmr_size, - header.kernel_mmr_size, - &Bitmap::create(), - )?; + self.rewind_mmrs_to_pos(header.output_mmr_size, header.kernel_mmr_size, &vec![])?; self.apply_to_bitmap_accumulator(&[header.output_mmr_size])?; } else { let mut current = head_header; @@ -1138,23 +1118,43 @@ impl<'a> Extension<'a> { header: &BlockHeader, batch: &Batch<'_>, ) -> Result<(), Error> { - let input_bitmap = batch.get_block_input_bitmap(&header.hash())?; + let spent = batch.get_spent_index(&header.hash()); + + let spent_pos: Vec<_> = if let Ok(ref spent) = spent { + spent.iter().map(|x| x.pos).collect() + } else { + warn!( + "rewind_single_block: fallback to legacy input bitmap for block {} at {}", + header.hash(), + header.height + ); + let bitmap = batch.get_block_input_bitmap(&header.hash())?; + bitmap.iter().map(|x| x.into()).collect() + }; if header.height == 0 { - self.rewind_mmrs_to_pos(0, 0, &Bitmap::create())?; + self.rewind_mmrs_to_pos(0, 0, &spent_pos)?; } else { let prev = batch.get_previous_header(&header)?; - self.rewind_mmrs_to_pos(prev.output_mmr_size, prev.kernel_mmr_size, &input_bitmap)?; + self.rewind_mmrs_to_pos(prev.output_mmr_size, prev.kernel_mmr_size, &spent_pos)?; } // Update our BitmapAccumulator based on affected outputs. // We want to "unspend" every rewound spent output. // Treat last_pos as an affected output to ensure we rebuild far enough back. - let mut affected_pos: Vec<_> = input_bitmap.iter().map(|x| x.into()).collect(); + let mut affected_pos = spent_pos.clone(); affected_pos.push(self.output_pmmr.last_pos); self.apply_to_bitmap_accumulator(&affected_pos)?; - // TODO - we need to update the output_pos index here to reflect rewound state. + // Update output_pos based on "unspending" all spent pos from this block. + // This is necessary to ensure the output_pos index correclty reflects a + // reused output commitment. For example an output at pos 1, spent, reused at pos 2. 
+ // The output_pos index should be updated to reflect the old pos 1 when unspent. + if let Ok(spent) = spent { + for x in spent { + batch.save_output_pos_height(&x.commit, x.pos, x.height)?; + } + } Ok(()) } @@ -1165,13 +1165,14 @@ impl<'a> Extension<'a> { &mut self, output_pos: u64, kernel_pos: u64, - rewind_rm_pos: &Bitmap, + spent_pos: &[u64], ) -> Result<(), Error> { + let bitmap: Bitmap = spent_pos.into_iter().map(|x| *x as u32).collect(); self.output_pmmr - .rewind(output_pos, rewind_rm_pos) + .rewind(output_pos, &bitmap) .map_err(&ErrorKind::TxHashSetErr)?; self.rproof_pmmr - .rewind(output_pos, rewind_rm_pos) + .rewind(output_pos, &bitmap) .map_err(&ErrorKind::TxHashSetErr)?; self.kernel_pmmr .rewind(kernel_pos, &Bitmap::create()) @@ -1610,40 +1611,14 @@ fn input_pos_to_rewind( head_header: &BlockHeader, batch: &Batch<'_>, ) -> Result { - if head_header.height <= block_header.height { - return Ok(Bitmap::create()); - } - - // Batching up the block input bitmaps, and running fast_or() on every batch of 256 bitmaps. - // so to avoid maintaining a huge vec of bitmaps. - let bitmap_fast_or = |b_res, block_input_bitmaps: &mut Vec| -> Option { - if let Some(b) = b_res { - block_input_bitmaps.push(b); - if block_input_bitmaps.len() < 256 { - return None; - } - } - let bitmap = Bitmap::fast_or(&block_input_bitmaps.iter().collect::>()); - block_input_bitmaps.clear(); - block_input_bitmaps.push(bitmap.clone()); - Some(bitmap) - }; - - let mut block_input_bitmaps: Vec = vec![]; - + let mut bitmap = Bitmap::create(); let mut current = head_header.clone(); - while current.hash() != block_header.hash() { - if current.height < 1 { - break; - } - - // I/O should be minimized or eliminated here for most - // rewind scenarios. - if let Ok(b_res) = batch.get_block_input_bitmap(¤t.hash()) { - bitmap_fast_or(Some(b_res), &mut block_input_bitmaps); + while current.height > block_header.height { + // TODO - read the spent index here and explicitly fallback to input bitmap. 
+ if let Ok(block_bitmap) = batch.get_block_input_bitmap(¤t.hash()) { + bitmap.or_inplace(&block_bitmap); } current = batch.get_previous_header(¤t)?; } - - bitmap_fast_or(None, &mut block_input_bitmaps).ok_or_else(|| ErrorKind::Bitmap.into()) + Ok(bitmap) } diff --git a/chain/src/types.rs b/chain/src/types.rs index dafe2aed76..96f3d6d699 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -20,8 +20,9 @@ use std::sync::Arc; use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; use crate::core::core::{Block, BlockHeader, HeaderVersion}; use crate::core::pow::Difficulty; -use crate::core::ser::{self, PMMRIndexHashable}; +use crate::core::ser::{self, PMMRIndexHashable, Readable, Reader, Writeable, Writer}; use crate::error::{Error, ErrorKind}; +use crate::util::secp::pedersen::Commitment; use crate::util::RwLock; bitflags! { @@ -258,16 +259,38 @@ impl OutputRoots { } } -/// A helper to hold the output pmmr position of the txhashset in order to keep them -/// readable. +/// Minimal struct representing an output commitment at a known position (and block height) +/// in an MMR. #[derive(Debug)] -pub struct OutputMMRPosition { - /// The hash at the output position in the MMR. - pub output_mmr_hash: Hash, +pub struct OutputPos { /// MMR position - pub position: u64, + pub pos: u64, /// Block height pub height: u64, + /// Output commitment + pub commit: Commitment, +} + +impl Readable for OutputPos { + fn read(reader: &mut dyn Reader) -> Result { + let pos = reader.read_u64()?; + let height = reader.read_u64()?; + let commit = Commitment::read(reader)?; + Ok(OutputPos { + pos, + height, + commit, + }) + } +} + +impl Writeable for OutputPos { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + writer.write_u64(self.pos)?; + writer.write_u64(self.height)?; + self.commit.write(writer)?; + Ok(()) + } } /// The tip of a fork. 
A handle to the fork ancestry from its leaf in the diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index 7d8ab8b581..b74eb80630 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -398,6 +398,7 @@ fn mine_reorg() { #[test] fn mine_forks() { + clean_output_dir(".grin2"); global::set_mining_mode(ChainTypes::AutomatedTesting); { let chain = init_chain(".grin2", pow::mine_genesis_block().unwrap()); @@ -445,6 +446,7 @@ fn mine_forks() { #[test] fn mine_losing_fork() { + clean_output_dir(".grin3"); global::set_mining_mode(ChainTypes::AutomatedTesting); let kc = ExtKeychain::from_random_seed(false).unwrap(); { @@ -481,6 +483,7 @@ fn mine_losing_fork() { #[test] fn longer_fork() { + clean_output_dir(".grin4"); global::set_mining_mode(ChainTypes::AutomatedTesting); let kc = ExtKeychain::from_random_seed(false).unwrap(); // to make it easier to compute the txhashset roots in the test, we @@ -525,11 +528,9 @@ fn longer_fork() { #[test] fn spend_in_fork_and_compact() { + clean_output_dir(".grin6"); global::set_mining_mode(ChainTypes::AutomatedTesting); util::init_test_logger(); - // Cleanup chain directory - clean_output_dir(".grin6"); - { let chain = init_chain(".grin6", pow::mine_genesis_block().unwrap()); let prev = chain.head_header().unwrap(); From 820d96d2cdd1feced6e95ddbeb85775258ee8617 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 17 Feb 2020 15:12:31 +0000 Subject: [PATCH 3/9] commit --- chain/src/txhashset/txhashset.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 23393feb6a..ccb273e249 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -1599,8 +1599,6 @@ pub fn clean_txhashset_folder(root_dir: &PathBuf) { } } -/// TODO - suspect this can be simplified *significantly*. 
-/// /// Given a block header to rewind to and the block header at the /// head of the current chain state, we need to calculate the positions /// of all inputs (spent outputs) we need to "undo" during a rewind. @@ -1614,7 +1612,6 @@ fn input_pos_to_rewind( let mut bitmap = Bitmap::create(); let mut current = head_header.clone(); while current.height > block_header.height { - // TODO - read the spent index here and explicitly fallback to input bitmap. if let Ok(block_bitmap) = batch.get_block_input_bitmap(¤t.hash()) { bitmap.or_inplace(&block_bitmap); } From 10d3a8108cd700598b64683bf290aabb8e998714 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Tue, 18 Feb 2020 13:44:34 +0000 Subject: [PATCH 4/9] cleanup --- chain/src/chain.rs | 22 +++++++++++++---- chain/src/txhashset/txhashset.rs | 41 ++++++++++++++++++++++++++++---- 2 files changed, 53 insertions(+), 10 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index e4fceda3c0..125246cc1f 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -199,6 +199,15 @@ impl Chain { &mut txhashset, )?; + // Initialize the output_pos index based on UTXO set. + // This is fast as we only look for stale and missing entries + // and do not need to rebuild the entire index. + { + let batch = store.batch()?; + txhashset.init_output_pos_index(&header_pmmr, &batch)?; + batch.commit()?; + } + let chain = Chain { db_root, store, @@ -972,7 +981,7 @@ impl Chain { } // Rebuild our output_pos index in the db based on fresh UTXO set. - txhashset.init_output_pos_index(&header_pmmr, &mut batch)?; + txhashset.init_output_pos_index(&header_pmmr, &batch)?; // Commit all the changes to the db. 
batch.commit()?; @@ -1014,7 +1023,7 @@ impl Chain { fn remove_historical_blocks( &self, header_pmmr: &txhashset::PMMRHandle, - batch: &mut store::Batch<'_>, + batch: &store::Batch<'_>, ) -> Result<(), Error> { if self.archive_mode { return Ok(()); @@ -1088,7 +1097,7 @@ impl Chain { // Take a write lock on the txhashet and start a new writeable db batch. let header_pmmr = self.header_pmmr.read(); let mut txhashset = self.txhashset.write(); - let mut batch = self.store.batch()?; + let batch = self.store.batch()?; // Compact the txhashset itself (rewriting the pruned backend files). { @@ -1099,14 +1108,17 @@ impl Chain { let horizon_hash = header_pmmr.get_header_hash_by_height(horizon_height)?; let horizon_header = batch.get_block_header(&horizon_hash)?; - txhashset.compact(&horizon_header, &mut batch)?; + txhashset.compact(&horizon_header, &batch)?; } // If we are not in archival mode remove historical blocks from the db. if !self.archive_mode { - self.remove_historical_blocks(&header_pmmr, &mut batch)?; + self.remove_historical_blocks(&header_pmmr, &batch)?; } + // Make sure our output_pos index is consistent with the UTXO set. + txhashset.init_output_pos_index(&header_pmmr, &batch)?; + // Commit all the above db changes. batch.commit()?; diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index ccb273e249..020474ac57 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -362,7 +362,7 @@ impl TxHashSet { pub fn compact( &mut self, horizon_header: &BlockHeader, - batch: &mut Batch<'_>, + batch: &Batch<'_>, ) -> Result<(), Error> { debug!("txhashset: starting compaction..."); @@ -385,27 +385,58 @@ impl TxHashSet { Ok(()) } - /// Initialize the output pos index based on current UTXO set. - /// This is a costly operation performed only when we receive a full new chain state. + /// (Re)build the output_pos index to be consistent with the current UTXO set. 
+ /// Remove any "stale" index entries that do not correspond to outputs in the UTXO set. + /// Add any missing index entries based on UTXO set. pub fn init_output_pos_index( &self, header_pmmr: &PMMRHandle, - batch: &mut Batch<'_>, + batch: &Batch<'_>, ) -> Result<(), Error> { let now = Instant::now(); let output_pmmr = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); + // Iterate over the current output_pos index, removing any entries that + // do not point to to the correct output. + let mut removed_count = 0; + for (key, (pos, _)) in batch.output_pos_iter()? { + if let Some(out) = output_pmmr.get_data(pos) { + if let Ok(pos_via_mmr) = batch.get_output_pos(&out.commitment()) { + if pos == pos_via_mmr { + continue; + } + } + } + batch.delete(&key)?; + removed_count += 1; + } + debug!( + "init_output_pos_index: removed {} stale index entries", + removed_count + ); + let mut outputs_pos: Vec<(Commitment, u64)> = vec![]; for pos in output_pmmr.leaf_pos_iter() { if let Some(out) = output_pmmr.get_data(pos) { outputs_pos.push((out.commit, pos)); } } + + debug!("init_output_pos_index: {} utxos", outputs_pos.len()); + + outputs_pos.retain(|x| batch.get_output_pos_height(&x.0).is_err()); + + debug!( + "init_output_pos_index: {} utxos with missing index entries", + outputs_pos.len() + ); + if outputs_pos.is_empty() { return Ok(()); } + let total_outputs = outputs_pos.len(); let max_height = batch.head()?.height; @@ -424,7 +455,7 @@ impl TxHashSet { } } debug!( - "init_height_pos_index: {} UTXOs, took {}s", + "init_height_pos_index: added entries for {} utxos, took {}s", total_outputs, now.elapsed().as_secs(), ); From 29b360cdaac45e0620686f1d532b216e395bd10e Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Tue, 18 Feb 2020 17:00:11 +0000 Subject: [PATCH 5/9] add test coverage for output_pos index transactional semantics during rewind --- chain/tests/mine_simple_chain.rs | 123 
++++++++++++++++++++++++++++++- 1 file changed, 119 insertions(+), 4 deletions(-) diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index b74eb80630..7211b72e0b 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -526,6 +526,84 @@ fn longer_fork() { clean_output_dir(".grin4"); } +#[test] +fn spend_rewind_spend() { + global::set_mining_mode(ChainTypes::AutomatedTesting); + util::init_test_logger(); + clean_output_dir(".grin_spend_rewind_spend"); + + { + let chain = init_chain( + ".grin_spend_rewind_spend", + pow::mine_genesis_block().unwrap(), + ); + let prev = chain.head_header().unwrap(); + let kc = ExtKeychain::from_random_seed(false).unwrap(); + let pb = ProofBuilder::new(&kc); + + let mut head = prev; + + // mine the first block and keep track of the block_hash + // so we can spend the coinbase later + let b = prepare_block_key_idx(&kc, &head, &chain, 2, 1); + let out_id = OutputIdentifier::from_output(&b.outputs()[0]); + assert!(out_id.features.is_coinbase()); + head = b.header.clone(); + chain + .process_block(b.clone(), chain::Options::SKIP_POW) + .unwrap(); + + // now mine three further blocks + for n in 3..6 { + let b = prepare_block(&kc, &head, &chain, n); + head = b.header.clone(); + chain.process_block(b, chain::Options::SKIP_POW).unwrap(); + } + + // Make a note of this header as we will rewind back to here later. 
+ let rewind_to = head.clone(); + + let key_id_coinbase = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier(); + let key_id30 = ExtKeychainPath::new(1, 30, 0, 0, 0).to_identifier(); + + let tx1 = build::transaction( + KernelFeatures::Plain { fee: 20000 }, + vec![ + build::coinbase_input(consensus::REWARD, key_id_coinbase.clone()), + build::output(consensus::REWARD - 20000, key_id30.clone()), + ], + &kc, + &pb, + ) + .unwrap(); + + let b = prepare_block_tx(&kc, &head, &chain, 6, vec![&tx1]); + head = b.header.clone(); + chain + .process_block(b.clone(), chain::Options::SKIP_POW) + .unwrap(); + chain.validate(false).unwrap(); + + // Now mine another block, reusing the private key for the coinbase we just spent. + { + let b = prepare_block_key_idx(&kc, &head, &chain, 7, 1); + chain.process_block(b, chain::Options::SKIP_POW).unwrap(); + } + + // Now mine a competing block also spending the same coinbase output from earlier. + // Rewind back prior to the tx that spends it to "unspend" it. + { + let b = prepare_block_tx(&kc, &rewind_to, &chain, 6, vec![&tx1]); + chain + .process_block(b.clone(), chain::Options::SKIP_POW) + .unwrap(); + chain.validate(false).unwrap(); + } + } + + clean_output_dir(".grin_spend_rewind_spend"); +} + #[test] fn spend_in_fork_and_compact() { clean_output_dir(".grin6"); @@ -731,15 +809,31 @@ fn output_header_mappings() { clean_output_dir(".grin_header_for_output"); } +// Use diff as both diff *and* key_idx for convenience (deterministic private key for test blocks) fn prepare_block(kc: &K, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block where K: Keychain, { - let mut b = prepare_block_nosum(kc, prev, diff, vec![]); + let key_idx = diff as u32; + prepare_block_key_idx(kc, prev, chain, diff, key_idx) +} + +fn prepare_block_key_idx( + kc: &K, + prev: &BlockHeader, + chain: &Chain, + diff: u64, + key_idx: u32, +) -> Block +where + K: Keychain, +{ + let mut b = prepare_block_nosum(kc, prev, diff, key_idx, vec![]); 
chain.set_txhashset_roots(&mut b).unwrap(); b } +// Use diff as both diff *and* key_idx for convenience (deterministic private key for test blocks) fn prepare_block_tx( kc: &K, prev: &BlockHeader, @@ -750,17 +844,38 @@ fn prepare_block_tx( where K: Keychain, { - let mut b = prepare_block_nosum(kc, prev, diff, txs); + let key_idx = diff as u32; + prepare_block_tx_key_idx(kc, prev, chain, diff, key_idx, txs) +} + +fn prepare_block_tx_key_idx( + kc: &K, + prev: &BlockHeader, + chain: &Chain, + diff: u64, + key_idx: u32, + txs: Vec<&Transaction>, +) -> Block +where + K: Keychain, +{ + let mut b = prepare_block_nosum(kc, prev, diff, key_idx, txs); chain.set_txhashset_roots(&mut b).unwrap(); b } -fn prepare_block_nosum(kc: &K, prev: &BlockHeader, diff: u64, txs: Vec<&Transaction>) -> Block +fn prepare_block_nosum( + kc: &K, + prev: &BlockHeader, + diff: u64, + key_idx: u32, + txs: Vec<&Transaction>, +) -> Block where K: Keychain, { let proof_size = global::proofsize(); - let key_id = ExtKeychainPath::new(1, diff as u32, 0, 0, 0).to_identifier(); + let key_id = ExtKeychainPath::new(1, key_idx, 0, 0, 0).to_identifier(); let fees = txs.iter().map(|tx| tx.fee()).sum(); let reward = From 9fcfb17d527dee5fa3b1984452644a10c3eb6c88 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Tue, 18 Feb 2020 17:20:52 +0000 Subject: [PATCH 6/9] commit --- chain/src/txhashset/txhashset.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 020474ac57..d4f1a434c4 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -1139,16 +1139,13 @@ impl<'a> Extension<'a> { Ok(()) } - // TODO - We need to "undo" the output_pos index changes here for each block - // that we rewind. - // If an output becomes unspent we must ensure it is back in the output_pos index - // with the correct pos. 
- // This is crucial for output commitments that have been reused. + // Rewind the MMRs, the bitmap accumulator and the output_pos index. fn rewind_single_block( &mut self, header: &BlockHeader, batch: &Batch<'_>, ) -> Result<(), Error> { + // The spent index allows us to conveniently "unspend" everything in a block. let spent = batch.get_spent_index(&header.hash()); let spent_pos: Vec<_> = if let Ok(ref spent) = spent { @@ -1177,6 +1174,12 @@ impl<'a> Extension<'a> { affected_pos.push(self.output_pmmr.last_pos); self.apply_to_bitmap_accumulator(&affected_pos)?; + // Remove any entries from the output_pos created by the block being rewound. + let block = batch.get_block(&header.hash())?; + for out in block.outputs() { + batch.delete_output_pos_height(&out.commitment())?; + } + // Update output_pos based on "unspending" all spent pos from this block. // This is necessary to ensure the output_pos index correclty reflects a // reused output commitment. For example an output at pos 1, spent, reused at pos 2. 
From 00edeaa91c78e4130cc76948f8c1e3012eed44c1 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Wed, 19 Feb 2020 13:29:05 +0000 Subject: [PATCH 7/9] do not store commitments in spent_index just use the order of the inputs in the block --- chain/src/chain.rs | 4 +-- chain/src/pipe.rs | 6 ++--- chain/src/store.rs | 6 ++--- chain/src/txhashset/txhashset.rs | 44 ++++++++++++-------------------- chain/src/types.rs | 23 +++++++++-------- 5 files changed, 38 insertions(+), 45 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 125246cc1f..0c2f38fd1b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -30,7 +30,7 @@ use crate::store; use crate::txhashset; use crate::txhashset::{PMMRHandle, TxHashSet}; use crate::types::{ - BlockStatus, ChainAdapter, NoStatus, Options, OutputPos, Tip, TxHashsetWriteStatus, + BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus, }; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::RwLock; @@ -504,7 +504,7 @@ impl Chain { /// spent. This querying is done in a way that is consistent with the /// current chain state, specifically the current winning (valid, most /// work) fork. 
- pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result { + pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result { self.txhashset.read().is_unspent(output_ref) } diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index b8351a2701..2161dd71c5 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -23,7 +23,7 @@ use crate::core::pow; use crate::error::{Error, ErrorKind}; use crate::store; use crate::txhashset; -use crate::types::{Options, OutputPos, Tip}; +use crate::types::{CommitPos, Options, Tip}; use crate::util::RwLock; use grin_store; use std::sync::Arc; @@ -431,7 +431,7 @@ fn apply_block_to_txhashset( block: &Block, ext: &mut txhashset::ExtensionPair<'_>, batch: &store::Batch<'_>, -) -> Result, Error> { +) -> Result, Error> { let spent = ext.extension.apply_block(block, batch)?; ext.extension.validate_roots(&block.header)?; ext.extension.validate_sizes(&block.header)?; @@ -444,7 +444,7 @@ fn apply_block_to_txhashset( fn add_block( b: &Block, block_sums: &BlockSums, - spent: &Vec, + spent: &Vec, batch: &store::Batch<'_>, ) -> Result<(), Error> { batch.save_block(b)?; diff --git a/chain/src/store.rs b/chain/src/store.rs index 9abe73e7e5..3ea7c94c2e 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -19,7 +19,7 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader, BlockSums}; use crate::core::pow::Difficulty; use crate::core::ser::ProtocolVersion; -use crate::types::{OutputPos, Tip}; +use crate::types::{CommitPos, Tip}; use crate::util::secp::pedersen::Commitment; use croaring::Bitmap; use grin_store as store; @@ -190,7 +190,7 @@ impl<'a> Batch<'a> { /// We maintain a "spent" index for each full block to allow the output_pos /// to be easily reverted during rewind. 
- pub fn save_spent_index(&self, h: &Hash, spent: &Vec) -> Result<(), Error> { + pub fn save_spent_index(&self, h: &Hash, spent: &Vec) -> Result<(), Error> { self.db .put_ser(&to_key(BLOCK_SPENT_PREFIX, &mut h.to_vec())[..], spent)?; Ok(()) @@ -349,7 +349,7 @@ impl<'a> Batch<'a> { /// Get the "spent index" from the db for the specified block. /// If we need to rewind a block then we use this to "unspend" the spent outputs. - pub fn get_spent_index(&self, bh: &Hash) -> Result, Error> { + pub fn get_spent_index(&self, bh: &Hash) -> Result, Error> { option_to_not_found( self.db .get_ser(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec())), diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index d4f1a434c4..140d3f3ce8 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -25,7 +25,7 @@ use crate::error::{Error, ErrorKind}; use crate::store::{Batch, ChainStore}; use crate::txhashset::bitmap_accumulator::BitmapAccumulator; use crate::txhashset::{RewindableKernelView, UTXOView}; -use crate::types::{OutputPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; +use crate::types::{CommitPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus}; use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::{file, secp_static, zip}; use croaring::Bitmap; @@ -223,7 +223,7 @@ impl TxHashSet { /// Check if an output is unspent. /// We look in the index to find the output MMR pos. /// Then we check the entry in the output MMR and confirm the hash matches. 
- pub fn is_unspent(&self, output_id: &OutputIdentifier) -> Result { + pub fn is_unspent(&self, output_id: &OutputIdentifier) -> Result { let commit = output_id.commit; match self.commit_index.get_output_pos_height(&commit) { Ok((pos, height)) => { @@ -231,11 +231,7 @@ impl TxHashSet { ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); if let Some(hash) = output_pmmr.get_hash(pos) { if hash == output_id.hash_with_index(pos - 1) { - Ok(OutputPos { - pos, - height, - commit, - }) + Ok(CommitPos { pos, height }) } else { Err(ErrorKind::TxHashSetErr("txhashset hash mismatch".to_string()).into()) } @@ -927,27 +923,28 @@ impl<'a> Extension<'a> { } /// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs). - pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result, Error> { + /// Returns a vec of commit_pos representing the pos and height of the outputs spent + /// by this block. + pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result, Error> { let mut affected_pos = vec![]; let mut spent = vec![]; + // Apply the output to the output and rangeproof MMRs. + // Add pos to affected_pos to update the accumulator later on. + // Add the new output to the output_pos index. for out in b.outputs() { let pos = self.apply_output(out, batch)?; affected_pos.push(pos); - - // TODO - we do *not* want to actually add to batch here? - // if we are processing a fork we want this batch to rollback. batch.save_output_pos_height(&out.commitment(), pos, b.header.height)?; } + // Remove the output from the output and rangeproof MMRs. + // Add spent_pos to affected_pos to update the accumulator later on. + // Remove the spent output from the output_pos index. for input in b.inputs() { let spent_pos = self.apply_input(input, batch)?; affected_pos.push(spent_pos.pos); - - // TODO - we do *not* want to actually add to batch here? - // if processing a fork we want this batch to rollback. 
- batch.delete_output_pos_height(&spent_pos.commit)?; - + batch.delete_output_pos_height(&input.commitment())?; spent.push(spent_pos); } @@ -965,9 +962,6 @@ impl<'a> Extension<'a> { } fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> { - // if self.output_pmmr.is_empty() || output_pos.is_empty() { - // return Ok(()); - // } let mut output_idx: Vec<_> = output_pos .iter() .map(|x| pmmr::n_leaves(*x).saturating_sub(1)) @@ -983,7 +977,7 @@ impl<'a> Extension<'a> { ) } - fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { + fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result { let commit = input.commitment(); if let Ok((pos, height)) = batch.get_output_pos_height(&commit) { // First check this input corresponds to an existing entry in the output MMR. @@ -1003,11 +997,7 @@ impl<'a> Extension<'a> { self.rproof_pmmr .prune(pos) .map_err(ErrorKind::TxHashSetErr)?; - Ok(OutputPos { - pos, - height, - commit, - }) + Ok(CommitPos { pos, height }) } Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()), Err(e) => Err(ErrorKind::TxHashSetErr(e).into()), @@ -1185,8 +1175,8 @@ impl<'a> Extension<'a> { // reused output commitment. For example an output at pos 1, spent, reused at pos 2. // The output_pos index should be updated to reflect the old pos 1 when unspent. if let Ok(spent) = spent { - for x in spent { - batch.save_output_pos_height(&x.commit, x.pos, x.height)?; + for (x, y) in block.inputs().into_iter().zip(spent) { + batch.save_output_pos_height(&x.commitment(), y.pos, y.height)?; } } diff --git a/chain/src/types.rs b/chain/src/types.rs index 96f3d6d699..be2d612bed 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -271,24 +271,27 @@ pub struct OutputPos { pub commit: Commitment, } -impl Readable for OutputPos { - fn read(reader: &mut dyn Reader) -> Result { +/// Minimal struct representing a known MMR position and associated block height. 
+#[derive(Debug)] +pub struct CommitPos { + /// MMR position + pub pos: u64, + /// Block height + pub height: u64, +} + +impl Readable for CommitPos { + fn read(reader: &mut dyn Reader) -> Result { let pos = reader.read_u64()?; let height = reader.read_u64()?; - let commit = Commitment::read(reader)?; - Ok(OutputPos { - pos, - height, - commit, - }) + Ok(CommitPos { pos, height }) } } -impl Writeable for OutputPos { +impl Writeable for CommitPos { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u64(self.pos)?; writer.write_u64(self.height)?; - self.commit.write(writer)?; Ok(()) } } From 5ccf710f3210f8917989e10662881380676ba195 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 22 Feb 2020 10:04:27 +0000 Subject: [PATCH 8/9] compare key with commitment when cleaning output_pos index --- chain/src/store.rs | 8 ++++++++ chain/src/txhashset/txhashset.rs | 7 +++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/chain/src/store.rs b/chain/src/store.rs index 3ea7c94c2e..2dfe79bfc1 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -258,6 +258,14 @@ impl<'a> Batch<'a> { .delete(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())) } + /// When using the output_pos iterator we have access to the index keys but not the + /// original commitment that the key is constructed from. So we need a way of comparing + /// a key with another commitment without reconstructing the commitment from the key bytes. + pub fn is_match_output_pos_key(&self, key: &[u8], commit: &Commitment) -> bool { + let commit_key = to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()); + commit_key == key + } + /// Iterator over the output_pos index. 
pub fn output_pos_iter(&self) -> Result, Error> { let key = to_key(OUTPUT_POS_PREFIX, &mut "".to_string().into_bytes()); diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 140d3f3ce8..3d3332cb53 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -395,12 +395,15 @@ impl TxHashSet { ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); // Iterate over the current output_pos index, removing any entries that - do not point to to the correct output. + do not point to the expected output. let mut removed_count = 0; for (key, (pos, _)) in batch.output_pos_iter()? { if let Some(out) = output_pmmr.get_data(pos) { if let Ok(pos_via_mmr) = batch.get_output_pos(&out.commitment()) { - if pos == pos_via_mmr { + // If the pos matches and the index key matches the commitment + // then keep the entry, otherwise we want to clean it up. + if pos == pos_via_mmr && batch.is_match_output_pos_key(&key, &out.commitment()) + { continue; } } From caab410cb1f811ba4941f3678a936b102e9218e1 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 22 Feb 2020 10:08:00 +0000 Subject: [PATCH 9/9] remove unused OutputPos struct --- chain/src/types.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/chain/src/types.rs b/chain/src/types.rs index be2d612bed..f1ff4e7145 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -259,18 +259,6 @@ impl OutputRoots { } } -/// Minimal struct representing an output commitment at a known position (and block height) -/// in an MMR. -#[derive(Debug)] -pub struct OutputPos { - /// MMR position - pub pos: u64, - /// Block height - pub height: u64, - /// Output commitment - pub commit: Commitment, -} - /// Minimal struct representing a known MMR position and associated block height. #[derive(Debug)] pub struct CommitPos {