Skip to content

Commit

Permalink
feat: add zero conf tx (#3043)
Browse files Browse the repository at this point in the history
  • Loading branch information
stringhandler committed Jul 5, 2021
2 parents 10ea7e3 + ed70ecf commit 742dd9e
Show file tree
Hide file tree
Showing 14 changed files with 970 additions and 151 deletions.
21 changes: 14 additions & 7 deletions base_layer/core/src/chain_storage/blockchain_database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -940,13 +940,19 @@ pub fn calculate_mmr_roots<T: BlockchainBackend>(db: &T, block: &Block) -> Resul
}

for input in body.inputs().iter() {
let index = db
.fetch_mmr_leaf_index(MmrTree::Utxo, &input.output_hash())?
.ok_or_else(|| ChainStorageError::ValueNotFound {
entity: "UTXO".to_string(),
field: "hash".to_string(),
value: input.output_hash().to_hex(),
})?;
// Search the DB for the output's leaf index so that it can be marked as spent/deleted.
// If the output hash is not found in the DB, fall back to searching the current output MMR,
// which allows zero-conf transactions (spending an output created in this same block).
let index =
match db.fetch_mmr_leaf_index(MmrTree::Utxo, &input.output_hash())? {
Some(index) => index,
None => output_mmr.find_leaf_index(&input.output_hash())?.ok_or_else(|| {
ChainStorageError::ValueNotFound {
entity: "UTXO".to_string(),
field: "hash".to_string(),
value: input.output_hash().to_hex(),
}
})?,
};
input_mmr.push(input.hash())?;

if !output_mmr.delete(index) {
Expand All @@ -965,6 +971,7 @@ pub fn calculate_mmr_roots<T: BlockchainBackend>(db: &T, block: &Block) -> Resul
kernel_mmr_size: kernel_mmr.get_leaf_count()? as u64,
input_mr: input_mmr.get_merkle_root()?,
output_mr: output_mmr.get_merkle_root()?,
// The witness MMR and the output MMR should contain the same number of leaves,
// so the witness MMR leaf count is used for the output MMR size here.
output_mmr_size: witness_mmr.get_leaf_count()? as u64,
witness_mr: witness_mmr.get_merkle_root()?,
};
Expand Down
2 changes: 1 addition & 1 deletion base_layer/core/src/mempool/mempool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ impl Mempool {
/// Only transactions that fit into a block will be returned
pub fn retrieve(&self, total_weight: u64) -> Result<Vec<Arc<Transaction>>, MempoolError> {
self.pool_storage
.read()
.write()
.map_err(|e| MempoolError::BackendError(e.to_string()))?
.retrieve(total_weight)
}
Expand Down
21 changes: 14 additions & 7 deletions base_layer/core/src/mempool/mempool_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -73,12 +73,17 @@ impl MempoolStorage {
);
match self.validator.validate(&tx) {
Ok(()) => {
self.unconfirmed_pool.insert(tx)?;
self.unconfirmed_pool.insert(tx, None)?;
Ok(TxStorageResponse::UnconfirmedPool)
},
Err(ValidationError::UnknownInputs) => {
warn!(target: LOG_TARGET, "Validation failed due to unknown inputs");
Ok(TxStorageResponse::NotStoredOrphan)
Err(ValidationError::UnknownInputs(dependent_outputs)) => {
if self.unconfirmed_pool.verify_outputs_exist(&dependent_outputs) {
self.unconfirmed_pool.insert(tx, Some(dependent_outputs))?;
Ok(TxStorageResponse::UnconfirmedPool)
} else {
warn!(target: LOG_TARGET, "Validation failed due to unknown inputs");
Ok(TxStorageResponse::NotStoredOrphan)
}
},
Err(ValidationError::ContainsSTxO) => {
warn!(target: LOG_TARGET, "Validation failed due to already spent output");
Expand Down Expand Up @@ -109,7 +114,7 @@ impl MempoolStorage {
// Move published txs to ReOrgPool and discard double spends
self.reorg_pool.insert_txs(
self.unconfirmed_pool
.remove_published_and_discard_double_spends(&published_block),
.remove_published_and_discard_deprecated_transactions(&published_block),
)?;

Ok(())
Expand Down Expand Up @@ -191,8 +196,10 @@ impl MempoolStorage {

/// Returns a list of transactions ranked by transaction priority, up to a given weight.
/// Will only return transactions that will fit into a block
pub fn retrieve(&self, total_weight: u64) -> Result<Vec<Arc<Transaction>>, MempoolError> {
Ok(self.unconfirmed_pool.highest_priority_txs(total_weight)?)
pub fn retrieve(&mut self, total_weight: u64) -> Result<Vec<Arc<Transaction>>, MempoolError> {
let results = self.unconfirmed_pool.highest_priority_txs(total_weight)?;
self.insert_txs(results.transactions_to_insert)?;
Ok(results.retrieved_transactions)
}

/// Check if the specified transaction is stored in the Mempool.
Expand Down
23 changes: 17 additions & 6 deletions base_layer/core/src/mempool/priority/prioritized_transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,11 @@
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

use crate::{mempool::priority::PriorityError, transactions::transaction::Transaction};
use std::{convert::TryFrom, sync::Arc};
use crate::{
mempool::priority::PriorityError,
transactions::{transaction::Transaction, types::HashOutput},
};
use std::sync::Arc;
use tari_crypto::tari_utilities::message_format::MessageFormat;

/// Create a unique unspent transaction priority based on the transaction fee, maturity of the oldest input UTXO and the
Expand Down Expand Up @@ -54,20 +57,28 @@ impl Clone for FeePriority {
}

/// A prioritized transaction includes a transaction and the calculated priority of the transaction.
#[derive(Clone)]
pub struct PrioritizedTransaction {
/// The transaction itself, shared via `Arc` so it can be handed out without copying.
pub transaction: Arc<Transaction>,
/// Fee-based priority used to rank this transaction for retrieval into a block.
pub priority: FeePriority,
/// Cached transaction weight, computed once from `Transaction::calculate_weight`.
pub weight: u64,
/// Hashes of outputs this transaction spends that come from other unconfirmed
/// transactions (empty when it only spends confirmed outputs) — enables zero-conf chains.
pub depended_output_hashes: Vec<HashOutput>,
}

impl TryFrom<Transaction> for PrioritizedTransaction {
type Error = PriorityError;

fn try_from(transaction: Transaction) -> Result<Self, Self::Error> {
impl PrioritizedTransaction {
    /// Builds a `PrioritizedTransaction` from a transaction and an optional list of output
    /// hashes the transaction depends on (outputs produced by other unconfirmed transactions,
    /// as used for zero-conf transaction chains).
    ///
    /// Passing `None` for `dependent_outputs` is equivalent to passing an empty list.
    ///
    /// # Errors
    /// Returns a `PriorityError` if the fee priority cannot be computed from the transaction.
    pub fn convert_from_transaction(
        transaction: Transaction,
        dependent_outputs: Option<Vec<HashOutput>>,
    ) -> Result<PrioritizedTransaction, PriorityError> {
        // `None` means "no dependencies"; `unwrap_or_default` yields the empty Vec directly
        // instead of spelling out the match.
        let depended_output_hashes = dependent_outputs.unwrap_or_default();
        Ok(Self {
            priority: FeePriority::try_from(&transaction)?,
            // Weight is computed once here and cached on the prioritized entry.
            weight: transaction.calculate_weight(),
            transaction: Arc::new(transaction),
            depended_output_hashes,
        })
    }
}
Loading

0 comments on commit 742dd9e

Please sign in to comment.