feat(consensus)!: add tari script byte size limit check to validation (#3640)

Description
---
- Adds a consensus rule enforcing a maximum TariScript byte size

Breaking changes:
If an existing script is larger than 2048 bytes, the block/output containing it will fail to validate on upgraded nodes. I have not explicitly checked this, but the largest scripts currently used on testnet are one-sided payments, and they are well within the limit.
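
For reference, the rule reduces to a single size comparison against the consensus constant. The sketch below is a self-contained illustration, not the code in this PR: the real check is `check_tari_script_byte_size` in `validation/helpers.rs` (further down in this diff), which measures the script via its consensus encoding and takes the limit from `ConsensusConstants::get_max_script_byte_size()`.

```rust
// Self-contained sketch of the rule, assuming the script has already been
// consensus-encoded into a byte slice. Names here are illustrative only.
const MAX_SCRIPT_BYTE_SIZE: usize = 2048;

#[derive(Debug)]
enum ValidationError {
    TariScriptExceedsMaxSize {
        max_script_size: usize,
        actual_script_size: usize,
    },
}

fn check_script_byte_size(encoded_script: &[u8], max_script_size: usize) -> Result<(), ValidationError> {
    let actual_script_size = encoded_script.len();
    if actual_script_size > max_script_size {
        return Err(ValidationError::TariScriptExceedsMaxSize {
            max_script_size,
            actual_script_size,
        });
    }
    Ok(())
}

fn main() {
    // A script at or under the limit passes; anything larger makes the whole
    // block/output fail validation on upgraded nodes.
    assert!(check_script_byte_size(&[0u8; 2048], MAX_SCRIPT_BYTE_SIZE).is_ok());
    match check_script_byte_size(&[0u8; 2049], MAX_SCRIPT_BYTE_SIZE) {
        Err(err) => println!("rejected: {:?}", err),
        Ok(()) => unreachable!("2049 bytes exceeds the 2048-byte limit"),
    }
}
```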

Motivation and Context
---
See [@CjS77 comment](tari-project/tari-crypto#65 (comment))

TODO: Ideally, to avoid hurting validation performance, we should not have to serialise the script again just to determine its byte size.
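
One possible direction, sketched below purely for illustration and not implemented in this PR: record the encoded size once, when the output is deserialized, so the validator only compares two integers. The `SizedScript` type and its fields are hypothetical names, not part of the codebase.

```rust
// Hypothetical sketch only. The encoded size is captured at deserialization time,
// so no re-serialisation is needed during validation.
struct SizedScript {
    encoded: Vec<u8>,    // consensus-encoded script bytes, kept from deserialization
    encoded_size: usize, // measured once, when the bytes first arrive
}

impl SizedScript {
    fn from_encoded(encoded: Vec<u8>) -> Self {
        let encoded_size = encoded.len();
        Self { encoded, encoded_size }
    }

    /// Validation becomes a plain integer comparison.
    fn check_max_size(&self, max_script_size: usize) -> Result<(), String> {
        if self.encoded_size > max_script_size {
            return Err(format!(
                "script is {} bytes; the consensus limit is {} bytes",
                self.encoded_size, max_script_size
            ));
        }
        Ok(())
    }
}

fn main() {
    let script = SizedScript::from_encoded(vec![0u8; 32]);
    assert!(script.check_max_size(2048).is_ok());
    assert_eq!(script.encoded.len(), script.encoded_size);
}
```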

How Has This Been Tested?
---
New unit test and basic manual testing.
sdbondi committed Dec 6, 2021
1 parent e59d194 commit 53a5174
Showing 14 changed files with 125 additions and 19 deletions.
2 changes: 1 addition & 1 deletion applications/tari_base_node/src/builder.rs
@@ -221,7 +221,7 @@ async fn build_node_context(
let factories = CryptoFactories::default();
let randomx_factory = RandomXFactory::new(config.max_randomx_vms);
let validators = Validators::new(
BodyOnlyValidator::default(),
BodyOnlyValidator::new(rules.clone()),
HeaderValidator::new(rules.clone()),
OrphanBlockValidator::new(
rules.clone(),
2 changes: 1 addition & 1 deletion applications/tari_base_node/src/recovery.rs
@@ -98,7 +98,7 @@ pub async fn run_recovery(node_config: &GlobalConfig) -> Result<(), anyhow::Erro
let factories = CryptoFactories::default();
let randomx_factory = RandomXFactory::new(node_config.max_randomx_vms);
let validators = Validators::new(
BodyOnlyValidator::default(),
BodyOnlyValidator::new(rules.clone()),
HeaderValidator::new(rules.clone()),
OrphanBlockValidator::new(
rules.clone(),
@@ -70,6 +70,8 @@ pub enum HorizonSyncError {
MerkleMountainRangeError(#[from] MerkleMountainRangeError),
#[error("Connectivity error: {0}")]
ConnectivityError(#[from] ConnectivityError),
#[error("Validation error: {0}")]
ValidationError(#[from] ValidationError),
}

impl From<TryFromIntError> for HorizonSyncError {
@@ -43,6 +43,7 @@ use crate::{
transaction_kernel::TransactionKernel,
transaction_output::TransactionOutput,
},
validation::helpers,
};
use croaring::Bitmap;
use futures::{stream::FuturesUnordered, StreamExt};
@@ -374,6 +375,11 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> {

let mut output_mmr = MerkleMountainRange::<HashDigest, _>::new(output_pruned_set);
let mut witness_mmr = MerkleMountainRange::<HashDigest, _>::new(witness_pruned_set);
let mut constants = self
.shared
.consensus_rules
.consensus_constants(current_header.height())
.clone();

while let Some(response) = output_stream.next().await {
let res: SyncUtxosResponse = response?;
@@ -401,6 +407,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> {
);
height_utxo_counter += 1;
let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?;
helpers::check_tari_script_byte_size(&output.script, constants.get_max_script_byte_size())?;
unpruned_outputs.push(output.clone());

output_mmr.push(output.hash())?;
@@ -535,6 +542,11 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> {
break;
} else {
current_header = db.fetch_chain_header(current_header.height() + 1).await?;
constants = self
.shared
.consensus_rules
.consensus_constants(current_header.height())
.clone();
debug!(
target: LOG_TARGET,
"Expecting to receive the next UTXO set {}-{} for header #{}",
19 changes: 19 additions & 0 deletions base_layer/core/src/consensus/consensus_constants.rs
@@ -67,6 +67,8 @@ pub struct ConsensusConstants {
faucet_value: MicroTari,
/// Transaction Weight params
transaction_weight: TransactionWeight,
/// Maximum byte size of TariScript
max_script_byte_size: usize,
}

/// This is just a convenience wrapper to put all the info into a hashmap per diff algo
@@ -167,6 +169,11 @@ impl ConsensusConstants {
self.median_timestamp_count
}

/// The maximum serialized byte size of TariScript
pub fn get_max_script_byte_size(&self) -> usize {
self.max_script_byte_size
}

/// This is the min initial difficulty that can be requested for the pow
pub fn min_pow_difficulty(&self, pow_algo: PowAlgorithm) -> Difficulty {
match self.proof_of_work.get(&pow_algo) {
@@ -226,6 +233,7 @@ impl ConsensusConstants {
proof_of_work: algos,
faucet_value: (5000 * 4000) * T,
transaction_weight: TransactionWeight::v2(),
max_script_byte_size: 2048,
}]
}

@@ -260,6 +268,7 @@ impl ConsensusConstants {
proof_of_work: algos,
faucet_value: (5000 * 4000) * T,
transaction_weight: TransactionWeight::v1(),
max_script_byte_size: 2048,
}]
}

@@ -321,6 +330,7 @@ impl ConsensusConstants {
proof_of_work: algos,
faucet_value: (5000 * 4000) * T,
transaction_weight: TransactionWeight::v1(),
max_script_byte_size: 2048,
},
ConsensusConstants {
effective_from_height: 1400,
@@ -337,6 +347,7 @@ impl ConsensusConstants {
proof_of_work: algos2,
faucet_value: (5000 * 4000) * T,
transaction_weight: TransactionWeight::v1(),
max_script_byte_size: 2048,
},
]
}
@@ -371,6 +382,7 @@ impl ConsensusConstants {
proof_of_work: algos,
faucet_value: (5000 * 4000) * T,
transaction_weight: TransactionWeight::v1(),
max_script_byte_size: 2048,
}]
}

@@ -407,6 +419,7 @@ impl ConsensusConstants {
proof_of_work: algos,
faucet_value: (5000 * 4000) * T,
transaction_weight: TransactionWeight::v2(),
max_script_byte_size: 2048,
}]
}

@@ -441,6 +454,7 @@ impl ConsensusConstants {
proof_of_work: algos,
faucet_value: MicroTari::from(0),
transaction_weight: TransactionWeight::v2(),
max_script_byte_size: 2048,
}]
}
}
@@ -478,6 +492,11 @@ impl ConsensusConstantsBuilder {
self
}

pub fn with_max_script_byte_size(mut self, byte_size: usize) -> Self {
self.consensus.max_script_byte_size = byte_size;
self
}

pub fn with_max_block_transaction_weight(mut self, weight: u64) -> Self {
self.consensus.max_block_transaction_weight = weight;
self
2 changes: 1 addition & 1 deletion base_layer/core/src/test_helpers/blockchain.rs
@@ -138,7 +138,7 @@ pub fn create_store_with_consensus_and_validators_and_config(
pub fn create_store_with_consensus(rules: ConsensusManager) -> BlockchainDatabase<TempDatabase> {
let factories = CryptoFactories::default();
let validators = Validators::new(
BodyOnlyValidator::default(),
BodyOnlyValidator::new(rules.clone()),
MockValidator::new(true),
OrphanBlockValidator::new(rules.clone(), false, factories),
);
@@ -324,6 +324,7 @@ impl<B: BlockchainBackend + 'static> BlockValidator<B> {
.map(|outputs| {
let range_proof_prover = self.factories.range_proof.clone();
let db = self.db.inner().clone();
let max_script_size = self.rules.consensus_constants(height).get_max_script_byte_size();
task::spawn_blocking(move || {
let db = db.db_read_access()?;
let mut aggregate_sender_offset = PublicKey::default();
@@ -351,6 +352,8 @@ impl<B: BlockchainBackend + 'static> BlockValidator<B> {
aggregate_sender_offset = aggregate_sender_offset + &output.sender_offset_public_key;
}

helpers::check_tari_script_byte_size(&output.script, max_script_size)?;

output.verify_metadata_signature()?;
if !bypass_range_proof_verification {
output.verify_range_proof(&range_proof_prover)?;
18 changes: 15 additions & 3 deletions base_layer/core/src/validation/block_validators/body_only.rs
@@ -25,6 +25,7 @@ use crate::{
blocks::ChainBlock,
chain_storage,
chain_storage::BlockchainBackend,
consensus::ConsensusManager,
crypto::tari_utilities::hex::Hex,
validation::{helpers, PostOrphanBodyValidation, ValidationError},
};
@@ -36,8 +37,15 @@ use tari_common_types::chain_metadata::ChainMetadata;

/// This validator checks whether a block satisfies *all* consensus rules. If a block passes this validator, it is the
/// next block on the blockchain.
#[derive(Default)]
pub struct BodyOnlyValidator;
pub struct BodyOnlyValidator {
rules: ConsensusManager,
}

impl BodyOnlyValidator {
pub fn new(rules: ConsensusManager) -> Self {
Self { rules }
}
}

impl<B: BlockchainBackend> PostOrphanBodyValidation<B> for BodyOnlyValidator {
/// The consensus checks that are done (in order of cheapest to verify to most expensive):
@@ -66,7 +74,11 @@ impl<B: BlockchainBackend> PostOrphanBodyValidation<B> for BodyOnlyValidator {

let block_id = format!("block #{} ({})", block.header().height, block.hash().to_hex());
helpers::check_inputs_are_utxos(backend, &block.block().body)?;
helpers::check_not_duplicate_txos(backend, &block.block().body)?;
helpers::check_outputs(
backend,
self.rules.consensus_constants(block.height()),
&block.block().body,
)?;
trace!(
target: LOG_TARGET,
"Block validation: All inputs and outputs are valid for {}",
25 changes: 25 additions & 0 deletions base_layer/core/src/validation/block_validators/test.rs
@@ -21,6 +21,7 @@
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

use std::sync::Arc;
use tari_crypto::script;

use tari_common::configuration::Network;
use tari_test_utils::unpack_enum;
@@ -189,3 +190,27 @@ async fn it_checks_txo_sort_order() {
let err = validator.validate_block_body(block.block().clone()).await.unwrap_err();
assert!(matches!(err, ValidationError::UnsortedOrDuplicateOutput));
}

#[tokio::test]
async fn it_limits_the_script_byte_size() {
let rules = ConsensusManager::builder(Network::LocalNet)
.add_consensus_constants(
ConsensusConstantsBuilder::new(Network::LocalNet)
.with_coinbase_lockheight(0)
.with_max_script_byte_size(0)
.build(),
)
.build();
let (mut blockchain, validator) = setup_with_rules(rules);

let (_, coinbase_a) = blockchain.add_next_tip("A", Default::default());

let mut schema1 = txn_schema!(from: vec![coinbase_a], to: vec![50 * T, 12 * T]);
schema1.script = script!(Nop Nop Nop);
let (txs, _) = schema_to_transaction(&[schema1]);
let txs = txs.into_iter().map(|t| Arc::try_unwrap(t).unwrap()).collect::<Vec<_>>();
let (block, _) = blockchain.create_next_tip(BlockSpec::new().with_transactions(txs).finish());

let err = validator.validate_block_body(block.block().clone()).await.unwrap_err();
assert!(matches!(err, ValidationError::TariScriptExceedsMaxSize { .. }));
}
5 changes: 5 additions & 0 deletions base_layer/core/src/validation/error.rs
@@ -91,6 +91,11 @@ pub enum ValidationError {
AsyncTaskFailed(#[from] task::JoinError),
#[error("Bad block with hash {hash} found")]
BadBlockFound { hash: String },
#[error("Script exceeded maximum script size, expected less than {max_script_size} but was {actual_script_size}")]
TariScriptExceedsMaxSize {
max_script_size: usize,
actual_script_size: usize,
},
}

// ChainStorageError has a ValidationError variant, so to prevent a cyclic dependency we use a string representation in
33 changes: 30 additions & 3 deletions base_layer/core/src/validation/helpers.rs
@@ -23,7 +23,13 @@
use crate::{
blocks::{Block, BlockHeader, BlockHeaderValidationError, BlockValidationError},
chain_storage::{BlockchainBackend, MmrRoots, MmrTree},
consensus::{emission::Emission, ConsensusConstants, ConsensusManager},
consensus::{
emission::Emission,
ConsensusConstants,
ConsensusEncodingSized,
ConsensusEncodingWrapper,
ConsensusManager,
},
crypto::{commitment::HomomorphicCommitmentFactory, tari_utilities::hex::to_hex},
proof_of_work::{
monero_difficulty,
@@ -48,6 +54,7 @@ use std::cmp::Ordering;
use tari_common_types::types::{Commitment, CommitmentFactory, PublicKey};
use tari_crypto::{
keys::PublicKey as PublicKeyTrait,
script::TariScript,
tari_utilities::{epoch_time::EpochTime, hash::Hashable, hex::Hex},
};

@@ -400,14 +407,34 @@ pub fn check_input_is_utxo<B: BlockchainBackend>(db: &B, input: &TransactionInpu
Err(ValidationError::UnknownInput)
}

/// This function checks that the outputs do not already exist in the UTxO set.
pub fn check_not_duplicate_txos<B: BlockchainBackend>(db: &B, body: &AggregateBody) -> Result<(), ValidationError> {
/// This function checks:
/// 1. the byte size of TariScript does not exceed the maximum
/// 2. that the outputs do not already exist in the UTxO set.
pub fn check_outputs<B: BlockchainBackend>(
db: &B,
constants: &ConsensusConstants,
body: &AggregateBody,
) -> Result<(), ValidationError> {
let max_script_size = constants.get_max_script_byte_size();
for output in body.outputs() {
check_tari_script_byte_size(&output.script, max_script_size)?;
check_not_duplicate_txo(db, output)?;
}
Ok(())
}

/// Checks the byte size of TariScript is less than or equal to the given size, otherwise returns an error.
pub fn check_tari_script_byte_size(script: &TariScript, max_script_size: usize) -> Result<(), ValidationError> {
let script_size = ConsensusEncodingWrapper::wrap(script).consensus_encode_exact_size();
if script_size > max_script_size {
return Err(ValidationError::TariScriptExceedsMaxSize {
max_script_size,
actual_script_size: script_size,
});
}
Ok(())
}

/// This function checks that the outputs do not already exist in the UTxO set.
pub fn check_not_duplicate_txo<B: BlockchainBackend>(
db: &B,
5 changes: 3 additions & 2 deletions base_layer/core/src/validation/transaction_validators.rs
@@ -26,7 +26,7 @@ use crate::{
chain_storage::{BlockchainBackend, BlockchainDatabase},
transactions::{transaction_entities::Transaction, CryptoFactories},
validation::{
helpers::{check_inputs_are_utxos, check_not_duplicate_txos},
helpers::{check_inputs_are_utxos, check_outputs},
MempoolTransactionValidation,
ValidationError,
},
@@ -117,9 +117,10 @@ impl<B: BlockchainBackend> TxInputAndMaturityValidator<B> {

impl<B: BlockchainBackend> MempoolTransactionValidation for TxInputAndMaturityValidator<B> {
fn validate(&self, tx: &Transaction) -> Result<(), ValidationError> {
let constants = self.db.consensus_constants()?;
let db = self.db.db_read_access()?;
check_inputs_are_utxos(&*db, tx.body())?;
check_not_duplicate_txos(&*db, tx.body())?;
check_outputs(&*db, constants, tx.body())?;

let tip_height = db.fetch_chain_metadata()?.height_of_longest_chain();
verify_timelocks(tx, tip_height)?;
12 changes: 6 additions & 6 deletions base_layer/core/tests/block_validation.rs
@@ -81,7 +81,7 @@ fn test_genesis_block() {
let rules = ConsensusManager::builder(network).build();
let backend = create_test_db();
let validators = Validators::new(
BodyOnlyValidator::default(),
BodyOnlyValidator::new(rules.clone()),
HeaderValidator::new(rules.clone()),
OrphanBlockValidator::new(rules.clone(), false, factories),
);
@@ -268,7 +268,7 @@ fn test_orphan_validator() {
let backend = create_test_db();
let orphan_validator = OrphanBlockValidator::new(rules.clone(), false, factories.clone());
let validators = Validators::new(
BodyOnlyValidator::default(),
BodyOnlyValidator::new(rules.clone()),
HeaderValidator::new(rules.clone()),
orphan_validator.clone(),
);
@@ -385,10 +385,10 @@ fn test_orphan_body_validation() {
.with_block(genesis.clone())
.build();
let backend = create_test_db();
let body_only_validator = BodyOnlyValidator::default();
let body_only_validator = BodyOnlyValidator::new(rules.clone());
let header_validator = HeaderValidator::new(rules.clone());
let validators = Validators::new(
BodyOnlyValidator::default(),
BodyOnlyValidator::new(rules.clone()),
HeaderValidator::new(rules.clone()),
OrphanBlockValidator::new(rules.clone(), false, factories.clone()),
);
@@ -584,7 +584,7 @@ fn test_header_validation() {
let backend = create_test_db();
let header_validator = HeaderValidator::new(rules.clone());
let validators = Validators::new(
BodyOnlyValidator::default(),
BodyOnlyValidator::new(rules.clone()),
HeaderValidator::new(rules.clone()),
OrphanBlockValidator::new(rules.clone(), false, factories.clone()),
);
@@ -693,7 +693,7 @@ async fn test_block_sync_body_validator() {
let backend = create_test_db();

let validators = Validators::new(
BodyOnlyValidator::default(),
BodyOnlyValidator::new(rules.clone()),
HeaderValidator::new(rules.clone()),
OrphanBlockValidator::new(rules.clone(), false, factories.clone()),
);
