diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index c8c41a7450b..51508971990 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -308,6 +308,10 @@ impl PoxConstants { PoxConstants::new(10, 5, 3, 25) } + pub fn reward_slots(&self) -> u32 { + self.reward_cycle_length + } + pub fn mainnet_default() -> PoxConstants { PoxConstants::new(1000, 240, 192, 25) } diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index ca9e6ba68df..a65bae045f8 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -1,5 +1,5 @@ use std::collections::VecDeque; -use std::convert::TryInto; +use std::convert::{TryFrom, TryInto}; use std::time::Duration; use burnchains::{ @@ -12,13 +12,18 @@ use chainstate::burn::{ BlockHeaderHash, BlockSnapshot, ConsensusHash, }; use chainstate::stacks::{ + boot::STACKS_BOOT_CODE_CONTRACT_ADDRESS, db::{ClarityTx, StacksChainState, StacksHeaderInfo}, events::StacksTransactionReceipt, Error as ChainstateError, StacksAddress, StacksBlock, StacksBlockHeader, StacksBlockId, }; use monitoring::increment_stx_blocks_processed_counter; use util::db::Error as DBError; -use vm::{costs::ExecutionCost, types::PrincipalData}; +use vm::{ + costs::ExecutionCost, + types::{PrincipalData, QualifiedContractIdentifier}, + Value, +}; pub mod comm; use chainstate::stacks::index::MarfTrieId; @@ -161,10 +166,26 @@ impl RewardSetProvider for OnChainRewardSetProvider { sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result<Vec<StacksAddress>, Error> { - let res = + let registered_addrs = chainstate.get_reward_addresses(burnchain, sortdb, current_burn_height, block_id)?; - let addresses = res.iter().map(|a| a.0).collect::<Vec<StacksAddress>>(); - Ok(addresses) + + let liquid_ustx = StacksChainState::get_stacks_block_header_info_by_index_block_hash( + chainstate.headers_db(), + block_id, + )? 
+ .expect("CORRUPTION: Failed to look up block header info for PoX anchor block") + .total_liquid_ustx; + + let threshold = StacksChainState::get_reward_threshold( + &burnchain.pox_constants, + ®istered_addrs, + liquid_ustx, + ); + + Ok(StacksChainState::make_reward_set( + threshold, + registered_addrs, + )) } } @@ -196,7 +217,32 @@ impl<'a, T: BlockEventDispatcher> stacks_chain_id, chain_state_path, initial_balances, - boot_block_exec, + |clarity_tx| { + let burnchain = burnchain.clone(); + let contract = QualifiedContractIdentifier::parse(&format!( + "{}.pox", + STACKS_BOOT_CODE_CONTRACT_ADDRESS + )) + .expect("Failed to construct boot code contract address"); + let sender = PrincipalData::from(contract.clone()); + + clarity_tx.connection().as_transaction(|conn| { + conn.run_contract_call( + &sender, + &contract, + "set-burnchain-parameters", + &[ + Value::UInt(burnchain.first_block_height as u128), + Value::UInt(burnchain.pox_constants.prepare_length as u128), + Value::UInt(burnchain.pox_constants.reward_cycle_length as u128), + Value::UInt(burnchain.pox_constants.pox_rejection_fraction as u128), + ], + |_, _| false, + ) + .expect("Failed to set burnchain parameters in PoX contract"); + }); + boot_block_exec(clarity_tx) + }, block_limit, ) .unwrap(); diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index 4e0e357787f..5fc86595661 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -24,9 +24,10 @@ use chainstate::stacks::StacksBlockHeader; use address::AddressHashMode; use burnchains::bitcoin::address::BitcoinAddress; -use burnchains::Address; +use burnchains::{Address, PoxConstants}; use chainstate::burn::db::sortdb::SortitionDB; +use core::{POX_MAXIMAL_SCALING, POX_THRESHOLD_STEPS_USTX}; use vm::types::{ PrincipalData, QualifiedContractIdentifier, SequenceData, StandardPrincipalData, TupleData, @@ -42,6 +43,7 @@ use vm::representations::ContractName; use util::hash::Hash160; use std::boxed::Box; +use std::cmp; use std::convert::TryFrom; use std::convert::TryInto; @@ -183,6 +185,74 @@ impl StacksChainState { .map(|value| value.expect_bool()) } + /// Given a threshold and set of registered addresses, return a reward set where + /// every entry address has stacked more than the threshold, and addresses + /// are repeated floor(stacked_amt / threshold) times. + /// If an address appears in `addresses` multiple times, then the address's associated amounts + /// are summed. + pub fn make_reward_set( + threshold: u128, + mut addresses: Vec<(StacksAddress, u128)>, + ) -> Vec { + let mut reward_set = vec![]; + // the way that we sum addresses relies on sorting. 
+ addresses.sort_by_key(|k| k.0.bytes.0); + while let Some((address, mut stacked_amt)) = addresses.pop() { + // peek at the next address in the set, and see if we need to sum + while addresses.last().map(|x| &x.0) == Some(&address) { + let (_, additional_amt) = addresses + .pop() + .expect("BUG: last() returned Some, but pop() returned None."); + stacked_amt = stacked_amt + .checked_add(additional_amt) + .expect("CORRUPTION: Stacker stacked > u128 max amount"); + } + let slots_taken = u32::try_from(stacked_amt / threshold) + .expect("CORRUPTION: Stacker claimed > u32::max() reward slots"); + info!( + "Slots taken by {} = {}, on stacked_amt = {}", + &address, slots_taken, stacked_amt + ); + for _i in 0..slots_taken { + reward_set.push(address.clone()); + } + } + reward_set + } + + pub fn get_reward_threshold( + pox_settings: &PoxConstants, + addresses: &[(StacksAddress, u128)], + liquid_ustx: u128, + ) -> u128 { + let participation = addresses + .iter() + .fold(0, |agg, (_, stacked_amt)| agg + stacked_amt); + + assert!( + participation <= liquid_ustx, + "CORRUPTION: More stacking participation than liquid STX" + ); + + // set the lower limit on reward scaling at 25% of liquid_ustx + // (i.e., liquid_ustx / POX_MAXIMAL_SCALING) + let scale_by = cmp::max(participation, liquid_ustx / POX_MAXIMAL_SCALING as u128); + + let reward_slots = pox_settings.reward_slots() as u128; + let threshold_precise = scale_by / reward_slots; + // round threshold_precise up to the nearest POX_THRESHOLD_STEPS_USTX increment + let ceil_amount = match threshold_precise % POX_THRESHOLD_STEPS_USTX { + 0 => 0, + remainder => POX_THRESHOLD_STEPS_USTX - remainder, + }; + let threshold = threshold_precise + ceil_amount; + info!( + "PoX participation threshold is {}, from {}", + threshold, threshold_precise + ); + threshold + } + /// Each address will have at least (get-stacking-minimum) tokens. pub fn get_reward_addresses( &mut self, @@ -255,8 +325,6 @@ impl StacksChainState { ret.push((StacksAddress::new(version, hash), total_ustx)); } - ret.sort_by_key(|k| k.0.bytes.0); - Ok(ret) } } @@ -285,6 +353,7 @@ pub mod test { use util::*; + use core::*; use vm::contracts::Contract; use vm::types::*; @@ -293,6 +362,107 @@ pub mod test { use util::hash::to_hex; + #[test] + fn make_reward_set_units() { + let threshold = 1_000; + let addresses = vec![ + ( + StacksAddress::from_string("STVK1K405H6SK9NKJAP32GHYHDJ98MMNP8Y6Z9N0").unwrap(), + 1500, + ), + ( + StacksAddress::from_string("ST76D2FMXZ7D2719PNE4N71KPSX84XCCNCMYC940").unwrap(), + 500, + ), + ( + StacksAddress::from_string("STVK1K405H6SK9NKJAP32GHYHDJ98MMNP8Y6Z9N0").unwrap(), + 1500, + ), + ( + StacksAddress::from_string("ST76D2FMXZ7D2719PNE4N71KPSX84XCCNCMYC940").unwrap(), + 400, + ), + ]; + assert_eq!( + StacksChainState::make_reward_set(threshold, addresses).len(), + 3 + ); + } + + #[test] + fn get_reward_threshold_units() { + // when the liquid amount = the threshold step, + // the threshold should always be the step size. 
+ let liquid = POX_THRESHOLD_STEPS_USTX; + assert_eq!( + StacksChainState::get_reward_threshold(&PoxConstants::new(1000, 1, 1, 1), &[], liquid), + POX_THRESHOLD_STEPS_USTX + ); + assert_eq!( + StacksChainState::get_reward_threshold( + &PoxConstants::new(1000, 1, 1, 1), + &[(rand_addr(), liquid)], + liquid + ), + POX_THRESHOLD_STEPS_USTX + ); + + let liquid = 200_000_000 * MICROSTACKS_PER_STACKS as u128; + // with zero participation, should scale to 25% of liquid + assert_eq!( + StacksChainState::get_reward_threshold(&PoxConstants::new(1000, 1, 1, 1), &[], liquid), + 50_000 * MICROSTACKS_PER_STACKS as u128 + ); + // should be the same at 25% participation + assert_eq!( + StacksChainState::get_reward_threshold( + &PoxConstants::new(1000, 1, 1, 1), + &[(rand_addr(), liquid / 4)], + liquid + ), + 50_000 * MICROSTACKS_PER_STACKS as u128 + ); + // but not at 30% participation + assert_eq!( + StacksChainState::get_reward_threshold( + &PoxConstants::new(1000, 1, 1, 1), + &[ + (rand_addr(), liquid / 4), + (rand_addr(), 10_000_000 * (MICROSTACKS_PER_STACKS as u128)) + ], + liquid + ), + 60_000 * MICROSTACKS_PER_STACKS as u128 + ); + + // bump by just a little bit, should go to the next threshold step + assert_eq!( + StacksChainState::get_reward_threshold( + &PoxConstants::new(1000, 1, 1, 1), + &[ + (rand_addr(), liquid / 4), + (rand_addr(), (MICROSTACKS_PER_STACKS as u128)) + ], + liquid + ), + 60_000 * MICROSTACKS_PER_STACKS as u128 + ); + + // at 100% participation, the threshold is the full liquid supply divided by the number of reward slots + assert_eq!( + StacksChainState::get_reward_threshold( + &PoxConstants::new(1000, 1, 1, 1), + &[(rand_addr(), liquid)], + liquid + ), + 200_000 * MICROSTACKS_PER_STACKS as u128 + ); + } + + fn rand_addr() -> StacksAddress { + key_to_stacks_addr(&StacksPrivateKey::new()) + } + fn key_to_stacks_addr(key: &StacksPrivateKey) -> StacksAddress { StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -739,7 +909,12 @@ pub mod test { block_id: &StacksBlockId, ) -> Result<Vec<(StacksAddress, u128)>, Error> { let burn_block_height = get_par_burn_block_height(state, block_id); - state.get_reward_addresses(burnchain, sortdb, burn_block_height, block_id) + state + .get_reward_addresses(burnchain, sortdb, burn_block_height, block_id) + .and_then(|mut addrs| { + addrs.sort_by_key(|k| k.0.bytes.0); + Ok(addrs) + }) } fn get_parent_tip( diff --git a/src/chainstate/stacks/boot/pox.clar b/src/chainstate/stacks/boot/pox.clar index 42649925932..dd5a6cd5fdd 100644 --- a/src/chainstate/stacks/boot/pox.clar +++ b/src/chainstate/stacks/boot/pox.clar @@ -34,7 +34,7 @@ ;; This function can only be called once, when it boots up (define-public (set-burnchain-parameters (first-burn-height uint) (prepare-cycle-length uint) (reward-cycle-length uint) (rejection-fraction uint)) (begin - (asserts! (and is-in-regtest (not (var-get configured))) (err ERR_NOT_ALLOWED)) + (asserts! 
(not (var-get configured)) (err ERR_NOT_ALLOWED)) (var-set first-burnchain-block-height first-burn-height) (var-set pox-prepare-cycle-length prepare-cycle-length) (var-set pox-reward-cycle-length reward-cycle-length) diff --git a/src/chainstate/stacks/db/headers.rs b/src/chainstate/stacks/db/headers.rs index 1dd1ec89f01..fc0dd849b1d 100644 --- a/src/chainstate/stacks/db/headers.rs +++ b/src/chainstate/stacks/db/headers.rs @@ -36,7 +36,8 @@ use vm::costs::ExecutionCost; use util::db::Error as db_error; use util::db::{ - query_count, query_row, query_row_columns, query_rows, DBConn, FromColumn, FromRow, + query_count, query_row, query_row_columns, query_row_panic, query_rows, DBConn, FromColumn, + FromRow, }; use core::FIRST_BURNCHAIN_CONSENSUS_HASH; @@ -278,14 +279,10 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result<Option<StacksHeaderInfo>, Error> { let sql = "SELECT * FROM block_headers WHERE index_block_hash = ?1".to_string(); - let mut rows = query_rows::<StacksHeaderInfo, _>(conn, &sql, &[&index_block_hash]) - .map_err(Error::DBError)?; - let cnt = rows.len(); - if cnt > 1 { - unreachable!("FATAL: multiple rows for the same block hash") // should be unreachable, since index_block_hash is unique - } - - Ok(rows.pop()) + query_row_panic(conn, &sql, &[&index_block_hash], || { + "FATAL: multiple rows for the same block hash".to_string() + }) + .map_err(Error::DBError) } /// Get an ancestor block header diff --git a/src/core/mod.rs b/src/core/mod.rs index 1eb8c509d42..47e370aed4a 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -61,8 +61,18 @@ pub const BURNCHAIN_BOOT_CONSENSUS_HASH: ConsensusHash = ConsensusHash([0xff; 20 pub const CHAINSTATE_VERSION: &'static str = "23.0.0.0"; +pub const MICROSTACKS_PER_STACKS: u32 = 1_000_000; + pub const POX_PREPARE_WINDOW_LENGTH: u32 = 240; pub const POX_REWARD_CYCLE_LENGTH: u32 = 1000; +/// The maximum amount that PoX rewards can be scaled by. +/// That is, if participation is very low, rewards are: +/// POX_MAXIMAL_SCALING x (rewards with 100% participation) +/// Set at 4x, this implies that the lower bound of participation for scaling +/// is 25%. +pub const POX_MAXIMAL_SCALING: u128 = 4; +/// This is the amount that PoX threshold adjustments are stepped by. 
+pub const POX_THRESHOLD_STEPS_USTX: u128 = 10_000 * (MICROSTACKS_PER_STACKS as u128); /// Synchronize burn transactions from the Bitcoin blockchain pub fn sync_burnchain_bitcoin( diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index d704af243d5..1c64200b68a 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -53,12 +53,21 @@ pub struct BitcoinRegtestController { burnchain_db: Option, chain_tip: Option, use_coordinator: Option, + burnchain_config: Option, } const DUST_UTXO_LIMIT: u64 = 5500; impl BitcoinRegtestController { pub fn new(config: Config, coordinator_channel: Option) -> Self { + BitcoinRegtestController::with_burnchain(config, coordinator_channel, None) + } + + pub fn with_burnchain( + config: Config, + coordinator_channel: Option, + burnchain_config: Option, + ) -> Self { std::fs::create_dir_all(&config.node.get_burnchain_path()) .expect("Unable to create workdir"); @@ -93,11 +102,12 @@ impl BitcoinRegtestController { Self { use_coordinator: coordinator_channel, - config: config, + config, indexer_config, db: None, burnchain_db: None, chain_tip: None, + burnchain_config, } } @@ -122,22 +132,28 @@ impl BitcoinRegtestController { Self { use_coordinator: None, - config: config, + config, indexer_config, db: None, burnchain_db: None, chain_tip: None, + burnchain_config: None, } } fn setup_burnchain(&self) -> (Burnchain, BitcoinNetworkType) { let (network_name, network_type) = self.config.burnchain.get_bitcoin_network(); - let working_dir = self.config.get_burn_db_path(); - match Burnchain::new(&working_dir, &self.config.burnchain.chain, &network_name) { - Ok(burnchain) => (burnchain, network_type), - Err(e) => { - error!("Failed to instantiate burnchain: {}", e); - panic!() + match &self.burnchain_config { + Some(burnchain) => (burnchain.clone(), network_type), + None => { + let working_dir = self.config.get_burn_db_path(); + match Burnchain::new(&working_dir, &self.config.burnchain.chain, &network_name) { + Ok(burnchain) => (burnchain, network_type), + Err(e) => { + error!("Failed to instantiate burnchain: {}", e); + panic!() + } + } } } } @@ -312,6 +328,90 @@ impl BitcoinRegtestController { Ok((burnchain_tip, burnchain_height)) } + #[cfg(test)] + pub fn get_all_utxos(&self, public_key: &Secp256k1PublicKey) -> Vec { + // Configure UTXO filter + let pkh = Hash160::from_data(&public_key.to_bytes()) + .to_bytes() + .to_vec(); + let (_, network_id) = self.config.burnchain.get_bitcoin_network(); + let address = + BitcoinAddress::from_bytes(network_id, BitcoinAddressType::PublicKeyHash, &pkh) + .expect("Public key incorrect"); + let filter_addresses = vec![address.to_b58()]; + let _result = BitcoinRPCRequest::import_public_key(&self.config, &public_key); + + sleep_ms(1000); + + let min_conf = 0; + let max_conf = 9999999; + let minimum_amount = ParsedUTXO::sat_to_serialized_btc(1); + + let payload = BitcoinRPCRequest { + method: "listunspent".to_string(), + params: vec![ + min_conf.into(), + max_conf.into(), + filter_addresses.clone().into(), + true.into(), + json!({ "minimumAmount": minimum_amount }), + ], + id: "stacks".to_string(), + jsonrpc: "2.0".to_string(), + }; + + let mut res = BitcoinRPCRequest::send(&self.config, payload).unwrap(); + let mut result_vec = vec![]; + + if let Some(ref mut object) = res.as_object_mut() { + match object.get_mut("result") { + 
Some(serde_json::Value::Array(entries)) => { + while let Some(entry) = entries.pop() { + let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { + Ok(utxo) => utxo, + Err(err) => { + warn!("Failed parsing UTXO: {}", err); + continue; + } + }; + let amount = match parsed_utxo.get_sat_amount() { + Some(amount) => amount, + None => continue, + }; + + if amount < 1 { + continue; + } + + let script_pub_key = match parsed_utxo.get_script_pub_key() { + Some(script_pub_key) => script_pub_key, + None => { + continue; + } + }; + + let txid = match parsed_utxo.get_txid() { + Some(amount) => amount, + None => continue, + }; + + result_vec.push(UTXO { + txid, + vout: parsed_utxo.vout, + script_pub_key, + amount, + }); + } + } + _ => { + warn!("Failed to get UTXOs"); + } + } + } + + result_vec + } + pub fn get_utxos( &self, public_key: &Secp256k1PublicKey, @@ -382,7 +482,7 @@ impl BitcoinRegtestController { let total_unspent: u64 = utxos.iter().map(|o| o.amount).sum(); if total_unspent < amount_required { - debug!( + warn!( "Total unspent {} < {} for {:?}", total_unspent, amount_required, diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f997b63b2de..57abb707374 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -355,6 +355,9 @@ impl Config { .wait_time_for_microblocks .unwrap_or(default_node_config.wait_time_for_microblocks), prometheus_bind: node.prometheus_bind, + pox_sync_sample_secs: node + .pox_sync_sample_secs + .unwrap_or(default_node_config.pox_sync_sample_secs), }; node_config.set_bootstrap_node(node.bootstrap_node); node_config @@ -412,6 +415,9 @@ impl Config { .burnchain_op_tx_fee .unwrap_or(default_burnchain_config.burnchain_op_tx_fee), process_exit_at_block_height: burnchain.process_exit_at_block_height, + poll_time_secs: burnchain + .poll_time_secs + .unwrap_or(default_burnchain_config.poll_time_secs), } } None => default_burnchain_config, @@ -719,6 +725,7 @@ pub struct BurnchainConfig { pub local_mining_public_key: Option, pub burnchain_op_tx_fee: u64, pub process_exit_at_block_height: Option, + pub poll_time_secs: u64, } impl BurnchainConfig { @@ -741,6 +748,7 @@ impl BurnchainConfig { local_mining_public_key: None, burnchain_op_tx_fee: MINIMUM_DUST_FEE, process_exit_at_block_height: None, + poll_time_secs: 30, // TODO: this is a testnet specific value. 
} } @@ -791,6 +799,7 @@ pub struct BurnchainConfigFile { pub local_mining_public_key: Option, pub burnchain_op_tx_fee: Option, pub process_exit_at_block_height: Option, + pub poll_time_secs: Option, } #[derive(Clone, Debug, Default)] @@ -808,6 +817,7 @@ pub struct NodeConfig { pub mine_microblocks: bool, pub wait_time_for_microblocks: u64, pub prometheus_bind: Option, + pub pox_sync_sample_secs: u64, } impl NodeConfig { @@ -841,6 +851,7 @@ impl NodeConfig { mine_microblocks: false, wait_time_for_microblocks: 15000, prometheus_bind: None, + pox_sync_sample_secs: 30, } } @@ -939,6 +950,7 @@ pub struct NodeConfigFile { pub mine_microblocks: Option, pub wait_time_for_microblocks: Option, pub prometheus_bind: Option, + pub pox_sync_sample_secs: Option, } #[derive(Clone, Deserialize, Default)] diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 62d04ceef62..a9cab5da31d 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -139,7 +139,7 @@ fn main() { || conf.burnchain.mode == "xenon" { let mut run_loop = neon::RunLoop::new(conf); - run_loop.start(num_round); + run_loop.start(num_round, None); } else { println!("Burnchain mode '{}' not supported", conf.burnchain.mode); } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 39691f9b39c..b6151eb9442 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -85,6 +85,7 @@ pub struct NeonGenesisNode { pub config: Config, keychain: Keychain, event_dispatcher: EventDispatcher, + burnchain: Burnchain, } #[cfg(test)] @@ -596,19 +597,13 @@ impl InitializedNeonNode { miner: bool, blocks_processed: BlocksProcessedCounter, coord_comms: CoordinatorChannels, + burnchain: Burnchain, ) -> InitializedNeonNode { // we can call _open_ here rather than _connect_, since connect is first called in // make_genesis_block let sortdb = SortitionDB::open(&config.get_burn_db_file_path(), false) .expect("Error while instantiating sortition db"); - let burnchain = Burnchain::new( - &config.get_burn_db_path(), - &config.burnchain.chain, - "regtest", - ) - .expect("Error while instantiating burnchain"); - let view = { let ic = sortdb.index_conn(); let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&ic) @@ -1036,6 +1031,7 @@ impl InitializedNeonNode { /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp /// and inspecting if a sortition was won. 
+ /// `ibd`: boolean indicating whether or not we are in the initial block download pub fn process_burnchain_state( &mut self, sortdb: &SortitionDB, @@ -1117,7 +1113,12 @@ impl InitializedNeonNode { impl NeonGenesisNode { /// Instantiate and initialize a new node, given a config - pub fn new(config: Config, mut event_dispatcher: EventDispatcher, boot_block_exec: F) -> Self + pub fn new( + config: Config, + mut event_dispatcher: EventDispatcher, + burnchain: Burnchain, + boot_block_exec: F, + ) -> Self where F: FnOnce(&mut ClarityTx) -> (), { @@ -1151,6 +1152,7 @@ impl NeonGenesisNode { keychain, config, event_dispatcher, + burnchain, } } @@ -1172,6 +1174,7 @@ impl NeonGenesisNode { true, blocks_processed, coord_comms, + self.burnchain, ) } @@ -1193,6 +1196,7 @@ impl NeonGenesisNode { false, blocks_processed, coord_comms, + self.burnchain, ) } } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index b471beeab28..01d137b0cc2 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -83,15 +83,18 @@ impl RunLoop { /// It will start the burnchain (separate thread), set-up a channel in /// charge of coordinating the new blocks coming from the burnchain and /// the nodes, taking turns on tenures. - pub fn start(&mut self, _expected_num_rounds: u64) { + pub fn start(&mut self, _expected_num_rounds: u64, burnchain_opt: Option) { let (coordinator_receivers, coordinator_senders) = self .coordinator_channels .take() .expect("Run loop already started, can only start once after initialization."); // Initialize and start the burnchain. - let mut burnchain = - BitcoinRegtestController::new(self.config.clone(), Some(coordinator_senders.clone())); + let mut burnchain = BitcoinRegtestController::with_burnchain( + self.config.clone(), + Some(coordinator_senders.clone()), + burnchain_opt, + ); let pox_constants = burnchain.get_pox_constants(); let is_miner = if self.config.node.miner { @@ -136,7 +139,6 @@ impl RunLoop { .iter() .map(|e| (e.address.clone(), e.amount)) .collect(); - let burnchain_poll_time = 30; // TODO: this is testnet-specific // setup dispatcher let mut event_dispatcher = EventDispatcher::new(); @@ -145,17 +147,7 @@ impl RunLoop { } let mut coordinator_dispatcher = event_dispatcher.clone(); - let burnchain_config = match Burnchain::new( - &self.config.get_burn_db_path(), - &self.config.burnchain.chain, - "regtest", - ) { - Ok(burnchain) => burnchain, - Err(e) => { - error!("Failed to instantiate burnchain: {}", e); - panic!() - } - }; + let burnchain_config = burnchain.get_burnchain(); let chainstate_path = self.config.get_chainstate_path(); let coordinator_burnchain_config = burnchain_config.clone(); @@ -178,7 +170,12 @@ impl RunLoop { let mut block_height = burnchain_tip.block_snapshot.block_height; // setup genesis - let node = NeonGenesisNode::new(self.config.clone(), event_dispatcher, |_| {}); + let node = NeonGenesisNode::new( + self.config.clone(), + event_dispatcher, + burnchain_config.clone(), + |_| {}, + ); let mut node = if is_miner { node.into_initialized_leader_node( burnchain_tip.clone(), @@ -212,8 +209,8 @@ impl RunLoop { mainnet, chainid, chainstate_path, - burnchain_poll_time, - self.config.connection_options.timeout, + self.config.burnchain.poll_time_secs, + self.config.node.pox_sync_sample_secs, ) .unwrap(); let mut burnchain_height = 1; diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index b46093b355b..e0becb83297 100644 --- 
a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -43,6 +43,8 @@ pub struct PoxSyncWatchdog { chainstate: StacksChainState, } +const PER_SAMPLE_WAIT_MS: u64 = 1000; + impl PoxSyncWatchdog { pub fn new( mainnet: bool, @@ -350,7 +352,7 @@ impl PoxSyncWatchdog { &self.max_samples ); } - sleep_ms(1000); + sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -359,7 +361,7 @@ impl PoxSyncWatchdog { { // still waiting for that first block in this reward cycle debug!("PoX watchdog: Still warming up: waiting until {}s for first Stacks block download (estimated download time: {}s)...", expected_first_block_deadline, self.estimated_block_download_time); - sleep_ms(1000); + sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -406,7 +408,7 @@ impl PoxSyncWatchdog { { debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{})s before burnchain synchronization (estimated block-processing time: {}s)", get_epoch_time_secs() + 1, expected_last_block_deadline, self.estimated_block_process_time); - sleep_ms(1000); + sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -418,7 +420,7 @@ impl PoxSyncWatchdog { flat_attachable, flat_processed, &attachable_deviants, &processed_deviants); if !flat_attachable || !flat_processed { - sleep_ms(1000); + sleep_ms(PER_SAMPLE_WAIT_MS); continue; } } else { @@ -429,7 +431,7 @@ impl PoxSyncWatchdog { debug!("PoX watchdog: In steady-state; waiting until at least {} before burnchain synchronization", self.steady_state_resync_ts); steady_state = true; } - sleep_ms(1000); + sleep_ms(PER_SAMPLE_WAIT_MS); continue; } } diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 87a72fc2330..1490486ab76 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -2,12 +2,13 @@ use super::{ make_contract_call, make_contract_publish, make_contract_publish_microblock_only, make_microblock, make_stacks_transfer_mblock_only, to_addr, ADDR_4, SK_1, }; -use stacks::burnchains::{Address, PublicKey}; +use stacks::burnchains::{Address, PoxConstants, PublicKey}; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::stacks::{ db::StacksChainState, StacksAddress, StacksBlock, StacksBlockHeader, StacksPrivateKey, StacksPublicKey, StacksTransaction, }; +use stacks::core; use stacks::net::StacksMessageCodec; use stacks::util::secp256k1::Secp256k1PublicKey; use stacks::vm::costs::ExecutionCost; @@ -44,6 +45,9 @@ fn neon_integration_test_conf() -> (Config, StacksAddress) { Some(keychain.generate_op_signer().get_public_key().to_hex()); conf.burnchain.commit_anchor_block_within = 0; + conf.burnchain.poll_time_secs = 1; + conf.node.pox_sync_sample_secs = 1; + let miner_account = keychain.origin_address().unwrap(); (conf, miner_account) @@ -212,7 +216,7 @@ fn bitcoind_integration_test() { let channel = run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || run_loop.start(0)); + thread::spawn(move || run_loop.start(0, None)); // give the run loop some time to start up! wait_for_runloop(&blocks_processed); @@ -290,7 +294,7 @@ fn microblock_integration_test() { let channel = run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || run_loop.start(0)); + thread::spawn(move || run_loop.start(0, None)); // give the run loop some time to start up! 
wait_for_runloop(&blocks_processed); @@ -577,7 +581,7 @@ fn size_check_integration_test() { let client = reqwest::blocking::Client::new(); let channel = run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || run_loop.start(0)); + thread::spawn(move || run_loop.start(0, None)); // give the run loop some time to start up! wait_for_runloop(&blocks_processed); @@ -701,6 +705,12 @@ fn pox_integration_test() { let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let spender_3_sk = StacksPrivateKey::new(); + let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); + let pox_pubkey = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) @@ -711,14 +721,33 @@ fn pox_integration_test() { .to_vec(), ); + let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::new()); + let pox_2_pubkey_hash = bytes_to_hex( + &Hash160::from_data(&pox_2_pubkey.to_bytes()) + .to_bytes() + .to_vec(), + ); + let (mut conf, miner_account) = neon_integration_test_conf(); - let total_bal = 10_000_000_000; - let stacked_bal = 1_000_000_000; + let first_bal = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let second_bal = 2_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let third_bal = 2_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let stacked_bal = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), - amount: total_bal, + amount: first_bal, + }); + + conf.initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: second_bal, + }); + + conf.initial_balances.push(InitialBalance { + address: spender_3_addr.clone(), + amount: third_bal, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -730,16 +759,19 @@ fn pox_integration_test() { let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); let http_origin = format!("http://{}", &conf.node.rpc_bind); + let mut burnchain_config = btc_regtest_controller.get_burnchain(); + burnchain_config.pox_constants = PoxConstants::new(10, 5, 4, 5); + btc_regtest_controller.bootstrap_chain(201); eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf); + let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); let client = reqwest::blocking::Client::new(); let channel = run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || run_loop.start(0)); + thread::spawn(move || run_loop.start(0, Some(burnchain_config))); // give the run loop some time to start up! wait_for_runloop(&blocks_processed); @@ -779,7 +811,7 @@ fn pox_integration_test() { .unwrap(); assert_eq!( u128::from_str_radix(&res.balance[2..], 16).unwrap(), - total_bal as u128 + first_bal as u128 ); assert_eq!(res.nonce, 0); @@ -825,6 +857,93 @@ fn pox_integration_test() { panic!(""); } + // now let's have sender_2 and sender_3 stack to pox addr 2 in + // two different txs, and make sure that they sum together in the reward set. 
+ + let tx = make_contract_call( + &spender_2_sk, + 0, + 243, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked_bal / 2), + execute(&format!( + "{{ hashbytes: 0x{}, version: 0x00 }}", + pox_2_pubkey_hash + )) + .unwrap() + .unwrap(), + Value::UInt(3), + ], + ); + + // okay, let's push that stacking transaction! + let path = format!("{}/v2/transactions", &http_origin); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx.clone()) + .send() + .unwrap(); + eprintln!("{:#?}", res); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + + let tx = make_contract_call( + &spender_3_sk, + 0, + 243, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked_bal / 2), + execute(&format!( + "{{ hashbytes: 0x{}, version: 0x00 }}", + pox_2_pubkey_hash + )) + .unwrap() + .unwrap(), + Value::UInt(3), + ], + ); + + // okay, let's push that stacking transaction! + let path = format!("{}/v2/transactions", &http_origin); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx.clone()) + .send() + .unwrap(); + eprintln!("{:#?}", res); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + // now let's mine a couple blocks, and then check the sender's nonce. // at the end of mining three blocks, there should be _one_ transaction from the microblock // only set that got mined (since the block before this one was empty, a microblock can @@ -852,18 +971,39 @@ fn pox_integration_test() { assert_eq!(res.nonce, 1, "Spender address nonce should be 1"); } - // now let's mine until the next reward cycle starts ... - for _i in 0..35 { + let mut sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {}", sort_height); + // now let's mine until the next reward cycle finishes ... + + while sort_height < 229 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {}", sort_height); } - // we should have received a Bitcoin commitment - let utxos = btc_regtest_controller - .get_utxos(&pox_pubkey, 1) - .expect("Should have been able to retrieve UTXOs for PoX recipient"); + // we should have received _three_ Bitcoin commitments, because our commitment was 3 * threshold + let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey); eprintln!("Got UTXOs: {}", utxos.len()); - assert!(utxos.len() > 0, "Should have received an output during PoX"); + assert_eq!( + utxos.len(), + 3, + "Should have received three outputs during PoX reward cycle" + ); + + // we should have received _three_ Bitcoin commitments to pox_2_pubkey, because our commitment was 3 * threshold + // note: that if the reward set "summing" isn't implemented, this recipient would only have received _2_ slots, + // because each `stack-stx` call only received enough to get 1 slot individually. 
+ let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); + + eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 3, + "Should have received three outputs during PoX reward cycle" + ); + + // okay, the threshold for participation should be channel.stop_chains_coordinator(); }
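
The reward-set construction introduced above in StacksChainState::make_reward_set can be illustrated outside the diff. Below is a minimal, self-contained Rust sketch of the same summing-and-slot logic, using plain String keys as hypothetical stand-ins for StacksAddress; the real function sorts by the address hash bytes and uses checked arithmetic with corruption checks. It mirrors the make_reward_set_units test: duplicate addresses are summed, and each address then earns floor(stacked_amt / threshold) reward slots.

// Standalone sketch of the reward-set logic (not the chainstate code itself).
fn make_reward_set(threshold: u128, mut addresses: Vec<(String, u128)>) -> Vec<String> {
    let mut reward_set = vec![];
    // Sorting groups duplicate addresses next to each other so their
    // stacked amounts can be summed before slots are assigned.
    addresses.sort_by(|a, b| a.0.cmp(&b.0));
    while let Some((address, mut stacked_amt)) = addresses.pop() {
        // Peek at the next entry; if it is the same address, fold its amount in.
        while addresses.last().map(|x| &x.0) == Some(&address) {
            let (_, additional) = addresses
                .pop()
                .expect("last() was Some, so pop() must succeed");
            stacked_amt += additional;
        }
        // Each address earns floor(stacked_amt / threshold) reward slots.
        let slots = (stacked_amt / threshold) as usize;
        for _ in 0..slots {
            reward_set.push(address.clone());
        }
    }
    reward_set
}

fn main() {
    // Mirrors make_reward_set_units: 1500 + 1500 = 3000 -> 3 slots,
    // 500 + 400 = 900 -> below the 1000 threshold -> 0 slots.
    let set = make_reward_set(
        1_000,
        vec![
            ("addr-a".into(), 1_500),
            ("addr-b".into(), 500),
            ("addr-a".into(), 1_500),
            ("addr-b".into(), 400),
        ],
    );
    assert_eq!(set.len(), 3);
    println!("reward set: {:?}", set);
}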
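
The participation threshold computed by StacksChainState::get_reward_threshold can likewise be checked by hand. The following self-contained sketch redeclares the new constants locally (in src/core/mod.rs MICROSTACKS_PER_STACKS is a u32 that gets cast to u128) and uses `reward_slots` as a stand-in for PoxConstants::reward_slots(); the numbers mirror the get_reward_threshold_units test: with 200M STX liquid and 1000 reward slots, zero participation floors at 25% of liquid (threshold 50,000 STX), while 30% participation yields 60,000 STX.

// Standalone sketch of the threshold arithmetic, not the chainstate code itself.
const MICROSTACKS_PER_STACKS: u128 = 1_000_000;
const POX_MAXIMAL_SCALING: u128 = 4;
const POX_THRESHOLD_STEPS_USTX: u128 = 10_000 * MICROSTACKS_PER_STACKS;

fn reward_threshold(reward_slots: u128, participation: u128, liquid_ustx: u128) -> u128 {
    // Never scale below 25% of the liquid supply (liquid / POX_MAXIMAL_SCALING).
    let scale_by = participation.max(liquid_ustx / POX_MAXIMAL_SCALING);
    let threshold_precise = scale_by / reward_slots;
    // Round up to the next 10,000 STX step; exact multiples are left unchanged.
    match threshold_precise % POX_THRESHOLD_STEPS_USTX {
        0 => threshold_precise,
        remainder => threshold_precise + (POX_THRESHOLD_STEPS_USTX - remainder),
    }
}

fn main() {
    let liquid = 200_000_000 * MICROSTACKS_PER_STACKS;
    // 30% participation with 1000 reward slots:
    // scale_by = 60M STX, 60M / 1000 = 60,000 STX, already a step multiple.
    let participation = liquid / 4 + 10_000_000 * MICROSTACKS_PER_STACKS;
    assert_eq!(
        reward_threshold(1_000, participation, liquid),
        60_000 * MICROSTACKS_PER_STACKS
    );
    // Zero participation still scales against 25% of liquid: 50,000 STX.
    assert_eq!(
        reward_threshold(1_000, 0, liquid),
        50_000 * MICROSTACKS_PER_STACKS
    );
}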