Merge pull request #1 from zone117x/feature/genesis-contract-abi
Feature/genesis contract abi
zone117x committed Apr 3, 2020
2 parents cd13956 + e5a1f88 commit 9c64290
Showing 51 changed files with 2,133 additions and 699 deletions.
6 changes: 5 additions & 1 deletion Cargo.toml
@@ -35,16 +35,20 @@ rust-ini = "0.13"
rand = "=0.7.2"
serde = "1"
serde_derive = "1"
serde_json = "1.0"
sha3 = "0.8.2"
ripemd160 = "0.8.0"
regex = "1"
mio = "0.6.16"
libc = "0.2"
lazy_static = "1.4.0"
toml = "0.5.6"
sha2 = { version = "0.8.0", optional = true }
sha2-asm = { version="0.5.3", optional = true }

[dependencies.serde_json]
version = "1.0"
features = ["arbitrary_precision"]

[dependencies.secp256k1]
version = "0.11.5"
features = ["serde"]
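The Cargo.toml change above moves serde_json out of the inline dependency list into its own table so that the arbitrary_precision feature can be enabled, letting JSON numbers carry values wider than f64 or u64 (for example 128-bit microSTX amounts). A minimal sketch of the behaviour this presumably enables; the BalanceEntry struct is illustrative, not a type from this changeset:

use serde_derive::{Deserialize, Serialize};

// Illustrative payload; the real ABI/event types live elsewhere in the tree.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct BalanceEntry {
    address: String,
    amount: u128, // wider than a JSON f64 can represent exactly
}

fn main() -> Result<(), serde_json::Error> {
    let entry = BalanceEntry {
        address: "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A".into(),
        amount: std::u128::MAX,
    };
    // With `arbitrary_precision`, the amount is written as a plain JSON number
    // and parsed back losslessly; without the feature, 128-bit integers may be
    // rejected or lose precision, depending on the serde_json version.
    let json = serde_json::to_string(&entry)?;
    let back: BalanceEntry = serde_json::from_str(&json)?;
    assert_eq!(back, entry);
    Ok(())
}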
32 changes: 32 additions & 0 deletions Stacks.toml
@@ -0,0 +1,32 @@
[node]
name = "helium-node"

[burnchain]
chain = "bitcoin"
mode = "regtest"
block_time = 5000

[mempool]
path = "/home/ludovic/mempool"

[[events_observer]]
port = 8080
address = "127.0.0.1"
events_keys = [
"STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store::print",
"STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.contract.ft-token",
"STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.contract.nft-token",
"stx"
]

[[mstx_balance]]
address = "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A"
amount = 10000

[[mstx_balance]]
address = "ST1JA3KG2CQY67FZ071BSHMT18CQPCQVMNZ6A7XE6"
amount = 100000

[[mstx_balance]]
address = "ST2VHM28V9E5QCRD6C73215KAPSBKQGPWTEE5CMQT"
amount = 10000
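The new Stacks.toml above configures a helium node against a bitcoin regtest burnchain with a block_time of 5000 (presumably milliseconds), registers an events observer on 127.0.0.1:8080 for selected contract print/FT/NFT event keys plus native "stx" events, and seeds three addresses with microSTX balances. A rough sketch of how a file with this shape could be read using the toml and serde_derive crates already in the manifest; these struct names are illustrative, not the node's actual configuration types:

use serde_derive::Deserialize;

// Illustrative mirror of the file's shape, not the node's real config structs.
#[derive(Deserialize, Debug)]
struct ConfigFile {
    node: NodeConfig,
    burnchain: BurnchainConfig,
    mempool: MempoolConfig,
    #[serde(default)]
    events_observer: Vec<EventObserverConfig>,
    #[serde(default)]
    mstx_balance: Vec<InitialBalance>,
}

#[derive(Deserialize, Debug)]
struct NodeConfig { name: String }

#[derive(Deserialize, Debug)]
struct BurnchainConfig { chain: String, mode: String, block_time: u64 }

#[derive(Deserialize, Debug)]
struct MempoolConfig { path: String }

#[derive(Deserialize, Debug)]
struct EventObserverConfig { port: u16, address: String, events_keys: Vec<String> }

#[derive(Deserialize, Debug)]
struct InitialBalance { address: String, amount: u64 }

fn main() {
    let raw = std::fs::read_to_string("Stacks.toml").expect("failed to read Stacks.toml");
    let config: ConfigFile = toml::from_str(&raw).expect("invalid Stacks.toml");
    println!("node `{}` seeds {} balance(s)", config.node.name, config.mstx_balance.len());
}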
10 changes: 5 additions & 5 deletions src/burnchains/burnchain.rs
@@ -593,10 +593,10 @@ impl Burnchain

snapshot.index_root = index_root;

info!("OPS-HASH({}): {}", this_block_height, &snapshot.ops_hash);
info!("INDEX-ROOT({}): {}", this_block_height, &snapshot.index_root);
info!("SORTITION-HASH({}): {}", this_block_height, &snapshot.sortition_hash);
info!("CONSENSUS({}): {}", this_block_height, &snapshot.consensus_hash);
debug!("OPS-HASH({}): {}", this_block_height, &snapshot.ops_hash);
debug!("INDEX-ROOT({}): {}", this_block_height, &snapshot.index_root);
debug!("SORTITION-HASH({}): {}", this_block_height, &snapshot.sortition_hash);
debug!("CONSENSUS({}): {}", this_block_height, &snapshot.consensus_hash);
Ok(snapshot)
}

@@ -608,7 +608,7 @@ impl Burnchain {
/// * commit the results of the sortition
/// Returns the BlockSnapshot created from this block.
pub fn process_block_ops<'a>(tx: &mut BurnDBTx<'a>, burnchain: &Burnchain, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, blockstack_txs: &Vec<BlockstackOperationType>) -> Result<BlockSnapshot, burnchain_error> {
info!("BEGIN({}) block ({},{})", block_header.block_height, block_header.block_hash, block_header.parent_block_hash);
debug!("BEGIN({}) block ({},{})", block_header.block_height, block_header.block_hash, block_header.parent_block_hash);
debug!("Append {} operation(s) from block {} {}", blockstack_txs.len(), block_header.block_height, &block_header.block_hash);

// check each transaction, and filter out only the ones that are valid
6 changes: 3 additions & 3 deletions src/chainstate/burn/db/burndb.rs
@@ -602,17 +602,17 @@ impl BurnDB {
fn store_burnchain_transaction<'a>(tx: &mut BurnDBTx<'a>, blockstack_op: &BlockstackOperationType) -> Result<(), db_error> {
match blockstack_op {
BlockstackOperationType::LeaderKeyRegister(ref op) => {
info!("ACCEPTED({}) leader key register {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex);
debug!("ACCEPTED({}) leader key register {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex);
BurnDB::insert_leader_key(tx, op)
.expect("FATAL: failed to store leader key to Sqlite");
},
BlockstackOperationType::LeaderBlockCommit(ref op) => {
info!("ACCEPTED({}) leader block commit {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex);
debug!("ACCEPTED({}) leader block commit {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex);
BurnDB::insert_block_commit(tx, op)
.expect("FATAL: failed to store leader block commit to Sqlite");
},
BlockstackOperationType::UserBurnSupport(ref op) => {
info!("ACCEPTED({}) user burn support {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex);
debug!("ACCEPTED({}) user burn support {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex);
BurnDB::insert_user_burn(tx, op)
.expect("FATAL: failed to store user burn support to Sqlite");
}
2 changes: 1 addition & 1 deletion src/chainstate/burn/distribution.rs
@@ -197,7 +197,7 @@ impl BurnSamplePoint {
burn_sample[idx].user_burns.push(user_burn.clone());
},
None => {
info!("User burn {} ({},{}) of {} for key={}, block={} has no matching block commit",
debug!("User burn {} ({},{}) of {} for key={}, block={} has no matching block commit",
&user_burn.txid, user_burn.block_height, user_burn.vtxindex, user_burn.burn_fee,
user_burn.public_key.to_hex(), &user_burn.block_header_hash_160);
continue;
8 changes: 4 additions & 4 deletions src/chainstate/burn/sortition.rs
@@ -171,7 +171,7 @@ impl BlockSnapshot {
let ops_hash = OpsHash::from_txids(txids);
let ch = ConsensusHash::from_parent_block_data(tx, &ops_hash, block_height - 1, first_block_height, &block_header.parent_block_hash, burn_total)?;

info!("SORTITION({}): NO BLOCK CHOSEN", block_height);
debug!("SORTITION({}): NO BLOCK CHOSEN", block_height);

Ok(BlockSnapshot {
block_height: block_height,
@@ -218,7 +218,7 @@ impl BlockSnapshot {

if burn_dist.len() == 0 {
// no burns happened
info!("No burns happened in block {} {:?}", block_height, &block_hash);
debug!("No burns happened in block {} {:?}", block_height, &block_hash);
return BlockSnapshot::make_snapshot_no_sortition(tx, parent_snapshot, block_header, first_block_height, last_burn_total, &next_sortition_hash, &txids);
}

@@ -228,7 +228,7 @@ impl BlockSnapshot {
Some(total) => {
if total == 0 {
// no one burned, so no sortition
info!("No transactions submitted burns in block {} {:?}", block_height, &block_hash);
debug!("No transactions submitted burns in block {} {:?}", block_height, &block_hash);
return BlockSnapshot::make_snapshot_no_sortition(tx, parent_snapshot, block_header, first_block_height, last_burn_total, &next_sortition_hash, &txids);
}
else {
@@ -266,7 +266,7 @@ impl BlockSnapshot {
let next_ops_hash = OpsHash::from_txids(&txids);
let next_ch = ConsensusHash::from_parent_block_data(tx, &next_ops_hash, block_height - 1, first_block_height, &block_header.parent_block_hash, next_burn_total)?;

info!("SORTITION({}): WINNER IS {:?} (from {:?})", block_height, &winning_block.block_header_hash, &winning_block.txid);
debug!("SORTITION({}): WINNER IS {:?} (from {:?})", block_height, &winning_block.block_header_hash, &winning_block.txid);

Ok(BlockSnapshot {
block_height: block_height,
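The demotions from info! to debug! in burnchain.rs, burndb.rs, distribution.rs, and sortition.rs above move per-block bookkeeping lines out of the default log output, so they only appear when debug-level logging is enabled. A generic illustration of the effect, using the standard log and env_logger crates rather than this project's own logging macros and configuration, which may differ:

use log::{debug, info};

fn main() {
    // env_logger filters on the RUST_LOG environment variable: with
    // RUST_LOG=info only the info! line below prints, while the demoted
    // debug! line needs RUST_LOG=debug or finer to appear.
    env_logger::init();
    info!("CONSENSUS(100): ...");            // still visible at info level
    debug!("SORTITION(100): WINNER IS ..."); // now requires debug logging
}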
1 change: 1 addition & 0 deletions src/chainstate/stacks/db/accounts.rs
@@ -183,6 +183,7 @@ impl StacksChainState {
let cur_balance = db.get_account_stx_balance(principal);
let final_balance = cur_balance.checked_add(amount as u128).expect("FATAL: account balance overflow");
db.set_account_stx_balance(principal, final_balance as u128);
info!("{} credited: {} uSTX", principal, final_balance);
Ok(())
}).expect("FATAL: failed to credit account")
}
57 changes: 31 additions & 26 deletions src/chainstate/stacks/db/blocks.rs
@@ -624,8 +624,7 @@ impl StacksChainState {
}

/// Get a list of all anchored blocks' hashes, and their burnchain headers
#[cfg(test)]
pub fn list_blocks(blocks_conn: &DBConn, blocks_dir: &String) -> Result<Vec<(BurnchainHeaderHash, BlockHeaderHash)>, Error> {
pub fn list_blocks(blocks_conn: &DBConn) -> Result<Vec<(BurnchainHeaderHash, BlockHeaderHash)>, Error> {
let list_block_sql = "SELECT * FROM staging_blocks".to_string();
let mut blocks = query_rows::<StagingBlock, _>(blocks_conn, &list_block_sql, NO_PARAMS)
.map_err(Error::DBError)?;
@@ -636,7 +635,7 @@ impl StacksChainState {
/// Get a list of all microblocks' hashes, and their anchored blocks' hashes
#[cfg(test)]
pub fn list_microblocks(blocks_conn: &DBConn, blocks_dir: &String) -> Result<Vec<(BurnchainHeaderHash, BlockHeaderHash, Vec<BlockHeaderHash>)>, Error> {
let mut blocks = StacksChainState::list_blocks(blocks_conn, blocks_dir)?;
let mut blocks = StacksChainState::list_blocks(blocks_conn)?;
let mut ret = vec![];

for (burn_hash, block_hash) in blocks.drain(..) {
@@ -1814,7 +1813,7 @@ impl StacksChainState {
if signed_microblocks.len() == 0 {
if anchored_block_header.parent_microblock == EMPTY_MICROBLOCK_PARENT_HASH && anchored_block_header.parent_microblock_sequence == 0 {
// expected empty
warn!("No microblocks between {} and {}", parent_anchored_block_header.block_hash(), anchored_block_header.block_hash());
debug!("No microblocks between {} and {}", parent_anchored_block_header.block_hash(), anchored_block_header.block_hash());
return Some((0, None));
}
else {
@@ -2307,33 +2306,37 @@ impl StacksChainState {
/// Process a stream of microblocks
/// Return the fees and burns.
/// TODO: if we find an invalid Stacks microblock, then punish the miner who produced it
pub fn process_microblocks_transactions<'a>(clarity_tx: &mut ClarityTx<'a>, microblocks: &Vec<StacksMicroblock>) -> Result<(u128, u128), (Error, BlockHeaderHash)> {
pub fn process_microblocks_transactions<'a>(clarity_tx: &mut ClarityTx<'a>, microblocks: &Vec<StacksMicroblock>) -> Result<(u128, u128, Vec<StacksTransactionReceipt>), (Error, BlockHeaderHash)> {
let mut fees = 0u128;
let mut burns = 0u128;
let mut receipts = vec![];
for microblock in microblocks.iter() {
for tx in microblock.txs.iter() {
let (tx_fee, tx_burns) = StacksChainState::process_transaction(clarity_tx, tx)
let (tx_fee, tx_receipt) = StacksChainState::process_transaction(clarity_tx, tx)
.map_err(|e| (e, microblock.block_hash()))?;

fees = fees.checked_add(tx_fee as u128).expect("Fee overflow");
burns = burns.checked_add(tx_burns as u128).expect("Burns overflow");
burns = burns.checked_add(tx_receipt.stx_burned as u128).expect("Burns overflow");
receipts.push(tx_receipt);
}
}
Ok((fees, burns))
Ok((fees, burns, receipts))
}

/// Process a single anchored block.
/// Return the fees and burns.
fn process_block_transactions<'a>(clarity_tx: &mut ClarityTx<'a>, block: &StacksBlock) -> Result<(u128, u128), Error> {
fn process_block_transactions<'a>(clarity_tx: &mut ClarityTx<'a>, block: &StacksBlock) -> Result<(u128, u128, Vec<StacksTransactionReceipt>), Error> {
let mut fees = 0u128;
let mut burns = 0u128;
let mut receipts = vec![];
for tx in block.txs.iter() {
let (tx_fee, tx_burns) = StacksChainState::process_transaction(clarity_tx, tx)?;
let (tx_fee, tx_receipt) = StacksChainState::process_transaction(clarity_tx, tx)?;
fees = fees.checked_add(tx_fee as u128).expect("Fee overflow");
burns = burns.checked_add(tx_burns as u128).expect("Burns overflow");
burns = burns.checked_add(tx_receipt.stx_burned as u128).expect("Burns overflow");
receipts.push(tx_receipt);
}
Ok((fees, burns))
}
Ok((fees, burns, receipts))
}

/// Process a single matured miner reward.
/// Grant it STX tokens in the miner trust fund contract from the chain's boot code.
@@ -2431,7 +2434,7 @@ impl StacksChainState {
microblocks: &Vec<StacksMicroblock>, // parent microblocks
burnchain_commit_burn: u64,
burnchain_sortition_burn: u64,
user_burns: &Vec<StagingUserBurnSupport>) -> Result<StacksHeaderInfo, Error>
user_burns: &Vec<StagingUserBurnSupport>) -> Result<(StacksHeaderInfo, Vec<StacksTransactionReceipt>), Error>
{

debug!("Process block {:?} with {} transactions", &block.block_hash().to_hex(), block.txs.len());
@@ -2444,7 +2447,7 @@ impl StacksChainState {
StacksChainState::find_mature_miner_rewards(&mut chainstate_tx.headers_tx, parent_chain_tip)?
};

let scheduled_miner_reward = {
let (scheduled_miner_reward, txs_receipts) = {
let (parent_burn_header_hash, parent_block_hash) =
if block.header.is_genesis() {
// has to be the sentinal hashes if this block has no parent
@@ -2477,16 +2480,16 @@ impl StacksChainState {
let mut clarity_tx = StacksChainState::chainstate_block_begin(chainstate_tx, clarity_instance, &parent_burn_header_hash, &parent_block_hash, &MINER_BLOCK_BURN_HEADER_HASH, &MINER_BLOCK_HEADER_HASH);

// process microblock stream
let (microblock_fees, _microblock_burns) = match StacksChainState::process_microblocks_transactions(&mut clarity_tx, &microblocks) {
let (microblock_fees, _microblock_burns, mut microblock_txs_receipts) = match StacksChainState::process_microblocks_transactions(&mut clarity_tx, &microblocks) {
Err((e, offending_mblock_header_hash)) => {
let msg = format!("Invalid Stacks microblocks {},{} (offender {}): {:?}", block.header.parent_microblock, block.header.parent_microblock_sequence, offending_mblock_header_hash, &e);
warn!("{}", &msg);

clarity_tx.rollback_block();
return Err(Error::InvalidStacksMicroblock(msg, offending_mblock_header_hash));
},
Ok((fees, burns)) => {
(fees, burns)
Ok((fees, burns, events)) => {
(fees, burns, events)
}
};

@@ -2496,15 +2499,15 @@ impl StacksChainState {
last_microblock_hash, last_microblock_seq, microblocks.len());

// process anchored block
let (block_fees, block_burns) = match StacksChainState::process_block_transactions(&mut clarity_tx, &block) {
let (block_fees, block_burns, mut txs_receipts) = match StacksChainState::process_block_transactions(&mut clarity_tx, &block) {
Err(e) => {
let msg = format!("Invalid Stacks block {}: {:?}", block.block_hash(), &e);
warn!("{}", &msg);

clarity_tx.rollback_block();
return Err(Error::InvalidStacksBlock(msg));
},
Ok((block_fees, block_burns)) => (block_fees, block_burns)
Ok((block_fees, block_burns, txs_receipts)) => (block_fees, block_burns, txs_receipts)
};

// grant matured miner rewards
@@ -2542,7 +2545,9 @@ impl StacksChainState {
0xffffffffffffffff) // TODO: calculate total compute budget and scale up
.expect("FATAL: parsed and processed a block without a coinbase");

scheduled_miner_reward
txs_receipts.append(&mut microblock_txs_receipts);

(scheduled_miner_reward, txs_receipts)
};

let microblock_tail_opt = match microblocks.len() {
@@ -2561,7 +2566,7 @@ impl StacksChainState {
user_burns)
.expect("FATAL: failed to advance chain tip");

Ok(new_tip)
Ok((new_tip, txs_receipts))
}

/// Find and process the next staging block.
@@ -2571,7 +2576,7 @@ impl StacksChainState {
///
/// Occurs as a single, atomic transaction against the (marf'ed) headers database and
/// (un-marf'ed) staging block database, as well as against the chunk store.
pub fn process_next_staging_block(&mut self) -> Result<(Option<StacksHeaderInfo>, Option<TransactionPayload>), Error> {
pub fn process_next_staging_block(&mut self) -> Result<(Option<(StacksHeaderInfo, Vec<StacksTransactionReceipt>)>, Option<TransactionPayload>), Error> {
let (mut chainstate_tx, clarity_instance) = self.chainstate_tx_begin()?;

let blocks_path = chainstate_tx.blocks_tx.get_blocks_path().clone();
@@ -2689,7 +2694,7 @@ impl StacksChainState {
// attach the block to the chain state and calculate the next chain tip.
// Execute the confirmed microblocks' transactions against the chain state, and then
// execute the anchored block's transactions against the chain state.
let next_chain_tip = match StacksChainState::append_block(&mut chainstate_tx,
let (next_chain_tip, receipts) = match StacksChainState::append_block(&mut chainstate_tx,
clarity_instance,
&parent_block_header_info,
&next_staging_block.burn_header_hash,
@@ -2748,13 +2753,13 @@ impl StacksChainState {
chainstate_tx.commit()
.map_err(Error::DBError)?;

Ok((Some(next_chain_tip), None))
Ok((Some((next_chain_tip, receipts)), None))
}

/// Process some staging blocks, up to max_blocks.
/// Return new chain tips, and optionally any poison microblock payloads for each chain tip
/// found.
pub fn process_blocks(&mut self, max_blocks: usize) -> Result<Vec<(Option<StacksHeaderInfo>, Option<TransactionPayload>)>, Error> {
pub fn process_blocks(&mut self, max_blocks: usize) -> Result<Vec<(Option<(StacksHeaderInfo, Vec<StacksTransactionReceipt>)>, Option<TransactionPayload>)>, Error> {
let mut ret = vec![];

if max_blocks == 0 {
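The blocks.rs changes above thread transaction receipts out of block processing: process_microblocks_transactions, process_block_transactions, append_block, process_next_staging_block, and process_blocks now return a Vec<StacksTransactionReceipt> alongside their existing results (fees and burns from the transaction helpers, the new chain tip from the block-level drivers). A hedged sketch of how a caller inside the crate might consume the new process_blocks return type; EventDispatcher and its dispatch method are hypothetical stand-ins, not part of this changeset:

// Assumes the crate's chainstate types are in scope; `EventDispatcher` and
// `dispatch` are hypothetical placeholders for an events-observer forwarder.
fn drain_staging_blocks(
    chainstate: &mut StacksChainState,
    dispatcher: &mut EventDispatcher,
) -> Result<(), Error> {
    // Process up to 10 staging blocks and fan each block's receipts out.
    for (tip_opt, _poison_opt) in chainstate.process_blocks(10)? {
        if let Some((new_tip, receipts)) = tip_opt {
            debug!("Advanced to height {} with {} receipt(s)", new_tip.block_height, receipts.len());
            for receipt in receipts {
                dispatcher.dispatch(&receipt);
            }
        }
    }
    Ok(())
}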
