diff --git a/qa/rpc-tests/bigblocks.py b/qa/rpc-tests/bigblocks.py new file mode 100755 index 0000000000000..a24abec5ad7a6 --- /dev/null +++ b/qa/rpc-tests/bigblocks.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python2 +# Copyright (c) 2014 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# +# Test mining and broadcast of larger-than-1MB-blocks +# +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +from decimal import Decimal + +CACHE_DIR = "cache_bigblock" + +# regression test / testnet fork params: +FORK_TIME = 1438387200 +FORK_BLOCK_VERSION = 0x20000004 +FORK_GRACE_PERIOD = 60*60*24 + +class BigBlockTest(BitcoinTestFramework): + + def setup_chain(self): + print("Initializing test directory "+self.options.tmpdir) + print("Be patient, this test can take 5 or more minutes to run.") + + if not os.path.isdir(os.path.join(CACHE_DIR, "node0")): + print("Creating initial chain") + + for i in range(4): + initialize_datadir(CACHE_DIR, i) # Overwrite port/rpcport in bitcoin.conf + + first_block_time = FORK_TIME - 200 * 10*60 + + # Node 0 tries to create as-big-as-possible blocks. 
+ # Node 1 creates really small, old-version blocks + # Node 2 creates empty up-version blocks + # Node 3 creates empty, old-version blocks + self.nodes = [] + # Use node0 to mine blocks for input splitting + self.nodes.append(start_node(0, CACHE_DIR, ["-blockmaxsize=8000000", "-debug=net", + "-mocktime=%d"%(first_block_time,), + "-blockversion=%d"%(FORK_BLOCK_VERSION,)])) + self.nodes.append(start_node(1, CACHE_DIR, ["-blockmaxsize=50000", "-debug=net", + "-mocktime=%d"%(first_block_time,), + "-blockversion=3"])) + self.nodes.append(start_node(2, CACHE_DIR, ["-blockmaxsize=1000", + "-mocktime=%d"%(first_block_time,), + "-blockversion=%d"%(FORK_BLOCK_VERSION,)])) + self.nodes.append(start_node(3, CACHE_DIR, ["-blockmaxsize=1000", + "-mocktime=%d"%(first_block_time,), + "-blockversion=3"])) + + set_node_times(self.nodes, first_block_time) + + connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 1, 2) + connect_nodes_bi(self.nodes, 2, 3) + connect_nodes_bi(self.nodes, 3, 0) + + self.is_network_split = False + self.sync_all() + + # Have node0 and node1 alternate finding blocks + # before the fork time, so it's 50% / 50% vote + block_time = first_block_time + for i in range(0,200): + miner = i%2 + set_node_times(self.nodes, block_time) + self.nodes[miner].generate(1) + assert(self.sync_blocks(self.nodes[0:2])) + block_time = block_time + 10*60 + + # Generate 1200 addresses + addresses = [ self.nodes[3].getnewaddress() for i in range(0,1200) ] + + amount = Decimal("0.00125") + + send_to = { } + for address in addresses: + send_to[address] = amount + + tx_file = open(os.path.join(CACHE_DIR, "txdata"), "w") + + # Create four megabytes worth of transactions ready to be + # mined: + print("Creating 100 40K transactions (4MB)") + for node in range(0,2): + for i in range(0,50): + txid = self.nodes[node].sendmany("", send_to, 1) + txdata = self.nodes[node].getrawtransaction(txid) + tx_file.write(txdata+"\n") + tx_file.close() + + stop_nodes(self.nodes) + 
wait_bitcoinds() + self.nodes = [] + for i in range(4): + os.remove(log_filename(CACHE_DIR, i, "debug.log")) + os.remove(log_filename(CACHE_DIR, i, "db.log")) + os.remove(log_filename(CACHE_DIR, i, "peers.dat")) + os.remove(log_filename(CACHE_DIR, i, "fee_estimates.dat")) + + + for i in range(4): + from_dir = os.path.join(CACHE_DIR, "node"+str(i)) + to_dir = os.path.join(self.options.tmpdir, "node"+str(i)) + shutil.copytree(from_dir, to_dir) + initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf + + def sync_blocks(self, rpc_connections, wait=1, max_wait=30): + """ + Wait until everybody has the same block count + """ + for i in range(0,max_wait): + if i > 0: time.sleep(wait) + counts = [ x.getblockcount() for x in rpc_connections ] + if counts == [ counts[0] ]*len(counts): + return True + return False + + def setup_network(self): + self.nodes = [] + last_block_time = FORK_TIME - 10*60 + + self.nodes.append(start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-debug=net", + "-mocktime=%d"%(last_block_time,), + "-blockversion=%d"%(FORK_BLOCK_VERSION,)])) + self.nodes.append(start_node(1, self.options.tmpdir, ["-blockmaxsize=50000", "-debug=net", + "-mocktime=%d"%(last_block_time,), + "-blockversion=3"])) + self.nodes.append(start_node(2, self.options.tmpdir, ["-blockmaxsize=1000", + "-mocktime=%d"%(last_block_time,), + "-blockversion=%d"%(FORK_BLOCK_VERSION,)])) + self.nodes.append(start_node(3, self.options.tmpdir, ["-blockmaxsize=1000", + "-mocktime=%d"%(last_block_time,), + "-blockversion=3"])) + connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 1, 2) + connect_nodes_bi(self.nodes, 2, 3) + connect_nodes_bi(self.nodes, 3, 0) + + # Populate node0's mempool with cached pre-created transactions: + with open(os.path.join(CACHE_DIR, "txdata"), "r") as f: + for line in f: + self.nodes[0].sendrawtransaction(line.rstrip()) + + def copy_mempool(self, from_node, to_node): + txids = from_node.getrawmempool() + for txid 
in txids: + txdata = from_node.getrawtransaction(txid) + to_node.sendrawtransaction(txdata) + + def TestMineBig(self, expect_big): + # Test if node0 will mine big blocks. + b1hash = self.nodes[0].generate(1)[0] + b1 = self.nodes[0].getblock(b1hash, True) + assert(self.sync_blocks(self.nodes)) + + if expect_big: + assert(b1['size'] > 1000*1000) + + # Have node1 mine on top of the block, + # to make sure it goes along with the fork + b2hash = self.nodes[1].generate(1)[0] + b2 = self.nodes[1].getblock(b2hash, True) + assert(b2['previousblockhash'] == b1hash) + assert(self.sync_blocks(self.nodes)) + + else: + assert(b1['size'] < 1000*1000) + + # Reset chain to before b1hash: + for node in self.nodes: + node.invalidateblock(b1hash) + assert(self.sync_blocks(self.nodes)) + + + def run_test(self): + # nodes 0 and 1 have 50 mature 50-BTC coinbase transactions. + # Spend them with 50 transactions, each that has + # 1,200 outputs (so they're about 41K big). + + print("Testing fork conditions") + + # Fork is controlled by block timestamp and miner super-majority; + # large blocks may only be created after a supermajority of miners + # produce up-version blocks plus a grace period AND after a + # hard-coded earliest-possible date. + + # At this point the chain is 200 blocks long + # alternating between version=3 and version=FORK_BLOCK_VERSION + # blocks. + + # NOTE: the order of these test is important! + # set_node_times must advance time. Local time moving + # backwards causes problems. 
+ + # Time starts a little before earliest fork time + set_node_times(self.nodes, FORK_TIME - 100) + + # No supermajority, and before earliest fork time: + self.TestMineBig(False) + + # node2 creates empty up-version blocks; creating + # 50 in a row makes 75 of previous 100 up-version + # (which is the -regtest activation condition) + t_delta = FORK_GRACE_PERIOD/50 + blocks = [] + for i in range(50): + set_node_times(self.nodes, FORK_TIME + t_delta*i - 1) + blocks.append(self.nodes[2].generate(1)[0]) + assert(self.sync_blocks(self.nodes)) + + # Earliest time for a big block is the timestamp of the + # supermajority block plus grace period: + lastblock = self.nodes[0].getblock(blocks[-1], True) + t_fork = lastblock["time"] + FORK_GRACE_PERIOD + + self.TestMineBig(False) # Supermajority... but before grace period end + + # Test right around the switchover time. + set_node_times(self.nodes, t_fork-1) + self.TestMineBig(False) + + # Note that node's local times are irrelevant, block timestamps + # are all that count-- so node0 will mine a big block with timestamp in the + # future from the perspective of the other nodes, but as long as + # its timestamp is not too far in the future (2 hours) it will be + # accepted. + self.nodes[0].setmocktime(t_fork) + self.TestMineBig(True) + + # Shutdown then restart node[0], it should + # remember supermajority state and produce a big block. 
+ stop_node(self.nodes[0], 0) + self.nodes[0] = start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-debug=net", + "-mocktime=%d"%(t_fork,), + "-blockversion=%d"%(FORK_BLOCK_VERSION,)]) + self.copy_mempool(self.nodes[1], self.nodes[0]) + connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 0, 3) + self.TestMineBig(True) + + # Test re-orgs past the activation block (blocks[-1]) + # + # Shutdown node[0] again: + stop_node(self.nodes[0], 0) + + # Mine a longer chain with two version=3 blocks: + self.nodes[3].invalidateblock(blocks[-1]) + v3blocks = self.nodes[3].generate(2) + assert(self.sync_blocks(self.nodes[1:])) + + # Restart node0, it should re-org onto longer chain, reset + # activation time, and refuse to mine a big block: + self.nodes[0] = start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-debug=net", + "-mocktime=%d"%(t_fork,), + "-blockversion=%d"%(FORK_BLOCK_VERSION,)]) + self.copy_mempool(self.nodes[1], self.nodes[0]) + connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 0, 3) + assert(self.sync_blocks(self.nodes)) + self.TestMineBig(False) + + # Mine 4 FORK_BLOCK_VERSION blocks and set the time past the + # grace period: bigger block OK: + self.nodes[2].generate(4) + assert(self.sync_blocks(self.nodes)) + set_node_times(self.nodes, t_fork + FORK_GRACE_PERIOD) + self.TestMineBig(True) + + + print("Cached test chain and transactions left in %s"%(CACHE_DIR)) + print(" (remove that directory if you will not run this test again)") + + +if __name__ == '__main__': + BigBlockTest().main() diff --git a/qa/rpc-tests/bipdersig-p2p.py b/qa/rpc-tests/bipdersig-p2p.py index 41717377b217a..467d7cfce7327 100755 --- a/qa/rpc-tests/bipdersig-p2p.py +++ b/qa/rpc-tests/bipdersig-p2p.py @@ -35,10 +35,10 @@ def unDERify(tx): Connect to a single node. Mine 2 (version 2) blocks (save the coinbases for later). Generate 98 more version 2 blocks, verify the node accepts. -Mine 749 version 3 blocks, verify the node accepts. 
-Check that the new DERSIG rules are not enforced on the 750th version 3 block. -Check that the new DERSIG rules are enforced on the 751st version 3 block. -Mine 199 new version blocks. +Mine 74 version 3 blocks, verify the node accepts. +Check that the new DERSIG rules are not enforced on the 75th version 3 block. +Check that the new DERSIG rules are enforced on the 76th version 3 block. +Mine 19 new version blocks. Mine 1 old-version block. Mine 1 new version block. Mine 1 old version block, see that the node rejects. @@ -91,9 +91,9 @@ def get_tests(self): self.tip = block.sha256 yield TestInstance(test_blocks, sync_every_block=False) - ''' Mine 749 version 3 blocks ''' + ''' Mine 74 version 3 blocks ''' test_blocks = [] - for i in xrange(749): + for i in xrange(74): block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1) block.nVersion = 3 block.rehash() @@ -104,7 +104,7 @@ def get_tests(self): yield TestInstance(test_blocks, sync_every_block=False) ''' - Check that the new DERSIG rules are not enforced in the 750th + Check that the new DERSIG rules are not enforced in the 75th version 3 block. ''' spendtx = self.create_transaction(self.nodes[0], @@ -124,7 +124,7 @@ def get_tests(self): yield TestInstance([[block, True]]) ''' - Check that the new DERSIG rules are enforced in the 751st version 3 + Check that the new DERSIG rules are enforced in the 76th version 3 block. 
''' spendtx = self.create_transaction(self.nodes[0], @@ -141,9 +141,9 @@ def get_tests(self): self.last_block_time += 1 yield TestInstance([[block, False]]) - ''' Mine 199 new version blocks on last valid tip ''' + ''' Mine 19 new version blocks on last valid tip ''' test_blocks = [] - for i in xrange(199): + for i in xrange(19): block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1) block.nVersion = 3 block.rehash() diff --git a/qa/rpc-tests/bipdersig.py b/qa/rpc-tests/bipdersig.py deleted file mode 100755 index 243f816f65264..0000000000000 --- a/qa/rpc-tests/bipdersig.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python2 -# Copyright (c) 2014 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# -# Test the BIP66 changeover logic -# - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -import os -import shutil - -class BIP66Test(BitcoinTestFramework): - - def setup_network(self): - self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, [])) - self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"])) - self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"])) - connect_nodes(self.nodes[1], 0) - connect_nodes(self.nodes[2], 0) - self.is_network_split = False - self.sync_all() - - def run_test(self): - cnt = self.nodes[0].getblockcount() - - # Mine some old-version blocks - self.nodes[1].generate(100) - self.sync_all() - if (self.nodes[0].getblockcount() != cnt + 100): - raise AssertionError("Failed to mine 100 version=2 blocks") - - # Mine 750 new-version blocks - for i in xrange(15): - self.nodes[2].generate(50) - self.sync_all() - if (self.nodes[0].getblockcount() != cnt + 850): - raise AssertionError("Failed to mine 750 version=3 blocks") - - # TODO: check that new DERSIG rules are not enforced - - # Mine 1 new-version block 
- self.nodes[2].generate(1) - self.sync_all() - if (self.nodes[0].getblockcount() != cnt + 851): - raise AssertionFailure("Failed to mine a version=3 blocks") - - # TODO: check that new DERSIG rules are enforced - - # Mine 198 new-version blocks - for i in xrange(2): - self.nodes[2].generate(99) - self.sync_all() - if (self.nodes[0].getblockcount() != cnt + 1049): - raise AssertionError("Failed to mine 198 version=3 blocks") - - # Mine 1 old-version block - self.nodes[1].generate(1) - self.sync_all() - if (self.nodes[0].getblockcount() != cnt + 1050): - raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks") - - # Mine 1 new-version blocks - self.nodes[2].generate(1) - self.sync_all() - if (self.nodes[0].getblockcount() != cnt + 1051): - raise AssertionError("Failed to mine a version=3 block") - - # Mine 1 old-version blocks - try: - self.nodes[1].generate(1) - raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks") - except JSONRPCException: - pass - self.sync_all() - if (self.nodes[0].getblockcount() != cnt + 1051): - raise AssertionError("Accepted a version=2 block after 950 version=3 blocks") - - # Mine 1 new-version blocks - self.nodes[2].generate(1) - self.sync_all() - if (self.nodes[0].getblockcount() != cnt + 1052): - raise AssertionError("Failed to mine a version=3 block") - -if __name__ == '__main__': - BIP66Test().main() diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 099714811716d..abe0049019d07 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -40,6 +40,7 @@ BITCOIN_TESTS =\ test/base58_tests.cpp \ test/base64_tests.cpp \ test/bip32_tests.cpp \ + test/block_size_tests.cpp \ test/bloom_tests.cpp \ test/checkblock_tests.cpp \ test/Checkpoints_tests.cpp \ @@ -59,6 +60,7 @@ BITCOIN_TESTS =\ test/pmt_tests.cpp \ test/policyestimator_tests.cpp \ test/pow_tests.cpp \ + test/ReceiveMsgBytes_tests.cpp \ test/rpc_tests.cpp \ test/sanity_tests.cpp \ 
test/scheduler_tests.cpp \ diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index 45990f6bd8aa2..872a1b1888eff 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -187,7 +187,9 @@ static void MutateTxAddInput(CMutableTransaction& tx, const string& strInput) uint256 txid(uint256S(strTxid)); static const unsigned int minTxOutSz = 9; - static const unsigned int maxVout = MAX_BLOCK_SIZE / minTxOutSz; + // Don't know if the reduce max transaction size fork has activated yet or not; + // assume it has if after the earliest fork time. + unsigned int maxVout = Params().GetConsensus().MaxTransactionSize(GetTime(), GetTime()) / minTxOutSz; // extract and validate vout string strVout = strInput.substr(pos + 1, string::npos); diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 7785417518630..d34193378ac3a 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -53,6 +53,19 @@ class CMainParams : public CChainParams { nMinerThreads = 0; nPruneAfterHeight = 100000; + // Timestamps for forking consensus rule changes: + // Allow bigger blocks + // Limit transactions to 100,000 bytes + consensus.nEarliestSizeForkTime = 1452470400; // 11 Jan 2016 00:00:00 UTC + // 1MB max blocks before 11 Jan 2016 + // Then, if miner consensus: 8MB max, doubling every two years + consensus.nMaxSizePreFork = 1000*1000; // 1MB max pre-fork + consensus.nSizeDoubleEpoch = 60*60*24*365*2; // two years + consensus.nMaxSizeBase = 8*1000*1000; // 8MB + consensus.nMaxSizeDoublings = 10; + consensus.nActivateSizeForkMajority = 750; // 75% of hashpower to activate fork + consensus.nSizeForkGracePeriod = 60*60*24*14; // two week grace period after activation + /** * Build the genesis block. 
Note that the output of its generation * transaction cannot be spent since it did not originally exist in the @@ -150,6 +163,16 @@ class CTestNetParams : public CMainParams { nMinerThreads = 0; nPruneAfterHeight = 1000; + // 1MB max blocks before 1 Aug 2015 + // Then, if miner consensus: 8MB max, doubling every two years + consensus.nMaxSizePreFork = 1000*1000; // 1MB max pre-fork + consensus.nEarliestSizeForkTime = 1438387200; // 1 Aug 2015 00:00:00 UTC + consensus.nSizeDoubleEpoch = 60*60*24*365*2; // two years + consensus.nMaxSizeBase = 8*1000*1000; // 8MB + consensus.nMaxSizeDoublings = 10; + consensus.nActivateSizeForkMajority = 75; // 75 of 100 to activate fork + consensus.nSizeForkGracePeriod = 60*60*24; // 1-day grace period + //! Modify the testnet genesis block so the timestamp is valid for a later start. genesis.nTime = 1296688602; genesis.nNonce = 414098458; @@ -198,9 +221,10 @@ class CRegTestParams : public CTestNetParams { CRegTestParams() { strNetworkID = "regtest"; consensus.nSubsidyHalvingInterval = 150; - consensus.nMajorityEnforceBlockUpgrade = 750; - consensus.nMajorityRejectBlockOutdated = 950; - consensus.nMajorityWindow = 1000; + // Make forks on regtest the same as mainnet but 10x easier, to speed up the regression tests. 
+ consensus.nMajorityEnforceBlockUpgrade = 75; + consensus.nMajorityRejectBlockOutdated = 95; + consensus.nMajorityWindow = 100; consensus.powLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); pchMessageStart[0] = 0xfa; pchMessageStart[1] = 0xbf; diff --git a/src/chainparams.h b/src/chainparams.h index 8044b553e19a2..91892428b5509 100644 --- a/src/chainparams.h +++ b/src/chainparams.h @@ -12,6 +12,7 @@ #include "primitives/block.h" #include "protocol.h" +#include #include struct CDNSSeedData { @@ -71,6 +72,7 @@ class CChainParams const std::vector& Base58Prefix(Base58Type type) const { return base58Prefixes[type]; } const std::vector& FixedSeeds() const { return vFixedSeeds; } const Checkpoints::CCheckpointData& Checkpoints() const { return checkpointData; } + protected: CChainParams() {} @@ -79,6 +81,7 @@ class CChainParams //! Raw pub key bytes for the broadcast alert signing key. std::vector vAlertPubKey; int nDefaultPort; + int nMinerThreads; uint64_t nPruneAfterHeight; std::vector vSeeds; diff --git a/src/consensus/params.h b/src/consensus/params.h index c480a1cce1983..28eac18b51b99 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -25,6 +25,46 @@ struct Params { int64_t nPowTargetSpacing; int64_t nPowTargetTimespan; int64_t DifficultyAdjustmentInterval() const { return nPowTargetTimespan / nPowTargetSpacing; } + + /** Maximum block size parameters */ + uint32_t nMaxSizePreFork; + uint64_t nEarliestSizeForkTime; + uint32_t nSizeDoubleEpoch; + uint64_t nMaxSizeBase; + uint8_t nMaxSizeDoublings; + int nActivateSizeForkMajority; + uint64_t nSizeForkGracePeriod; + + /** Maximum block size of a block with timestamp nBlockTimestamp */ + uint64_t MaxBlockSize(uint64_t nBlockTimestamp, uint64_t nSizeForkActivationTime) const { + if (nBlockTimestamp < nEarliestSizeForkTime || nBlockTimestamp < nSizeForkActivationTime) + return nMaxSizePreFork; + if (nBlockTimestamp >= nEarliestSizeForkTime + nSizeDoubleEpoch * 
nMaxSizeDoublings) + return nMaxSizeBase << nMaxSizeDoublings; + + // Piecewise-linear-between-doublings growth. Calculated based on a fixed + // timestamp and not the activation time so the maximum size is + // predictable, and so the activation time can be completely removed in + // a future version of this code after the fork is complete. + uint64_t timeDelta = nBlockTimestamp - nEarliestSizeForkTime; + uint64_t doublings = timeDelta / nSizeDoubleEpoch; + uint64_t remain = timeDelta % nSizeDoubleEpoch; + uint64_t interpolate = (nMaxSizeBase << doublings) * remain / nSizeDoubleEpoch; + uint64_t nMaxSize = (nMaxSizeBase << doublings) + interpolate; + return nMaxSize; + } + /** Maximum number of signature ops in a block with timestamp nBlockTimestamp */ + uint64_t MaxBlockSigops(uint64_t nBlockTimestamp, uint64_t nSizeForkActivationTime) const { + return MaxBlockSize(nBlockTimestamp, nSizeForkActivationTime)/50; + } + /** Maximum size of a transaction in a block with timestamp nBlockTimestamp */ + uint64_t MaxTransactionSize(uint64_t nBlockTimestamp, uint64_t nSizeForkActivationTime) const { + if (nBlockTimestamp < nEarliestSizeForkTime || nBlockTimestamp < nSizeForkActivationTime) + return nMaxSizePreFork; + return 100*1000; + } + int ActivateSizeForkMajority() const { return nActivateSizeForkMajority; } + uint64_t SizeForkGracePeriod() const { return nSizeForkGracePeriod; } }; } // namespace Consensus diff --git a/src/main.cpp b/src/main.cpp index 6c4cfe75aae38..cbb56d6ca5c52 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -63,6 +64,8 @@ size_t nCoinCacheUsage = 5000 * 300; uint64_t nPruneTarget = 0; bool fAlerts = DEFAULT_ALERTS; +SizeForkTime sizeForkTime(std::numeric_limits::max()); + /** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */ CFeeRate minRelayTxFee = CFeeRate(1000); @@ -76,11 +79,19 @@ map mapOrphanTransactions; map > 
mapOrphanTransactionsByPrev; void EraseOrphansFor(NodeId peer); +static bool SanityCheckMessage(CNode* peer, const CNetMessage& msg); + /** - * Returns true if there are nRequired or more blocks of minVersion or above - * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards. + * Returns true if there are nRequired or more blocks with a version that matches + * versionOrBitmask in the last Consensus::Params::nMajorityWindow blocks, + * starting at pstart and going backwards. + * + * A bitmask is used to be compatible with Pieter Wuille's "Version bits" + * proposal, so it is possible for multiple forks to be in-progress + * at the same time. A simple >= version field is used for forks that + * predate this proposal. */ -static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams); +static bool IsSuperMajority(int versionOrBitmask, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams, bool useBitMask = true); static void CheckBlockIndex(); /** Constant stuff for coinbase transactions we create: */ @@ -237,16 +248,63 @@ struct CNodeState { } }; -/** Map maintaining per-node state. Requires cs_main. */ -map mapNodeState; +// Class that maintains per-node state, and +// acts as a RAII smart-pointer that make sure +// the state stays consistent. +class NodeStatePtr { +private: + static CCriticalSection cs_mapNodeState; + static map mapNodeState; + CNodeState* s; + NodeId id; +public: + static void insert(NodeId nodeid, const CNode *pnode) { + LOCK(cs_mapNodeState); + CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second; + state.name = pnode->addrName; + state.address = pnode->addr; + } -// Requires cs_main. 
-CNodeState *State(NodeId pnode) { - map::iterator it = mapNodeState.find(pnode); - if (it == mapNodeState.end()) - return NULL; - return &it->second; -} + NodeStatePtr(NodeId nodeid) { + LOCK(cs_mapNodeState); + map::iterator it = mapNodeState.find(nodeid); + if (it == mapNodeState.end()) + s = NULL; + else { + s = &it->second; + id = nodeid; + cs_mapNodeState.lock(); + } + } + ~NodeStatePtr() { + if (s) + cs_mapNodeState.unlock(); + } + bool IsNull() const { return s == NULL; } + + CNodeState* operator ->() { return s; } + const CNodeState* operator ->() const { return s; } + + void erase() { + if (s) { + mapNodeState.erase(id); + s = NULL; + cs_mapNodeState.unlock(); + } + } + + static void clear() { + LOCK(cs_mapNodeState); + mapNodeState.clear(); + } + +private: + // disallow copy/assignment + NodeStatePtr(const NodeStatePtr&) {} + NodeStatePtr& operator=(const NodeStatePtr& p) { return *this; } +}; +CCriticalSection NodeStatePtr::cs_mapNodeState; +map NodeStatePtr::mapNodeState; int GetHeight() { @@ -254,7 +312,7 @@ int GetHeight() return chainActive.Height(); } -void UpdatePreferredDownload(CNode* node, CNodeState* state) +void UpdatePreferredDownload(CNode* node, NodeStatePtr& state) { nPreferredDownload -= state->fPreferredDownload; @@ -271,15 +329,12 @@ int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore, const Consens } void InitializeNode(NodeId nodeid, const CNode *pnode) { - LOCK(cs_main); - CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second; - state.name = pnode->addrName; - state.address = pnode->addr; + NodeStatePtr::insert(nodeid, pnode); } void FinalizeNode(NodeId nodeid) { LOCK(cs_main); - CNodeState *state = State(nodeid); + NodeStatePtr state(nodeid); if (state->fSyncStarted) nSyncStarted--; @@ -293,15 +348,16 @@ void FinalizeNode(NodeId nodeid) { EraseOrphansFor(nodeid); nPreferredDownload -= state->fPreferredDownload; - mapNodeState.erase(nodeid); + state.erase(); } // Requires cs_main. 
// Returns a bool indicating whether we requested this block. bool MarkBlockAsReceived(const uint256& hash) { + AssertLockHeld(cs_main); map::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end()) { - CNodeState *state = State(itInFlight->second.first); + NodeStatePtr state(itInFlight->second.first); nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders; state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; state->vBlocksInFlight.erase(itInFlight->second.second); @@ -315,12 +371,14 @@ bool MarkBlockAsReceived(const uint256& hash) { // Requires cs_main. void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) { - CNodeState *state = State(nodeid); - assert(state != NULL); + AssertLockHeld(cs_main); // Make sure it's not listed somewhere already. MarkBlockAsReceived(hash); + NodeStatePtr state(nodeid); + assert(!state.IsNull()); + int64_t nNow = GetTimeMicros(); QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders, consensusParams)}; nQueuedValidatedHeaders += newentry.fValidatedHeaders; @@ -331,9 +389,8 @@ void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Pa } /** Check whether the last unknown block a peer advertized is not yet known. */ -void ProcessBlockAvailability(NodeId nodeid) { - CNodeState *state = State(nodeid); - assert(state != NULL); +static void ProcessBlockAvailability(NodeStatePtr& state) { + AssertLockHeld(cs_main); if (!state->hashLastUnknownBlock.IsNull()) { BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock); @@ -347,10 +404,11 @@ void ProcessBlockAvailability(NodeId nodeid) { /** Update tracking information about which blocks a peer is assumed to have. 
*/ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { - CNodeState *state = State(nodeid); - assert(state != NULL); + AssertLockHeld(cs_main); + NodeStatePtr state(nodeid); + assert(!state.IsNull()); - ProcessBlockAvailability(nodeid); + ProcessBlockAvailability(state); BlockMap::iterator it = mapBlockIndex.find(hash); if (it != mapBlockIndex.end() && it->second->nChainWork > 0) { @@ -388,12 +446,13 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vectorpindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) { // This peer has nothing interesting. @@ -469,9 +528,8 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vectornMisbehavior; stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; @@ -486,6 +544,7 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { void RegisterNodeSignals(CNodeSignals& nodeSignals) { nodeSignals.GetHeight.connect(&GetHeight); + nodeSignals.SanityCheckMessages.connect(&SanityCheckMessage); nodeSignals.ProcessMessages.connect(&ProcessMessages); nodeSignals.SendMessages.connect(&SendMessages); nodeSignals.InitializeNode.connect(&InitializeNode); @@ -495,6 +554,7 @@ void RegisterNodeSignals(CNodeSignals& nodeSignals) void UnregisterNodeSignals(CNodeSignals& nodeSignals) { nodeSignals.GetHeight.disconnect(&GetHeight); + nodeSignals.SanityCheckMessages.disconnect(&SanityCheckMessage); nodeSignals.ProcessMessages.disconnect(&ProcessMessages); nodeSignals.SendMessages.disconnect(&SendMessages); nodeSignals.InitializeNode.disconnect(&InitializeNode); @@ -796,7 +856,7 @@ unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& in -bool CheckTransaction(const CTransaction& tx, CValidationState &state) +bool CheckTransaction(const CTransaction& tx, CValidationState &state, uint64_t nMaxTxSize) { // Basic checks that don't depend on any context if (tx.vin.empty()) @@ 
-806,7 +866,8 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state) return state.DoS(10, error("CheckTransaction(): vout empty"), REJECT_INVALID, "bad-txns-vout-empty"); // Size limits - if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE) + size_t txSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION); + if (txSize > nMaxTxSize) return state.DoS(100, error("CheckTransaction(): size limits failed"), REJECT_INVALID, "bad-txns-oversize"); @@ -890,7 +951,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa if (pfMissingInputs) *pfMissingInputs = false; - if (!CheckTransaction(tx, state)) + if (!CheckTransaction(tx, state, Params().GetConsensus().MaxTransactionSize(GetAdjustedTime(), sizeForkTime.load()))) return error("AcceptToMemoryPool: CheckTransaction failed"); // Coinbase is only valid in a block, not as a loose transaction @@ -1302,14 +1363,13 @@ void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) CheckForkWarningConditions(); } -// Requires cs_main. 
void Misbehaving(NodeId pnode, int howmuch) { if (howmuch == 0) return; - CNodeState *state = State(pnode); - if (state == NULL) + NodeStatePtr state(pnode); + if (state.IsNull()) return; state->nMisbehavior += howmuch; @@ -1341,9 +1401,10 @@ void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state int nDoS = 0; if (state.IsInvalid(nDoS)) { std::map::iterator it = mapBlockSource.find(pindex->GetBlockHash()); - if (it != mapBlockSource.end() && State(it->second)) { + NodeStatePtr nodeState(it->second); + if (it != mapBlockSource.end() && !nodeState.IsNull()) { CBlockReject reject = {state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), pindex->GetBlockHash()}; - State(it->second)->rejects.push_back(reject); + nodeState->rejects.push_back(reject); if (nDoS > 0) Misbehaving(it->second, nDoS); } @@ -1788,6 +1849,12 @@ static int64_t nTimeIndex = 0; static int64_t nTimeCallbacks = 0; static int64_t nTimeTotal = 0; +static bool DidBlockTriggerSizeFork(const CBlock &block, const CBlockIndex *pindex, const CChainParams &chainparams) { + return (block.nVersion & SIZE_FORK_VERSION) && + (pblocktree->ForkActivated(SIZE_FORK_VERSION) == uint256()) && + IsSuperMajority(SIZE_FORK_VERSION, pindex, chainparams.GetConsensus().ActivateSizeForkMajority(), chainparams.GetConsensus(), true /* use bitmask */); +} + bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck) { const CChainParams& chainparams = Params(); @@ -1870,7 +1937,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin nInputs += tx.vin.size(); nSigOps += GetLegacySigOpCount(tx); - if (nSigOps > MAX_BLOCK_SIGOPS) + if (nSigOps > chainparams.GetConsensus().MaxBlockSigops(block.GetBlockTime(), sizeForkTime.load())) return state.DoS(100, error("ConnectBlock(): too many sigops"), REJECT_INVALID, "bad-blk-sigops"); @@ -1886,7 +1953,7 @@ bool ConnectBlock(const CBlock& 
block, CValidationState& state, CBlockIndex* pin // this is to prevent a "rogue miner" from creating // an incredibly-expensive-to-validate block. nSigOps += GetP2SHSigOpCount(tx, view); - if (nSigOps > MAX_BLOCK_SIGOPS) + if (nSigOps > chainparams.GetConsensus().MaxBlockSigops(block.GetBlockTime(), sizeForkTime.load())) return state.DoS(100, error("ConnectBlock(): too many sigops"), REJECT_INVALID, "bad-blk-sigops"); } @@ -1963,6 +2030,14 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin int64_t nTime4 = GetTimeMicros(); nTimeCallbacks += nTime4 - nTime3; LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime4 - nTime3), nTimeCallbacks * 0.000001); + if (DidBlockTriggerSizeFork(block, pindex, chainparams)) { + uint64_t tAllowBigger = block.nTime + chainparams.GetConsensus().SizeForkGracePeriod(); + LogPrintf("%s: Max block size fork activating at time %d, bigger blocks allowed at time %d\n", + __func__, block.nTime, tAllowBigger); + pblocktree->ActivateFork(SIZE_FORK_VERSION, pindex->GetBlockHash()); + sizeForkTime.store(tAllowBigger); + } + return true; } @@ -2157,6 +2232,14 @@ bool static DisconnectTip(CValidationState &state) { } mempool.removeCoinbaseSpends(pcoinsTip, pindexDelete->nHeight); mempool.check(pcoinsTip); + + // Re-org past the size fork, reset activation condition: + if (pblocktree->ForkActivated(SIZE_FORK_VERSION) == pindexDelete->GetBlockHash()) { + LogPrintf("%s: re-org past size fork\n", __func__); + pblocktree->ActivateFork(SIZE_FORK_VERSION, uint256()); + sizeForkTime.store(std::numeric_limits::max()); + } + // Update chainActive and related variables. 
UpdateTip(pindexDelete->pprev); // Let wallets know transactions went from 1-confirmed to @@ -2589,7 +2672,7 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd } if (!fKnown) { - while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) { + while (vinfoBlockFile[nFile].nSize + nAddSize >= Params().GetConsensus().MaxBlockSize(nTime, sizeForkTime.load())*MIN_BLOCKFILE_BLOCKS) { LogPrintf("Leaving block file %i: %s\n", nFile, vinfoBlockFile[nFile].ToString()); FlushBlockFile(true); nFile++; @@ -2707,7 +2790,11 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo // because we receive the wrong transactions for it. // Size limits - if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE) + uint64_t nMaxBlockSize = Params().GetConsensus().MaxBlockSize(block.GetBlockTime(), sizeForkTime.load()); + uint64_t nMaxTxSize = Params().GetConsensus().MaxTransactionSize(block.GetBlockTime(), sizeForkTime.load()); + if (block.vtx.empty() || + block.vtx.size()*MIN_TRANSACTION_SIZE > nMaxBlockSize || + ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > nMaxBlockSize) return state.DoS(100, error("CheckBlock(): size limits failed"), REJECT_INVALID, "bad-blk-length"); @@ -2722,7 +2809,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo // Check transactions BOOST_FOREACH(const CTransaction& tx, block.vtx) - if (!CheckTransaction(tx, state)) + if (!CheckTransaction(tx, state, nMaxTxSize)) return error("CheckBlock(): CheckTransaction failed"); unsigned int nSigOps = 0; @@ -2730,7 +2817,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo { nSigOps += GetLegacySigOpCount(tx); } - if (nSigOps > MAX_BLOCK_SIGOPS) + if (nSigOps > Params().GetConsensus().MaxBlockSigops(block.GetBlockTime(), sizeForkTime.load())) return state.DoS(100, error("CheckBlock(): 
out-of-bounds SigOpCount"), REJECT_INVALID, "bad-blk-sigops", true); @@ -2907,12 +2994,13 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, return true; } -static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams) +static bool IsSuperMajority(int versionOrBitmask, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams, bool useBitMask) { unsigned int nFound = 0; for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++) { - if (pstart->nVersion >= minVersion) + if ((useBitMask && ((pstart->nVersion & versionOrBitmask) == versionOrBitmask)) || + (!useBitMask && (pstart->nVersion >= versionOrBitmask))) ++nFound; pstart = pstart->pprev; } @@ -3155,6 +3243,15 @@ bool static LoadBlockIndexDB() if (!pblocktree->LoadBlockIndexGuts()) return false; + // If the max-block-size fork threshold was reached, update + // chainparams so big blocks are allowed: + uint256 sizeForkHash = pblocktree->ForkActivated(SIZE_FORK_VERSION); + if (sizeForkHash != uint256()) { + BlockMap::iterator it = mapBlockIndex.find(sizeForkHash); + assert(it != mapBlockIndex.end()); + sizeForkTime.store(it->second->GetBlockTime() + chainparams.GetConsensus().SizeForkGracePeriod()); + } + boost::this_thread::interruption_point(); // Calculate nChainWork @@ -3368,7 +3465,7 @@ void UnloadBlockIndex() nPreferredDownload = 0; setDirtyBlockIndex.clear(); setDirtyFileInfo.clear(); - mapNodeState.clear(); + NodeStatePtr::clear(); BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) { delete entry.second; @@ -3437,7 +3534,8 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp) int nLoaded = 0; try { // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor - CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION); + uint64_t nMaxBlocksize = 
chainparams.GetConsensus().MaxBlockSize(GetAdjustedTime(), sizeForkTime.load()); + CBufferedFile blkdat(fileIn, 2*nMaxBlocksize, nMaxBlocksize+8, SER_DISK, CLIENT_VERSION); uint64_t nRewind = blkdat.GetPos(); while (!blkdat.eof()) { boost::this_thread::interruption_point(); @@ -3456,7 +3554,7 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp) continue; // read size blkdat >> nSize; - if (nSize < 80 || nSize > MAX_BLOCK_SIZE) + if (nSize < 80 || nSize > nMaxBlocksize) continue; } catch (const std::exception&) { // no valid block header found; don't complain @@ -3784,6 +3882,37 @@ std::string GetWarnings(const std::string& strFor) // Messages // +static std::map maxMessageSizes = boost::assign::map_list_of + ("getaddr",0) + ("mempool",0) + ("ping",8) + ("pong",8) + ("verack", 0) + ; + +bool static SanityCheckMessage(CNode* peer, const CNetMessage& msg) +{ + const std::string& strCommand = msg.hdr.GetCommand(); + if (strCommand == "block") { + uint64_t maxSize = Params().GetConsensus().MaxBlockSize(GetAdjustedTime() + 2 * 60 * 60, sizeForkTime.load()); + if (msg.hdr.nMessageSize > maxSize) { + LogPrint("net", "Oversized %s message from peer=%i\n", SanitizeString(strCommand), peer->GetId()); + return false; + } + } + else if (msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH || + (maxMessageSizes.count(strCommand) && msg.hdr.nMessageSize > maxMessageSizes[strCommand])) { + LogPrint("net", "Oversized %s message from peer=%i (%d bytes)\n", + SanitizeString(strCommand), peer->GetId(), msg.hdr.nMessageSize); + Misbehaving(peer->GetId(), 20); + return msg.hdr.nMessageSize <= MAX_PROTOCOL_MESSAGE_LENGTH; + } + // This would be a good place for more sophisticated DoS detection/prevention. + // (e.g. 
disconnect a peer that is flooding us with excessive messages) + + return true; +} + bool static AlreadyHave(const CInv& inv) { @@ -4011,7 +4140,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, pfrom->fClient = !(pfrom->nServices & NODE_NETWORK); // Potentially mark this peer as a preferred download peer. - UpdatePreferredDownload(pfrom, State(pfrom->GetId())); + { + NodeStatePtr nodeState(pfrom->GetId()); + UpdatePreferredDownload(pfrom, nodeState); + } // Change version pfrom->PushMessage("verack"); @@ -4085,8 +4217,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, // Mark this node as currently connected, so we update its timestamp later. if (pfrom->fNetworkNode) { - LOCK(cs_main); - State(pfrom->GetId())->fCurrentlyConnected = true; + NodeStatePtr(pfrom->GetId())->fCurrentlyConnected = true; } } @@ -4197,7 +4328,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, // doing this will result in the received block being rejected as an orphan in case it is // not a direct successor. 
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash); - CNodeState *nodestate = State(pfrom->GetId()); + NodeStatePtr nodestate(pfrom->GetId()); if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - chainparams.GetConsensus().nPowTargetSpacing * 20 && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { vToFetch.push_back(inv); @@ -4949,8 +5080,8 @@ bool SendMessages(CNode* pto, bool fSendTrickle) pto->PushMessage("addr", vAddr); } - CNodeState &state = *State(pto->GetId()); - if (state.fShouldBan) { + NodeStatePtr statePtr(pto->GetId()); + if (statePtr->fShouldBan) { if (pto->fWhitelisted) LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString()); else { @@ -4962,21 +5093,21 @@ bool SendMessages(CNode* pto, bool fSendTrickle) CNode::Ban(pto->addr); } } - state.fShouldBan = false; + statePtr->fShouldBan = false; } - BOOST_FOREACH(const CBlockReject& reject, state.rejects) + BOOST_FOREACH(const CBlockReject& reject, statePtr->rejects) pto->PushMessage("reject", (string)"block", reject.chRejectCode, reject.strRejectReason, reject.hashBlock); - state.rejects.clear(); + statePtr->rejects.clear(); // Start block sync if (pindexBestHeader == NULL) pindexBestHeader = chainActive.Tip(); - bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do. - if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { + bool fFetch = statePtr->fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do. + if (!statePtr->fSyncStarted && !pto->fClient && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're close to today. 
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { - state.fSyncStarted = true; + statePtr->fSyncStarted = true; nSyncStarted++; CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader; LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight); @@ -5042,7 +5173,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle) // Detect whether we're stalling int64_t nNow = GetTimeMicros(); - if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { + if (!pto->fDisconnect && statePtr->nStallingSince && statePtr->nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { // Stalling only triggers when the block download window cannot move. During normal steady state, // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection // should only happen during initial block download. @@ -5059,9 +5190,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle) // only looking at this peer's oldest request). This way a large queue in the past doesn't result in a // permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing // more quickly than once every 5 minutes, then we'll shorten the download window for this block). 
- if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) { - QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); - int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, consensusParams); + if (!pto->fDisconnect && statePtr->vBlocksInFlight.size() > 0) { + QueuedBlock &queuedBlock = statePtr->vBlocksInFlight.front(); + int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - statePtr->nBlocksInFlightValidHeaders, consensusParams); if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) { LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow); queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow; @@ -5076,19 +5207,19 @@ bool SendMessages(CNode* pto, bool fSendTrickle) // Message: getdata (blocks) // vector vGetData; - if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && statePtr->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { vector vToDownload; NodeId staller = -1; - FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); + FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - statePtr->nBlocksInFlight, vToDownload, staller); BOOST_FOREACH(CBlockIndex *pindex, vToDownload) { vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex); LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->id); } - if (state.nBlocksInFlight == 0 && staller != -1) { - if (State(staller)->nStallingSince == 0) { - State(staller)->nStallingSince = nNow; + if 
(statePtr->nBlocksInFlight == 0 && staller != -1) { + if (NodeStatePtr(staller)->nStallingSince == 0) { + NodeStatePtr(staller)->nStallingSince = nNow; LogPrint("net", "Stall started peer=%d\n", staller); } } @@ -5124,6 +5255,20 @@ bool SendMessages(CNode* pto, bool fSendTrickle) return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast)); } +SizeForkTime::SizeForkTime(uint64_t _t) +{ + t = _t; +} +uint64_t SizeForkTime::load() const +{ + LOCK(cs); + return t; +} +void SizeForkTime::store(uint64_t _t) +{ + LOCK(cs); + t = _t; +} class CMainCleanup diff --git a/src/main.h b/src/main.h index 4e2efaada0d49..cb3d6583694bb 100644 --- a/src/main.h +++ b/src/main.h @@ -54,16 +54,18 @@ static const unsigned int DEFAULT_BLOCK_MIN_SIZE = 0; static const unsigned int DEFAULT_BLOCK_PRIORITY_SIZE = 50000; /** Default for accepting alerts from the P2P network. 
*/ static const bool DEFAULT_ALERTS = true; +/** Smallest possible serialized transaction, in bytes */ +static const unsigned int MIN_TRANSACTION_SIZE = 60; /** The maximum size for transactions we're willing to relay/mine */ static const unsigned int MAX_STANDARD_TX_SIZE = 100000; /** Maximum number of signature check operations in an IsStandard() P2SH script */ static const unsigned int MAX_P2SH_SIGOPS = 15; /** The maximum number of sigops we're willing to relay/mine in a single tx */ -static const unsigned int MAX_STANDARD_TX_SIGOPS = MAX_BLOCK_SIGOPS/5; +static const unsigned int MAX_STANDARD_TX_SIGOPS = MAX_STANDARD_TX_SIZE/25; // one sigop per 25 bytes /** Default for -maxorphantx, maximum number of orphan transactions kept in memory */ static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100; -/** The maximum size of a blk?????.dat file (since 0.8) */ -static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB +/** Minimum number of max-sized blocks in blk?????.dat files */ +static const unsigned int MIN_BLOCKFILE_BLOCKS = 128; /** The pre-allocation chunk size for blk?????.dat files (since 0.8) */ static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB /** The pre-allocation chunk size for rev?????.dat files (since 0.8) */ @@ -90,6 +92,8 @@ static const unsigned int DATABASE_WRITE_INTERVAL = 60 * 60; static const unsigned int DATABASE_FLUSH_INTERVAL = 24 * 60 * 60; /** Maximum length of reject messages. */ static const unsigned int MAX_REJECT_MESSAGE_LENGTH = 111; +/** Maximum length of incoming protocol messages (no message over 2 MiB is currently acceptable). 
*/ +static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024; struct BlockHasher { @@ -321,7 +325,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, int nHeight); /** Context-independent validity checks */ -bool CheckTransaction(const CTransaction& tx, CValidationState& state); +bool CheckTransaction(const CTransaction& tx, CValidationState& state, uint64_t nMaxTransactionSize); /** Check for standard transaction types * @return True if all outputs (scriptPubKeys) use only standard transaction forms @@ -497,4 +501,20 @@ extern CBlockTreeDB *pblocktree; */ int GetSpendHeight(const CCoinsViewCache& inputs); +// Time when bigger-than-1MB-blocks are allowed +class SizeForkTime { +public: + SizeForkTime(uint64_t _t); + + // Same interface as std::atomic -- when c++11 is supported, + // this class can go away and sizeForkTime can just be type + // std::atomic + uint64_t load() const; + void store(uint64_t _t); +private: + mutable CCriticalSection cs; + uint64_t t; +}; +extern SizeForkTime sizeForkTime; + #endif // BITCOIN_MAIN_H diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp index 4d90fd8cd7dfe..c6f711448bf53 100644 --- a/src/merkleblock.cpp +++ b/src/merkleblock.cpp @@ -147,13 +147,13 @@ CPartialMerkleTree::CPartialMerkleTree(const std::vector &vTxid, const CPartialMerkleTree::CPartialMerkleTree() : nTransactions(0), fBad(true) {} -uint256 CPartialMerkleTree::ExtractMatches(std::vector &vMatch) { +uint256 CPartialMerkleTree::ExtractMatches(uint64_t nMaxTransactions, std::vector &vMatch) { vMatch.clear(); // An empty set will not work if (nTransactions == 0) return uint256(); // check for excessively high numbers of transactions - if (nTransactions > MAX_BLOCK_SIZE / 60) // 60 is the lower bound for the size of a serialized CTransaction + if (nTransactions > nMaxTransactions) return uint256(); // there can never be more 
hashes provided than one for every txid if (vHash.size() > nTransactions) diff --git a/src/merkleblock.h b/src/merkleblock.h index 904c22abc2b94..5dff7b60ac823 100644 --- a/src/merkleblock.h +++ b/src/merkleblock.h @@ -113,7 +113,7 @@ class CPartialMerkleTree * extract the matching txid's represented by this partial merkle tree. * returns the merkle root, or 0 in case of failure */ - uint256 ExtractMatches(std::vector &vMatch); + uint256 ExtractMatches(uint64_t nMaxTransactions, std::vector &vMatch); }; @@ -144,6 +144,8 @@ class CMerkleBlock CMerkleBlock() {} + int64_t GetBlockTime() { return header.GetBlockTime(); } + ADD_SERIALIZE_METHODS; template diff --git a/src/miner.cpp b/src/miner.cpp index 2c5cba742c674..899d24ee31283 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -114,21 +114,6 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn) pblocktemplate->vTxFees.push_back(-1); // updated at end pblocktemplate->vTxSigOps.push_back(-1); // updated at end - // Largest block you're willing to create: - unsigned int nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE); - // Limit to betweeen 1K and MAX_BLOCK_SIZE-1K for sanity: - nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SIZE-1000), nBlockMaxSize)); - - // How much of the block should be dedicated to high-priority transactions, - // included regardless of the fees they pay - unsigned int nBlockPrioritySize = GetArg("-blockprioritysize", DEFAULT_BLOCK_PRIORITY_SIZE); - nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize); - - // Minimum block size you want to create; block will be filled with free transactions - // until there are no more or the block reaches this size: - unsigned int nBlockMinSize = GetArg("-blockminsize", DEFAULT_BLOCK_MIN_SIZE); - nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize); - // Collect memory pool transactions into the block CAmount nFees = 0; @@ -139,6 +124,26 @@ CBlockTemplate* CreateNewBlock(const CScript& 
scriptPubKeyIn) pblock->nTime = GetAdjustedTime(); CCoinsViewCache view(pcoinsTip); + UpdateTime(pblock, Params().GetConsensus(), pindexPrev); + uint64_t nBlockTime = pblock->GetBlockTime(); + + // Largest block you're willing to create: + uint64_t nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE); + uint64_t nConsensusMaxSize = chainparams.GetConsensus().MaxBlockSize(nBlockTime, sizeForkTime.load()); + // Limit to betweeen 1K and MAX_BLOCK_SIZE-1K for sanity: + nBlockMaxSize = std::max((uint64_t)1000, + std::min(nConsensusMaxSize-1000, nBlockMaxSize)); + + // How much of the block should be dedicated to high-priority transactions, + // included regardless of the fees they pay + uint64_t nBlockPrioritySize = GetArg("-blockprioritysize", DEFAULT_BLOCK_PRIORITY_SIZE); + nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize); + + // Minimum block size you want to create; block will be filled with free transactions + // until there are no more or the block reaches this size: + uint64_t nBlockMinSize = GetArg("-blockminsize", DEFAULT_BLOCK_MIN_SIZE); + nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize); + // Priority order to process transactions list vOrphan; // list memory doesn't move map > mapDependers; @@ -244,7 +249,7 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn) // Legacy limits on sigOps: unsigned int nTxSigOps = GetLegacySigOpCount(tx); - if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS) + if (nBlockSigOps + nTxSigOps >= chainparams.GetConsensus().MaxBlockSigops(nBlockTime, sizeForkTime.load())) continue; // Skip free transactions if we're past the minimum block size: @@ -271,7 +276,7 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn) CAmount nTxFees = view.GetValueIn(tx)-tx.GetValueOut(); nTxSigOps += GetP2SHSigOpCount(tx, view); - if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS) + if (nBlockSigOps + nTxSigOps >= chainparams.GetConsensus().MaxBlockSigops(nBlockTime, sizeForkTime.load())) continue; 
// Note that flags: we don't want to set mempool/IsStandard() diff --git a/src/net.cpp b/src/net.cpp index 0511256e55b4f..90e0d37759d2d 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -595,12 +595,10 @@ bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes) handled = msg.readData(pch, nBytes); if (handled < 0) - return false; + return false; - if (msg.in_data && msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) { - LogPrint("net", "Oversized message from peer=%i, disconnecting", GetId()); + if (msg.in_data && !g_signals.SanityCheckMessages(this, boost::ref(msg))) return false; - } pch += handled; nBytes -= handled; @@ -614,6 +612,22 @@ bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes) return true; } +unsigned int CNetMessage::FinalizeHeader(CDataStream& s) +{ + // Set the size + unsigned int nSize = s.size() - CMessageHeader::HEADER_SIZE; + WriteLE32((uint8_t*)&s[CMessageHeader::MESSAGE_SIZE_OFFSET], nSize); + + // Set the checksum + uint256 hash = Hash(s.begin() + CMessageHeader::HEADER_SIZE, s.end()); + unsigned int nChecksum = 0; + memcpy(&nChecksum, &hash, sizeof(nChecksum)); + assert(s.size () >= CMessageHeader::CHECKSUM_OFFSET + sizeof(nChecksum)); + memcpy((char*)&s[CMessageHeader::CHECKSUM_OFFSET], &nChecksum, sizeof(nChecksum)); + + return nSize; +} + int CNetMessage::readHeader(const char *pch, unsigned int nBytes) { // copy data to temporary parsing buffer @@ -2084,16 +2098,7 @@ void CNode::EndMessage() UNLOCK_FUNCTION(cs_vSend) if (ssSend.size() == 0) return; - // Set the size - unsigned int nSize = ssSend.size() - CMessageHeader::HEADER_SIZE; - WriteLE32((uint8_t*)&ssSend[CMessageHeader::MESSAGE_SIZE_OFFSET], nSize); - - // Set the checksum - uint256 hash = Hash(ssSend.begin() + CMessageHeader::HEADER_SIZE, ssSend.end()); - unsigned int nChecksum = 0; - memcpy(&nChecksum, &hash, sizeof(nChecksum)); - assert(ssSend.size () >= CMessageHeader::CHECKSUM_OFFSET + sizeof(nChecksum)); - 
memcpy((char*)&ssSend[CMessageHeader::CHECKSUM_OFFSET], &nChecksum, sizeof(nChecksum)); + unsigned int nSize = CNetMessage::FinalizeHeader(ssSend); LogPrint("net", "(%d bytes) peer=%d\n", nSize, id); diff --git a/src/net.h b/src/net.h index 69e4c592a94f8..49215c60c8fe4 100644 --- a/src/net.h +++ b/src/net.h @@ -33,6 +33,7 @@ class CAddrMan; class CBlockIndex; class CScheduler; +class CNetMessage; class CNode; namespace boost { @@ -47,8 +48,6 @@ static const int TIMEOUT_INTERVAL = 20 * 60; static const unsigned int MAX_INV_SZ = 50000; /** The maximum number of new addresses to accumulate before announcing. */ static const unsigned int MAX_ADDR_TO_SEND = 1000; -/** Maximum length of incoming protocol messages (no message over 2 MiB is currently acceptable). */ -static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024; /** -listen default */ static const bool DEFAULT_LISTEN = true; /** -upnp default */ @@ -95,10 +94,14 @@ struct CombinerAll } }; -// Signals for message handling +// Signals are used to communicate with higher-level code. struct CNodeSignals { boost::signals2::signal GetHeight; + // register a handler for this signal to do sanity checks as the bytes of a message are being + // received. Note that the message may not be completely read (so this can be + // used to prevent DoS attacks using over-size messages). 
+ boost::signals2::signal SanityCheckMessages; boost::signals2::signal ProcessMessages; boost::signals2::signal SendMessages; boost::signals2::signal InitializeNode; @@ -210,6 +213,9 @@ class CNetMessage { nTime = 0; } + // Called by CNode::EndMessage() and unit tests: modify stream to set size/checksum of header + static unsigned int FinalizeHeader(CDataStream& s); + bool complete() const { if (!in_data) diff --git a/src/primitives/block.h b/src/primitives/block.h index 59f46deb1c420..760109a993cea 100644 --- a/src/primitives/block.h +++ b/src/primitives/block.h @@ -10,6 +10,9 @@ #include "serialize.h" #include "uint256.h" +/** Blocks with version fields that have these bits set activate the bigger-block fork */ +const unsigned int SIZE_FORK_VERSION = 0x20000004; + /** Nodes collect new transactions into a block, hash them into a hash tree, * and scan through nonce values to make the block's hash satisfy proof-of-work * requirements. When they solve the proof-of-work, they broadcast the block @@ -21,7 +24,7 @@ class CBlockHeader { public: // header - static const int32_t CURRENT_VERSION=3; + static const int32_t CURRENT_VERSION=SIZE_FORK_VERSION; int32_t nVersion; uint256 hashPrevBlock; uint256 hashMerkleRoot; diff --git a/src/rpcmining.cpp b/src/rpcmining.cpp index 9c6fb10af08ed..2985eb3c4788c 100644 --- a/src/rpcmining.cpp +++ b/src/rpcmining.cpp @@ -364,6 +364,7 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp) " \"noncerange\" : \"00000000ffffffff\", (string) A range of valid nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops in blocks\n" " \"sizelimit\" : n, (numeric) limit of block size\n" + " \"txsizelimit\" : n, (numeric) limit of transaction size\n" " \"curtime\" : ttt, (numeric) current timestamp in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxx\", (string) compressed target of next block\n" " \"height\" : n (numeric) The height of the next block\n" @@ -562,6 +563,7 @@ UniValue getblocktemplate(const UniValue& params, 
bool fHelp) } UniValue result(UniValue::VOBJ); + int64_t nBlockTime = pblock->GetBlockTime(); result.push_back(Pair("capabilities", aCaps)); result.push_back(Pair("version", pblock->nVersion)); result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex())); @@ -573,9 +575,10 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp) result.push_back(Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1)); result.push_back(Pair("mutable", aMutable)); result.push_back(Pair("noncerange", "00000000ffffffff")); - result.push_back(Pair("sigoplimit", (int64_t)MAX_BLOCK_SIGOPS)); - result.push_back(Pair("sizelimit", (int64_t)MAX_BLOCK_SIZE)); - result.push_back(Pair("curtime", pblock->GetBlockTime())); + result.push_back(Pair("sigoplimit", Params().GetConsensus().MaxBlockSigops(nBlockTime, sizeForkTime.load()))); + result.push_back(Pair("sizelimit", Params().GetConsensus().MaxBlockSize(nBlockTime, sizeForkTime.load()))); + result.push_back(Pair("txsizelimit", Params().GetConsensus().MaxTransactionSize(nBlockTime, sizeForkTime.load()))); + result.push_back(Pair("curtime", nBlockTime)); result.push_back(Pair("bits", strprintf("%08x", pblock->nBits))); result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1))); diff --git a/src/rpcrawtransaction.cpp b/src/rpcrawtransaction.cpp index 20394fc2c1ed1..2372fede2f2bd 100644 --- a/src/rpcrawtransaction.cpp +++ b/src/rpcrawtransaction.cpp @@ -296,7 +296,8 @@ UniValue verifytxoutproof(const UniValue& params, bool fHelp) UniValue res(UniValue::VARR); vector vMatch; - if (merkleBlock.txn.ExtractMatches(vMatch) != merkleBlock.header.hashMerkleRoot) + uint64_t nMaxTransactions = Params().GetConsensus().MaxBlockSize(merkleBlock.GetBlockTime(), sizeForkTime.load())/60; // 60 bytes == min tx size + if (merkleBlock.txn.ExtractMatches(nMaxTransactions, vMatch) != merkleBlock.header.hashMerkleRoot) return res; LOCK(cs_main); diff --git a/src/test/ReceiveMsgBytes_tests.cpp b/src/test/ReceiveMsgBytes_tests.cpp new 
file mode 100644 index 0000000000000..119d6808462be --- /dev/null +++ b/src/test/ReceiveMsgBytes_tests.cpp @@ -0,0 +1,139 @@ +// Copyright (c) 2011-2014 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +// +// Unit tests for CNode::ReceiveMsgBytes +// + + +#include "main.h" +#include "net.h" +#include "pow.h" +#include "serialize.h" +#include "timedata.h" +#include "util.h" + +#include "test/test_bitcoin.h" + +#include + +BOOST_FIXTURE_TEST_SUITE(ReceiveMsgBytes_tests, TestingSetup) + +BOOST_AUTO_TEST_CASE(FullMessages) +{ + CNode testNode(INVALID_SOCKET, CAddress(CService("127.0.0.1", 0), NODE_NETWORK)); + testNode.nVersion = 1; + + CDataStream s(SER_NETWORK, PROTOCOL_VERSION); + s << CMessageHeader(Params().MessageStart(), "ping", 0); + s << (uint64_t)11; // ping nonce + CNetMessage::FinalizeHeader(s); + + LOCK(testNode.cs_vRecvMsg); + + // Receive a full 'ping' message + { + BOOST_CHECK(testNode.ReceiveMsgBytes(&s[0], s.size())); + BOOST_CHECK_EQUAL(testNode.vRecvMsg.size(),1UL); + CNetMessage& msg = testNode.vRecvMsg[0]; + BOOST_CHECK(msg.complete()); + BOOST_CHECK_EQUAL(msg.hdr.GetCommand(), "ping"); + uint64_t nonce; + msg.vRecv >> nonce; + BOOST_CHECK_EQUAL(nonce, (uint64_t)11); + } + + + testNode.vRecvMsg.clear(); + + // ...receive it one byte at a time: + { + for (size_t i = 0; i < s.size(); i++) { + BOOST_CHECK(testNode.ReceiveMsgBytes(&s[i], 1)); + } + BOOST_CHECK_EQUAL(testNode.vRecvMsg.size(),1UL); + CNetMessage& msg = testNode.vRecvMsg[0]; + BOOST_CHECK(msg.complete()); + BOOST_CHECK_EQUAL(msg.hdr.GetCommand(), "ping"); + uint64_t nonce; + msg.vRecv >> nonce; + BOOST_CHECK_EQUAL(nonce, (uint64_t)11); + } +} + +BOOST_AUTO_TEST_CASE(TooLargeBlock) +{ + // Random real block (000000000000dab0130bbcc991d3d7ae6b81aa6f50a798888dfe62337458dc45) + // With one tx + CBlock block; + CDataStream 
stream(ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff08044c86041b020a02ffffffff0100f2052a01000000434104ecd3229b0571c3be876feaac0442a9f13c5a572742927af1dc623353ecf8c202225f64868137a18cdd85cbbb4c74fbccfd4f49639cf1bdc94a5672bb15ad5d4cac00000000"), SER_NETWORK, PROTOCOL_VERSION); + stream >> block; + + CNode testNode(INVALID_SOCKET, CAddress(CService("127.0.0.1", 0), NODE_NETWORK)); + testNode.nVersion = 1; + + CDataStream s(SER_NETWORK, PROTOCOL_VERSION); + s << CMessageHeader(Params().MessageStart(), "block", 0); + size_t headerLen = s.size(); + s << block; + + // Test: too large + size_t maxBlockSize = Params().GetConsensus().MaxBlockSize(GetAdjustedTime(), sizeForkTime.load()); + s.resize(maxBlockSize+headerLen+1); + CNetMessage::FinalizeHeader(s); + + BOOST_CHECK(!testNode.ReceiveMsgBytes(&s[0], s.size())); + + testNode.vRecvMsg.clear(); + + // Test: exactly at max: + s.resize(maxBlockSize+headerLen); + CNetMessage::FinalizeHeader(s); + + BOOST_CHECK(testNode.ReceiveMsgBytes(&s[0], s.size())); +} + +BOOST_AUTO_TEST_CASE(TooLargeVerack) +{ + CNode testNode(INVALID_SOCKET, CAddress(CService("127.0.0.1", 0), NODE_NETWORK)); + testNode.nVersion = 1; + + CDataStream s(SER_NETWORK, PROTOCOL_VERSION); + s << CMessageHeader(Params().MessageStart(), "verack", 0); + size_t headerLen = s.size(); + + CNetMessage::FinalizeHeader(s); + BOOST_CHECK(testNode.ReceiveMsgBytes(&s[0], s.size())); + + // verack is zero-length, so even one byte bigger is too big: + s.resize(headerLen+1); + CNetMessage::FinalizeHeader(s); + BOOST_CHECK(testNode.ReceiveMsgBytes(&s[0], s.size())); + CNodeStateStats stats; + GetNodeStateStats(testNode.GetId(), stats); + BOOST_CHECK(stats.nMisbehavior > 0); +} + +BOOST_AUTO_TEST_CASE(TooLargePing) +{ + CNode testNode(INVALID_SOCKET, 
CAddress(CService("127.0.0.1", 0), NODE_NETWORK)); + testNode.nVersion = 1; + + CDataStream s(SER_NETWORK, PROTOCOL_VERSION); + s << CMessageHeader(Params().MessageStart(), "ping", 0); + s << (uint64_t)11; // 8-byte nonce + + CNetMessage::FinalizeHeader(s); + BOOST_CHECK(testNode.ReceiveMsgBytes(&s[0], s.size())); + + // Add another nonce, sanity check should fail + s << (uint64_t)11; // 8-byte nonce + CNetMessage::FinalizeHeader(s); + BOOST_CHECK(testNode.ReceiveMsgBytes(&s[0], s.size())); + CNodeStateStats stats; + GetNodeStateStats(testNode.GetId(), stats); + BOOST_CHECK(stats.nMisbehavior > 0); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/block_size_tests.cpp b/src/test/block_size_tests.cpp new file mode 100644 index 0000000000000..c94f7b047515a --- /dev/null +++ b/src/test/block_size_tests.cpp @@ -0,0 +1,197 @@ +// Copyright (c) 2011-2014 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include "chainparams.h" +#include "consensus/validation.h" +#include "main.h" +#include "miner.h" +#include "pubkey.h" +#include "random.h" +#include "uint256.h" +#include "util.h" + +#include "test/test_bitcoin.h" + +#include + +// These must match parameters in chainparams.cpp +static const uint64_t EARLIEST_FORK_TIME = 1452470400; // 11 Jan 2016 +static const uint32_t MAXSIZE_PREFORK = 1000*1000; +static const uint32_t MAXSIZE_POSTFORK = 8*1000*1000; +static const uint64_t SIZE_DOUBLE_EPOCH = 60*60*24*365*2; // two years + +BOOST_FIXTURE_TEST_SUITE(block_size_tests, TestingSetup) + +// Fill block with dummy transactions until it's serialized size is exactly nSize +static void +FillBlock(CBlock& block, unsigned int nSize) +{ + assert(block.vtx.size() > 0); // Start with at least a coinbase + + unsigned int nBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); + if (nBlockSize > nSize) { + block.vtx.resize(1); // passed in block is too big, start with just coinbase + nBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); + } + + CMutableTransaction tx; + tx.vin.resize(1); + tx.vin[0].scriptSig = CScript() << OP_11; + tx.vin[0].prevout.hash = block.vtx[0].GetHash(); // passes CheckBlock, would fail if we checked inputs. + tx.vin[0].prevout.n = 0; + tx.vout.resize(1); + tx.vout[0].nValue = 1LL; + tx.vout[0].scriptPubKey = block.vtx[0].vout[0].scriptPubKey; + + unsigned int nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION); + block.vtx.reserve(1+nSize/nTxSize); + + // ... add copies of tx to the block to get close to nSize: + while (nBlockSize+nTxSize < nSize) { + block.vtx.push_back(tx); + nBlockSize += nTxSize; + tx.vin[0].prevout.hash = GetRandHash(); // Just to make each tx unique + } + // Make the last transaction exactly the right size by making the scriptSig bigger. 
+ block.vtx.pop_back(); + nBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); + unsigned int nFill = nSize - nBlockSize - nTxSize; + for (unsigned int i = 0; i < nFill; i++) + tx.vin[0].scriptSig << OP_11; + block.vtx.push_back(tx); + nBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION); + assert(nBlockSize == nSize); +} + +static bool TestCheckBlock(CBlock& block, uint64_t nTime, unsigned int nSize) +{ + SetMockTime(nTime); + block.nTime = nTime; + FillBlock(block, nSize); + CValidationState validationState; + bool fResult = CheckBlock(block, validationState, false, false) && validationState.IsValid(); + SetMockTime(0); + return fResult; +} + +// +// Unit test CheckBlock() for conditions around the block size hard fork +// +BOOST_AUTO_TEST_CASE(BigBlockFork_Time1) +{ + CScript scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG; + CBlockTemplate *pblocktemplate; + + uint64_t t = EARLIEST_FORK_TIME; + uint64_t preforkSize = MAXSIZE_PREFORK; + uint64_t postforkSize = MAXSIZE_POSTFORK; + uint64_t tActivate = EARLIEST_FORK_TIME; + + sizeForkTime.store(tActivate); + + LOCK(cs_main); + + BOOST_CHECK(pblocktemplate = CreateNewBlock(scriptPubKey)); + CBlock *pblock = &pblocktemplate->block; + + // Before fork time... + BOOST_CHECK(TestCheckBlock(*pblock, t-1LL, preforkSize)); // 1MB : valid + BOOST_CHECK(!TestCheckBlock(*pblock, t-1LL, preforkSize+1)); // >1MB : invalid + BOOST_CHECK(!TestCheckBlock(*pblock, t-1LL, postforkSize)); // big : invalid + + // Exactly at fork time... + BOOST_CHECK(TestCheckBlock(*pblock, t, preforkSize)); // 1MB : valid + BOOST_CHECK(TestCheckBlock(*pblock, t, postforkSize)); // big : valid + BOOST_CHECK(!TestCheckBlock(*pblock, t, postforkSize+1)); // big+1 : invalid + + // Halfway to first doubling... 
+ uint64_t tHalf = t+SIZE_DOUBLE_EPOCH/2; + BOOST_CHECK(!TestCheckBlock(*pblock, tHalf-1, (3*postforkSize)/2)); + BOOST_CHECK(TestCheckBlock(*pblock, tHalf, (3*postforkSize)/2)); + BOOST_CHECK(!TestCheckBlock(*pblock, tHalf, (3*postforkSize)/2)+1); + + // Sanity check: April 1 2017 is more than halfway to first + // doubling: + uint64_t tApril_2017 = 1491004800; + BOOST_CHECK(TestCheckBlock(*pblock, tApril_2017, (3*postforkSize)/2)+1); + + // After one doubling... + uint64_t yearsAfter = t+SIZE_DOUBLE_EPOCH; + BOOST_CHECK(TestCheckBlock(*pblock, yearsAfter, 2*postforkSize)); // 2 * big : valid + BOOST_CHECK(!TestCheckBlock(*pblock, yearsAfter, 2*postforkSize+1)); // > 2 * big : invalid + +#if 0 + // These tests use gigabytes of memory and take a long time to run-- + // don't enable by default until computers have petabytes of memory + // and are 100 times faster than in 2015. + // Network protocol will have to be updated before we get there... + uint64_t maxDoublings = 8; + uint64_t postDoubleTime = t + SIZE_DOUBLE_EPOCH * maxDoublings + 1; + uint64_t farFuture = t + SIZE_DOUBLE_EPOCH * 100; + BOOST_CHECK(TestCheckBlock(*pblock, postDoubleTime, postforkSize<::max()); +} + +// Test activation time 30 days after earliest possible: +BOOST_AUTO_TEST_CASE(BigBlockFork_Time2) +{ + CScript scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG; + CBlockTemplate *pblocktemplate; + + uint64_t t = EARLIEST_FORK_TIME; + uint64_t preforkSize = MAXSIZE_PREFORK; + uint64_t postforkSize = MAXSIZE_POSTFORK; + + uint64_t tActivate = EARLIEST_FORK_TIME+60*60*24*30; + sizeForkTime.store(tActivate); + + LOCK(cs_main); + + BOOST_CHECK(pblocktemplate = CreateNewBlock(scriptPubKey)); + CBlock *pblock = &pblocktemplate->block; + + // Exactly at fork time... 
+ BOOST_CHECK(TestCheckBlock(*pblock, t, preforkSize)); // 1MB : valid + BOOST_CHECK(!TestCheckBlock(*pblock, t, postforkSize)); // big : invalid + + // Exactly at activation time.... + BOOST_CHECK(TestCheckBlock(*pblock, tActivate, preforkSize)); // 1MB : valid + BOOST_CHECK(TestCheckBlock(*pblock, tActivate, postforkSize)); // big : valid + + // Halfway to first doubling IS after the activation time: + uint64_t tHalf = t+SIZE_DOUBLE_EPOCH/2; + BOOST_CHECK(TestCheckBlock(*pblock, tHalf, (3*postforkSize)/2)); + + sizeForkTime.store(std::numeric_limits::max()); +} + +// Test: no miner consensus, no big blocks: +BOOST_AUTO_TEST_CASE(BigBlockFork_NoActivation) +{ + CScript scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG; + CBlockTemplate *pblocktemplate; + + uint64_t t = EARLIEST_FORK_TIME; + uint64_t preforkSize = MAXSIZE_PREFORK; + uint64_t postforkSize = MAXSIZE_POSTFORK; + + LOCK(cs_main); + + BOOST_CHECK(pblocktemplate = CreateNewBlock(scriptPubKey)); + CBlock *pblock = &pblocktemplate->block; + + // Exactly at fork time... 
+ BOOST_CHECK(TestCheckBlock(*pblock, t, preforkSize)); // 1MB : valid + BOOST_CHECK(!TestCheckBlock(*pblock, t, postforkSize)); // big : invalid + + uint64_t tHalf = t+SIZE_DOUBLE_EPOCH/2; + BOOST_CHECK(!TestCheckBlock(*pblock, tHalf, (3*postforkSize)/2)); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp index 1bda8a7ea161b..a83d6fe110ddd 100644 --- a/src/test/bloom_tests.cpp +++ b/src/test/bloom_tests.cpp @@ -7,6 +7,7 @@ #include "base58.h" #include "clientversion.h" #include "key.h" +#include "main.h" #include "merkleblock.h" #include "random.h" #include "serialize.h" @@ -23,6 +24,8 @@ using namespace std; +static const int maxTxn = 1000*1000/MIN_TRANSACTION_SIZE; // upper limit, number txns in 1MB block + BOOST_FIXTURE_TEST_SUITE(bloom_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize) @@ -204,7 +207,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_1) BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 8); vector vMatched; - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); @@ -221,7 +224,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_1) BOOST_CHECK(merkleBlock.vMatchedTxn[0].second == uint256S("0xdd1fd2a6fc16404faf339881a90adbde7f4f728691ac62e8f168809cdfae1053")); BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 7); - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); @@ -249,7 +252,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_2) 
BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0); vector vMatched; - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); @@ -275,7 +278,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_2) BOOST_CHECK(merkleBlock.vMatchedTxn[3].second == uint256S("0x3c1d7e82342158e4109df2e0b6348b6e84e403d8b4046d7007663ace63cddb23")); BOOST_CHECK(merkleBlock.vMatchedTxn[3].first == 3); - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); @@ -303,7 +306,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_2_with_update_none) BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0); vector vMatched; - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); @@ -326,7 +329,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_2_with_update_none) BOOST_CHECK(merkleBlock.vMatchedTxn[2].second == uint256S("0x3c1d7e82342158e4109df2e0b6348b6e84e403d8b4046d7007663ace63cddb23")); BOOST_CHECK(merkleBlock.vMatchedTxn[2].first == 3); - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned 
int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); @@ -353,7 +356,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_3_and_serialize) BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0); vector vMatched; - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); @@ -392,7 +395,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_4) BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 6); vector vMatched; - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); @@ -409,7 +412,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_4) BOOST_CHECK(merkleBlock.vMatchedTxn[1] == pair); - BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched) == block.hashMerkleRoot); + BOOST_CHECK(merkleBlock.txn.ExtractMatches(maxTxn, vMatched) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); diff --git a/src/test/pmt_tests.cpp b/src/test/pmt_tests.cpp index f6d06d6805caa..46e7a38254963 100644 --- a/src/test/pmt_tests.cpp +++ b/src/test/pmt_tests.cpp @@ -2,6 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+#include "main.h" #include "merkleblock.h" #include "serialize.h" #include "streams.h" @@ -18,6 +19,8 @@ using namespace std; +static const int maxTxn = 1000*1000/MIN_TRANSACTION_SIZE; // upper limit, number txns in 1MB block + class CPartialMerkleTreeTester : public CPartialMerkleTree { public: @@ -87,7 +90,7 @@ BOOST_AUTO_TEST_CASE(pmt_test1) // extract merkle root and matched txids from copy std::vector vMatchTxid2; - uint256 merkleRoot2 = pmt2.ExtractMatches(vMatchTxid2); + uint256 merkleRoot2 = pmt2.ExtractMatches(maxTxn, vMatchTxid2); // check that it has the same merkle root as the original, and a valid one BOOST_CHECK(merkleRoot1 == merkleRoot2); @@ -101,7 +104,7 @@ BOOST_AUTO_TEST_CASE(pmt_test1) CPartialMerkleTreeTester pmt3(pmt2); pmt3.Damage(); std::vector vMatchTxid3; - uint256 merkleRoot3 = pmt3.ExtractMatches(vMatchTxid3); + uint256 merkleRoot3 = pmt3.ExtractMatches(maxTxn, vMatchTxid3); BOOST_CHECK(merkleRoot3 != merkleRoot1); } } @@ -121,7 +124,7 @@ BOOST_AUTO_TEST_CASE(pmt_malleability) CPartialMerkleTree tree(vTxid, vMatch); std::vector vTxid2; - BOOST_CHECK(tree.ExtractMatches(vTxid).IsNull()); + BOOST_CHECK(tree.ExtractMatches(100, vTxid).IsNull()); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index a0797d5f3f0d2..1ed14e3952237 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -197,7 +197,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) stream >> tx; CValidationState state; - BOOST_CHECK_MESSAGE(CheckTransaction(tx, state), strTest); + BOOST_CHECK_MESSAGE(CheckTransaction(tx, state, MAX_STANDARD_TX_SIZE), strTest); BOOST_CHECK(state.IsValid()); std::vector raw = ParseHex(raw_script); diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 9ce7aae9664f7..a04762f28ebf7 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -135,7 +135,7 @@ BOOST_AUTO_TEST_CASE(tx_valid) stream >> tx; CValidationState state; - 
BOOST_CHECK_MESSAGE(CheckTransaction(tx, state), strTest); + BOOST_CHECK_MESSAGE(CheckTransaction(tx, state, MAX_STANDARD_TX_SIZE), strTest); BOOST_CHECK(state.IsValid()); for (unsigned int i = 0; i < tx.vin.size(); i++) @@ -210,7 +210,7 @@ BOOST_AUTO_TEST_CASE(tx_invalid) stream >> tx; CValidationState state; - fValid = CheckTransaction(tx, state) && state.IsValid(); + fValid = CheckTransaction(tx, state, MAX_STANDARD_TX_SIZE) && state.IsValid(); for (unsigned int i = 0; i < tx.vin.size() && fValid; i++) { @@ -239,11 +239,11 @@ BOOST_AUTO_TEST_CASE(basic_transaction_tests) CMutableTransaction tx; stream >> tx; CValidationState state; - BOOST_CHECK_MESSAGE(CheckTransaction(tx, state) && state.IsValid(), "Simple deserialized transaction should be valid."); + BOOST_CHECK_MESSAGE(CheckTransaction(tx, state, MAX_STANDARD_TX_SIZE) && state.IsValid(), "Simple deserialized transaction should be valid."); // Check that duplicate txins fail tx.vin.push_back(tx.vin[0]); - BOOST_CHECK_MESSAGE(!CheckTransaction(tx, state) || !state.IsValid(), "Transaction with duplicate txins should be invalid."); + BOOST_CHECK_MESSAGE(!CheckTransaction(tx, state, MAX_STANDARD_TX_SIZE) || !state.IsValid(), "Transaction with duplicate txins should be invalid."); } // diff --git a/src/txdb.cpp b/src/txdb.cpp index 935b7846762ee..f98e7023faf57 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -27,6 +27,7 @@ static const char DB_FLAG = 'F'; static const char DB_REINDEX_FLAG = 'R'; static const char DB_LAST_BLOCK = 'l'; +static const char DB_FORK_ACTIVATION = 'a'; void static BatchWriteCoins(CLevelDBBatch &batch, const uint256 &hash, const CCoins &coins) { if (coins.IsPruned()) @@ -240,5 +241,64 @@ bool CBlockTreeDB::LoadBlockIndexGuts() } } + // Load fork activation info + ssKeySet.clear(); + ssKeySet << make_pair(DB_FORK_ACTIVATION, 0); + pcursor->Seek(ssKeySet.str()); + while (pcursor->Valid()) { + try { + leveldb::Slice slKey = pcursor->key(); + CDataStream ssKey(slKey.data(), 
slKey.data()+slKey.size(), SER_DISK, CLIENT_VERSION); + char chType; + ssKey >> chType; + if (chType == DB_FORK_ACTIVATION) { + uint32_t nVersion; + ssKey >> nVersion; + leveldb::Slice slValue = pcursor->value(); + CDataStream ssValue(slValue.data(), slValue.data()+slValue.size(), SER_DISK, CLIENT_VERSION); + uint256 blockHash; + ssValue >> blockHash; + forkActivationMap[nVersion] = blockHash; + + pcursor->Next(); + } else { + break; // finished loading block index + } + } + catch (std::exception &e) { + return error("%s : Deserialize or I/O error - %s", __func__, e.what()); + } + } + return true; } + +uint256 CBlockTreeDB::ForkActivated(int32_t nForkVersion) const +{ + // Returns block at which a supermajority was reached for given + // fork version. + // NOTE! The max blocksize fork adds a grace period + // during which no bigger blocks are allowed; this routine + // just keeps track of the hash of the block that + // triggers the fork condition + + std::map::const_iterator it = forkActivationMap.find(nForkVersion); + if (it != forkActivationMap.end()) + return it->second; + + return uint256(); +} + +bool CBlockTreeDB::ActivateFork(int32_t nForkVersion, const uint256& blockHash) +{ + // Called when a supermajority of blocks (ending with blockHash) + // support a rule change + // OR if a chain re-org happens around the activation block, + // called with uint256(0) to reset the flag in the database. 
+ + forkActivationMap[nForkVersion] = blockHash; + if (blockHash == uint256()) + return Erase(make_pair(DB_FORK_ACTIVATION, nForkVersion)); + else + return Write(make_pair(DB_FORK_ACTIVATION, nForkVersion), blockHash); +} diff --git a/src/txdb.h b/src/txdb.h index bef5dc9fd18fe..8459225536b37 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -49,6 +49,8 @@ class CBlockTreeDB : public CLevelDBWrapper private: CBlockTreeDB(const CBlockTreeDB&); void operator=(const CBlockTreeDB&); + std::map forkActivationMap; + public: bool WriteBatchSync(const std::vector >& fileInfo, int nLastFile, const std::vector& blockinfo); bool ReadBlockFileInfo(int nFile, CBlockFileInfo &fileinfo); @@ -60,6 +62,8 @@ class CBlockTreeDB : public CLevelDBWrapper bool WriteFlag(const std::string &name, bool fValue); bool ReadFlag(const std::string &name, bool &fValue); bool LoadBlockIndexGuts(); + uint256 ForkActivated(int32_t nForkVersion) const; + bool ActivateFork(int32_t nForkVersion, const uint256& blockHash); }; #endif // BITCOIN_TXDB_H diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index f777926e72d97..2c51114382f70 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -371,7 +371,8 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, CWalletTx wtx; ssValue >> wtx; CValidationState state; - if (!(CheckTransaction(wtx, state) && (wtx.GetHash() == hash) && state.IsValid())) + // Allow reading transactions up to 1MB large (largest ever allowed in a block): + if (!(CheckTransaction(wtx, state, 1000*1000) && (wtx.GetHash() == hash) && state.IsValid())) return false; // Undo serialize changes in 31600