diff --git a/src/consensus/params.h b/src/consensus/params.h
index b6fd45f48..b6f2726d6 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -7,6 +7,7 @@
 #define BITCOIN_CONSENSUS_PARAMS_H
 
 #include "uint256.h"
+#include <limits>
 
 namespace Consensus {
 /**
@@ -53,10 +54,47 @@ struct Params {
         uint64_t nMaxSize = (nMaxSizeBase << doublings) + interpolate;
         return nMaxSize;
     }
-    /** Maximum number of signature ops in a block with timestamp nBlockTimestamp */
-    uint64_t MaxBlockSigops(uint64_t nBlockTimestamp, uint64_t nSizeForkActivationTime) const {
-        return MaxBlockSize(nBlockTimestamp, nSizeForkActivationTime)/50;
+
+    // Signature-operation counting is a CPU-exhaustion denial-of-service prevention
+    // measure. Prior to the maximum-block-size fork it was done in two different, ad-hoc,
+    // inaccurate ways.
+    // Post-fork it is done in an accurate way, counting how many ECDSA verify operations
+    // and how many bytes must be hashed to compute signature hashes to validate a block.
+
+    /** Pre-fork consensus rules use an inaccurate method of counting sigops **/
+    uint64_t MaxBlockLegacySigops(uint64_t nBlockTimestamp, uint64_t nSizeForkActivationTime) const {
+        if (nBlockTimestamp < nEarliestSizeForkTime || nBlockTimestamp < nSizeForkActivationTime)
+            return MaxBlockSize(nBlockTimestamp, nSizeForkActivationTime)/50;
+        return std::numeric_limits<uint64_t>::max(); // Post-fork uses the accurate method
+    }
+    //
+    // MaxBlockSize/100 was chosen for the number of sigops (ECDSA verifications) because
+    // a single ECDSA signature verification requires a public key (33 bytes) plus
+    // a signature (~72 bytes), so allowing one sigop per 100 bytes should allow any
+    // reasonable set of transactions (but will prevent 'attack' transactions that
+    // just try to use as much CPU as possible in as few bytes as possible).
+    //
+    uint64_t MaxBlockAccurateSigops(uint64_t nBlockTimestamp, uint64_t nSizeForkActivationTime) const {
+        if (nBlockTimestamp < nEarliestSizeForkTime || nBlockTimestamp < nSizeForkActivationTime)
+            return std::numeric_limits<uint64_t>::max(); // Pre-fork doesn't care
+        return MaxBlockSize(nBlockTimestamp, nSizeForkActivationTime)/100;
+    }
+    //
+    // MaxBlockSize*160 was chosen for the maximum number of bytes hashed so that any possible
+    // non-attack one-megabyte-large transaction that might have been signed and
+    // saved before the fork could still be mined after the fork. A 5,000-SIGHASH_ALL-input,
+    // single-output, 999,000-byte transaction requires about 1.2 gigabytes of hashing
+    // to compute those 5,000 signature hashes.
+    //
+    // Note that such a transaction was, and is, considered "non-standard" because it is
+    // over 100,000 bytes big.
+    //
+    uint64_t MaxBlockSighashBytes(uint64_t nBlockTimestamp, uint64_t nSizeForkActivationTime) const {
+        if (nBlockTimestamp < nEarliestSizeForkTime || nBlockTimestamp < nSizeForkActivationTime)
+            return std::numeric_limits<uint64_t>::max(); // Pre-fork doesn't care
+        return MaxBlockSize(nBlockTimestamp, nSizeForkActivationTime)*160;
     }
+
     int ActivateSizeForkMajority() const { return nActivateSizeForkMajority; }
     uint64_t SizeForkGracePeriod() const { return nSizeForkGracePeriod; }
 };
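To make the three limits concrete: with a hypothetical post-fork MaxBlockSize of 8,000,000 bytes (an illustrative assumption; the real value comes from MaxBlockSize() and the chain's parameters), the arithmetic above works out as in this standalone sketch:

```cpp
// Standalone sketch of the limit arithmetic in Consensus::Params above.
// The 8,000,000-byte block size is an illustrative assumption, not a value
// taken from this patch; the real figure is computed by MaxBlockSize().
#include <cstdint>
#include <iostream>

int main()
{
    const uint64_t nMaxBlockSize = 8000000; // hypothetical post-fork size

    // Pre-fork rule: one (inaccurately counted) sigop per 50 bytes of block.
    const uint64_t nLegacySigops = nMaxBlockSize / 50;    // 160,000

    // Post-fork rule: one ECDSA verification per 100 bytes, since a public
    // key (33 bytes) plus a signature (~72 bytes) is roughly 100 bytes.
    const uint64_t nAccurateSigops = nMaxBlockSize / 100; // 80,000

    // Post-fork rule: 160 bytes hashed per block byte. At 8 MB that allows
    // 1.28 GB of sighash data, enough for the ~1.2 GB required by the
    // 5,000-input, 999,000-byte SIGHASH_ALL transaction described above.
    const uint64_t nSighashBytes = nMaxBlockSize * 160;   // 1,280,000,000

    std::cout << "legacy sigops:   " << nLegacySigops   << "\n"
              << "accurate sigops: " << nAccurateSigops << "\n"
              << "sighash bytes:   " << nSighashBytes   << "\n";
    return 0;
}
```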
diff --git a/src/main.cpp b/src/main.cpp
index 772206727..00ef0597b 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1921,7 +1921,17 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex
     CBlockUndo blockundo;
 
-    BlockValidationResourceTracker resourceTracker(std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max());
+    // Pre-fork, maxAccurateSigops and maxSighashBytes will be unlimited (they'll
+    // be the maximum possible uint64 value).
+    // And post-fork, the legacy sigop limits will be unlimited.
+    // This code is written to be oblivious to whether or not the fork has happened;
+    // one or the other counting method is wasted effort (but it is not worth optimizing
+    // because sigop counting is not a significant percentage of validation time).
+    // Some future release well after the fork has occurred should remove all of the
+    // legacy sigop counting code and just keep the accurate counting method.
+    uint64_t maxAccurateSigops = chainparams.GetConsensus().MaxBlockAccurateSigops(block.GetBlockTime(), sizeForkTime.load());
+    uint64_t maxSighashBytes = chainparams.GetConsensus().MaxBlockSighashBytes(block.GetBlockTime(), sizeForkTime.load());
+    BlockValidationResourceTracker resourceTracker(maxAccurateSigops, maxSighashBytes);
 
     CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
 
     int64_t nTimeStart = GetTimeMicros();
@@ -1938,7 +1948,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex
 
         nInputs += tx.vin.size();
         nSigOps += GetLegacySigOpCount(tx);
-        if (nSigOps > chainparams.GetConsensus().MaxBlockSigops(block.GetBlockTime(), sizeForkTime.load()))
+        if (nSigOps > chainparams.GetConsensus().MaxBlockLegacySigops(block.GetBlockTime(), sizeForkTime.load()))
             return state.DoS(100, error("ConnectBlock(): too many sigops"),
                              REJECT_INVALID, "bad-blk-sigops");
 
@@ -1954,7 +1964,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex
             // this is to prevent a "rogue miner" from creating
             // an incredibly-expensive-to-validate block.
             nSigOps += GetP2SHSigOpCount(tx, view);
-            if (nSigOps > chainparams.GetConsensus().MaxBlockSigops(block.GetBlockTime(), sizeForkTime.load()))
+            if (nSigOps > chainparams.GetConsensus().MaxBlockLegacySigops(block.GetBlockTime(), sizeForkTime.load()))
                 return state.DoS(100, error("ConnectBlock(): too many sigops"),
                                  REJECT_INVALID, "bad-blk-sigops");
         }
@@ -2818,7 +2828,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bool fCheckMerkleRoot)
     {
         nSigOps += GetLegacySigOpCount(tx);
     }
-    if (nSigOps > Params().GetConsensus().MaxBlockSigops(block.GetBlockTime(), sizeForkTime.load()))
+    if (nSigOps > Params().GetConsensus().MaxBlockLegacySigops(block.GetBlockTime(), sizeForkTime.load()))
         return state.DoS(100, error("CheckBlock(): out-of-bounds SigOpCount"),
                          REJECT_INVALID, "bad-blk-sigops", true);
 
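The legacy checks in ConnectBlock and CheckBlock still run unconditionally post-fork; they are neutralized purely by the value MaxBlockLegacySigops returns. A minimal sketch of why handing back std::numeric_limits<uint64_t>::max() turns a count-greater-than-limit check into a no-op:

```cpp
// Sketch: a limit of std::numeric_limits<uint64_t>::max() can never be
// exceeded, so the corresponding DoS rejection can never fire.
#include <cassert>
#include <cstdint>
#include <limits>

int main()
{
    const uint64_t nLimit = std::numeric_limits<uint64_t>::max();
    const uint64_t nSigOps = nLimit; // even the largest representable count...
    assert(!(nSigOps > nLimit));     // ...does not exceed the disabled limit
    return 0;
}
```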
diff --git a/src/miner.cpp b/src/miner.cpp
index 5aa9653c1..c4a616b3c 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -98,7 +98,10 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
     if(!pblocktemplate.get())
         return NULL;
     CBlock *pblock = &pblocktemplate->block; // pointer for convenience
-    BlockValidationResourceTracker resourceTracker(std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max());
+
+    uint64_t maxAccurateSigops = chainparams.GetConsensus().MaxBlockAccurateSigops(pblock->GetBlockTime(), sizeForkTime.load());
+    uint64_t maxSighashBytes = chainparams.GetConsensus().MaxBlockSighashBytes(pblock->GetBlockTime(), sizeForkTime.load());
+    BlockValidationResourceTracker resourceTracker(maxAccurateSigops, maxSighashBytes);
 
     // -regtest only: allow overriding block.nVersion with
     // -blockversion=N to test forking scenarios
@@ -257,7 +260,7 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
 
         // Legacy limits on sigOps:
         unsigned int nTxSigOps = GetLegacySigOpCount(tx);
-        if (nBlockSigOps + nTxSigOps >= chainparams.GetConsensus().MaxBlockSigops(nBlockTime, sizeForkTime.load()))
+        if (nBlockSigOps + nTxSigOps >= chainparams.GetConsensus().MaxBlockLegacySigops(nBlockTime, sizeForkTime.load()))
             continue;
 
         // Skip free transactions if we're past the minimum block size:
@@ -284,7 +287,7 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
         CAmount nTxFees = view.GetValueIn(tx)-tx.GetValueOut();
 
         nTxSigOps += GetP2SHSigOpCount(tx, view);
-        if (nBlockSigOps + nTxSigOps >= chainparams.GetConsensus().MaxBlockSigops(nBlockTime, sizeForkTime.load()))
+        if (nBlockSigOps + nTxSigOps >= chainparams.GetConsensus().MaxBlockLegacySigops(nBlockTime, sizeForkTime.load()))
             continue;
 
         // Note that flags: we don't want to set mempool/IsStandard()
@@ -292,7 +295,25 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
         // create only contains transactions that are valid in new blocks.
         CValidationState state;
         if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, &resourceTracker))
-            continue;
+        {
+            // If CheckInputs fails because adding the transaction would hit
+            // per-block limits on sigops or sighash bytes, stop building the block
+            // right away. It is _possible_ we have another transaction in the mempool
+            // that wouldn't trigger the limits, but that case isn't worth optimizing
+            // for, because those limits are very difficult to hit with a mempool full of
+            // transactions that pass the IsStandard() test.
+            if (!resourceTracker.IsWithinLimits())
+                break; // stop before adding this transaction to the block
+            else
+                // If CheckInputs fails for some other reason,
+                // continue to consider other transactions for inclusion
+                // in this block. This should almost never happen -- it
+                // could theoretically happen if a timelocked transaction
+                // entered the mempool after the lock time, but then the
+                // blockchain re-orgs to a more-work chain with a lower
+                // height or time.
+                continue;
+        }
 
         UpdateCoins(tx, state, view, nHeight);
 
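CreateNewBlock depends on resourceTracker.IsWithinLimits() to tell resource-limit failures apart from other script failures. The BlockValidationResourceTracker definition itself is not part of this diff; a minimal sketch of the counting pattern implied by its two-limit constructor and the IsWithinLimits() query, with a hypothetical Update() accumulator, might look like:

```cpp
// Minimal sketch only -- not the class shipped with this patch. It mirrors
// the two-limit constructor and the IsWithinLimits() query used above; the
// Update() method is a hypothetical accumulator added for illustration.
#include <cstdint>

class ResourceTrackerSketch
{
    uint64_t nSigops = 0;
    uint64_t nSighashBytes = 0;
    const uint64_t nMaxSigops;
    const uint64_t nMaxSighashBytes;

public:
    ResourceTrackerSketch(uint64_t maxSigops, uint64_t maxSighashBytes)
        : nMaxSigops(maxSigops), nMaxSighashBytes(maxSighashBytes) {}

    // Hypothetical: called as each signature is checked, with the number of
    // bytes that had to be hashed to compute that signature's hash.
    bool Update(uint64_t nSigopsDelta, uint64_t nSighashBytesDelta)
    {
        nSigops += nSigopsDelta;
        nSighashBytes += nSighashBytesDelta;
        return IsWithinLimits();
    }

    bool IsWithinLimits() const
    {
        return nSigops <= nMaxSigops && nSighashBytes <= nMaxSighashBytes;
    }
};
```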
diff --git a/src/rpcmining.cpp b/src/rpcmining.cpp
index 58d529ae8..0c8dadfcd 100644
--- a/src/rpcmining.cpp
+++ b/src/rpcmining.cpp
@@ -582,8 +582,10 @@ Value getblocktemplate(const Array& params, bool fHelp)
     result.push_back(Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1));
     result.push_back(Pair("mutable", aMutable));
     result.push_back(Pair("noncerange", "00000000ffffffff"));
-    result.push_back(Pair("sigoplimit", Params().GetConsensus().MaxBlockSigops(nBlockTime, sizeForkTime.load())));
+    result.push_back(Pair("sigoplimit", Params().GetConsensus().MaxBlockLegacySigops(nBlockTime, sizeForkTime.load())));
     result.push_back(Pair("sizelimit", Params().GetConsensus().MaxBlockSize(nBlockTime, sizeForkTime.load())));
+    result.push_back(Pair("accuratesigoplimit", Params().GetConsensus().MaxBlockAccurateSigops(nBlockTime, sizeForkTime.load())));
+    result.push_back(Pair("sighashlimit", Params().GetConsensus().MaxBlockSighashBytes(nBlockTime, sizeForkTime.load())));
     result.push_back(Pair("curtime", nBlockTime));
     result.push_back(Pair("bits", strprintf("%08x", pblock->nBits)));
     result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1)));
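With these additions, a getblocktemplate response gains two fields. An illustrative post-fork excerpt, reusing the hypothetical 8,000,000-byte MaxBlockSize from the sketch above (actual values depend on chain parameters and activation state); note that the legacy "sigoplimit" is reported as the maximum uint64 value because it is no longer enforced once the fork is active:

```
  "sigoplimit": 18446744073709551615,
  "sizelimit": 8000000,
  "accuratesigoplimit": 80000,
  "sighashlimit": 1280000000,
```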