// Copyright (c) 2009-2010 Satoshi Nakamoto | |
// Copyright (c) 2009-2020 The Bitcoin Core developers | |
// Distributed under the MIT software license, see the accompanying | |
// file COPYING or http://www.opensource.org/licenses/mit-license.php. | |
#include <validation.h> | |
#include <arith_uint256.h> | |
#include <chain.h> | |
#include <chainparams.h> | |
#include <checkqueue.h> | |
#include <consensus/consensus.h> | |
#include <consensus/merkle.h> | |
#include <consensus/tx_check.h> | |
#include <consensus/tx_verify.h> | |
#include <consensus/validation.h> | |
#include <cuckoocache.h> | |
#include <flatfile.h> | |
#include <hash.h> | |
#include <index/blockfilterindex.h> | |
#include <index/txindex.h> | |
#include <logging.h> | |
#include <logging/timer.h> | |
#include <node/coinstats.h> | |
#include <node/ui_interface.h> | |
#include <optional.h> | |
#include <policy/policy.h> | |
#include <policy/settings.h> | |
#include <pow.h> | |
#include <primitives/block.h> | |
#include <primitives/transaction.h> | |
#include <random.h> | |
#include <reverse_iterator.h> | |
#include <script/script.h> | |
#include <script/sigcache.h> | |
#include <shutdown.h> | |
#include <signet.h> | |
#include <timedata.h> | |
#include <tinyformat.h> | |
#include <txdb.h> | |
#include <txmempool.h> | |
#include <uint256.h> | |
#include <undo.h> | |
#include <util/check.h> // For NDEBUG compile time check | |
#include <util/moneystr.h> | |
#include <util/rbf.h> | |
#include <util/strencodings.h> | |
#include <util/system.h> | |
#include <util/translation.h> | |
#include <validationinterface.h> | |
#include <warnings.h> | |
#include <string> | |
#include <boost/algorithm/string/replace.hpp> | |
#define MICRO 0.000001 | |
#define MILLI 0.001 | |
/** | |
* An extra transaction can be added to a package, as long as it only has one | |
* ancestor and is no larger than this. Not really any reason to make this | |
* configurable as it doesn't materially change DoS parameters. | |
*/ | |
static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000; | |
/** Maximum kilobytes for transactions to store for processing during reorg */ | |
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000; | |
/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */ | |
static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB | |
/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */ | |
static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB | |
/** Time to wait between writing blocks/block index to disk. */ | |
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1}; | |
/** Time to wait between flushing chainstate to disk. */ | |
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24}; | |
/** Maximum age of our tip for us to be considered current for fee estimation */ | |
static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3}; | |
const std::vector<std::string> CHECKLEVEL_DOC { | |
"level 0 reads the blocks from disk", | |
"level 1 verifies block validity", | |
"level 2 verifies undo data", | |
"level 3 checks disconnection of tip blocks", | |
"level 4 tries to reconnect the blocks", | |
"each level includes the checks of the previous levels", | |
}; | |
bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIndex *pb) const { | |
// First sort by most total work, ... | |
if (pa->nChainWork > pb->nChainWork) return false; | |
if (pa->nChainWork < pb->nChainWork) return true; | |
// ... then by earliest time received, ... | |
if (pa->nSequenceId < pb->nSequenceId) return false; | |
if (pa->nSequenceId > pb->nSequenceId) return true; | |
// Use pointer address as tie breaker (should only happen with blocks | |
// loaded from disk, as those all have id 0). | |
if (pa < pb) return false; | |
if (pa > pb) return true; | |
// Identical blocks. | |
return false; | |
} | |
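// A minimal usage sketch (not part of the original file): a strict weak ordering
// like CBlockIndexWorkComparator is typically used to keep candidate tips in an
// ordered std::set, where the best candidate (most work, then earliest received)
// sorts last. The container and function names below are hypothetical; the block
// is compiled out.
#if 0
std::set<CBlockIndex*, CBlockIndexWorkComparator> candidate_tips;

CBlockIndex* BestCandidateTip()
{
    // The comparator returns true when its first argument sorts *before* the
    // second, so the highest-work entry is the last element of the set.
    return candidate_tips.empty() ? nullptr : *candidate_tips.rbegin();
}
#endif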
ChainstateManager g_chainman; | |
CChainState& ChainstateActive() | |
{ | |
LOCK(::cs_main); | |
assert(g_chainman.m_active_chainstate); | |
return *g_chainman.m_active_chainstate; | |
} | |
CChain& ChainActive() | |
{ | |
LOCK(::cs_main); | |
return ::ChainstateActive().m_chain; | |
} | |
/** | |
* Mutex to guard access to validation specific variables, such as reading | |
* or changing the chainstate. | |
* | |
* This may also need to be locked when updating the transaction pool, e.g. on | |
* AcceptToMemoryPool. See CTxMemPool::cs comment for details. | |
* | |
* The transaction pool has a separate lock to allow reading from it and the | |
* chainstate at the same time. | |
*/ | |
RecursiveMutex cs_main; | |
CBlockIndex *pindexBestHeader = nullptr; | |
Mutex g_best_block_mutex; | |
std::condition_variable g_best_block_cv; | |
uint256 g_best_block; | |
bool g_parallel_script_checks{false}; | |
std::atomic_bool fImporting(false); | |
std::atomic_bool fReindex(false); | |
bool fHavePruned = false; | |
bool fPruneMode = false; | |
bool fRequireStandard = true; | |
bool fCheckBlockIndex = false; | |
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED; | |
uint64_t nPruneTarget = 0; | |
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; | |
uint256 hashAssumeValid; | |
arith_uint256 nMinimumChainWork; | |
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); | |
// Internal stuff | |
namespace { | |
CBlockIndex* pindexBestInvalid = nullptr; | |
RecursiveMutex cs_LastBlockFile; | |
std::vector<CBlockFileInfo> vinfoBlockFile; | |
int nLastBlockFile = 0; | |
/** Global flag to indicate we should check to see if there are | |
* block/undo files that should be deleted. Set on startup | |
* or if we allocate more file space when we're in prune mode | |
*/ | |
bool fCheckForPruning = false; | |
/** Dirty block index entries. */ | |
std::set<CBlockIndex*> setDirtyBlockIndex; | |
/** Dirty block file entries. */ | |
std::set<int> setDirtyFileInfo; | |
} // anon namespace | |
CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) | |
{ | |
AssertLockHeld(cs_main); | |
assert(std::addressof(g_chainman.BlockIndex()) == std::addressof(m_block_index)); | |
BlockMap::const_iterator it = m_block_index.find(hash); | |
return it == m_block_index.end() ? nullptr : it->second; | |
} | |
CBlockIndex* BlockManager::FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) | |
{ | |
AssertLockHeld(cs_main); | |
assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this)); | |
// Find the latest block common to locator and chain - we expect that | |
// locator.vHave is sorted descending by height. | |
for (const uint256& hash : locator.vHave) { | |
CBlockIndex* pindex = LookupBlockIndex(hash); | |
if (pindex) { | |
if (chain.Contains(pindex)) | |
return pindex; | |
if (pindex->GetAncestor(chain.Height()) == chain.Tip()) { | |
return chain.Tip(); | |
} | |
} | |
} | |
return chain.Genesis(); | |
} | |
std::unique_ptr<CBlockTreeDB> pblocktree; | |
bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, | |
const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, | |
bool cacheFullScriptStore, PrecomputedTransactionData& txdata, | |
std::vector<CScriptCheck>* pvChecks = nullptr) | |
EXCLUSIVE_LOCKS_REQUIRED(cs_main); | |
static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false); | |
static FlatFileSeq BlockFileSeq(); | |
static FlatFileSeq UndoFileSeq(); | |
bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags) | |
{ | |
AssertLockHeld(cs_main); | |
assert(active_chain_tip); // TODO: Make active_chain_tip a reference | |
assert(std::addressof(*::ChainActive().Tip()) == std::addressof(*active_chain_tip)); | |
// By convention a negative value for flags indicates that the | |
// current network-enforced consensus rules should be used. In | |
// a future soft-fork scenario that would mean checking which | |
// rules would be enforced for the next block and setting the | |
// appropriate flags. At the present time no soft-forks are | |
// scheduled, so no flags are set. | |
flags = std::max(flags, 0); | |
// CheckFinalTx() uses active_chain_tip.Height()+1 to evaluate | |
// nLockTime because when IsFinalTx() is called within | |
// CBlock::AcceptBlock(), the height of the block *being* | |
// evaluated is what is used. Thus if we want to know if a | |
// transaction can be part of the *next* block, we need to call | |
// IsFinalTx() with one more than active_chain_tip.Height(). | |
const int nBlockHeight = active_chain_tip->nHeight + 1; | |
// BIP113 requires that time-locked transactions have nLockTime set to
// less than the median time-past of the block preceding the one they're
// included in. When the next block is created, its previous block will be
// the current chain tip, so we use the tip to calculate the median time
// passed to IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST) | |
? active_chain_tip->GetMedianTimePast() | |
: GetAdjustedTime(); | |
return IsFinalTx(tx, nBlockHeight, nBlockTime); | |
} | |
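// Condensed sketch (an assumption: this mirrors the finality rule that
// IsFinalTx() in consensus/tx_verify.cpp applies to the height/time computed
// above); the function name is hypothetical and the block is compiled out.
#if 0
bool IsFinalTxSketch(const CTransaction& tx, int nBlockHeight, int64_t nBlockTime)
{
    if (tx.nLockTime == 0) return true;
    // nLockTime below LOCKTIME_THRESHOLD is interpreted as a block height,
    // otherwise as a unix timestamp.
    const int64_t lock_threshold = (tx.nLockTime < LOCKTIME_THRESHOLD) ? (int64_t)nBlockHeight : nBlockTime;
    if ((int64_t)tx.nLockTime < lock_threshold) return true;
    // A locktime still in the future can be final if every input opted out by
    // using the maximal nSequence value.
    for (const CTxIn& txin : tx.vin) {
        if (txin.nSequence != CTxIn::SEQUENCE_FINAL) return false;
    }
    return true;
}
#endif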
bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp) | |
{ | |
AssertLockHeld(cs_main); | |
assert(lp); | |
// If there are relative lock times then the maxInputBlock will be set | |
// If there are no relative lock times, the LockPoints don't depend on the chain | |
if (lp->maxInputBlock) { | |
// Check whether ::ChainActive() is an extension of the block at which the LockPoints
// calculation was valid. If not, the LockPoints are no longer valid.
assert(std::addressof(::ChainActive()) == std::addressof(active_chain)); | |
if (!active_chain.Contains(lp->maxInputBlock)) { | |
return false; | |
} | |
} | |
// LockPoints still valid | |
return true; | |
} | |
bool CheckSequenceLocks(CChainState& active_chainstate, | |
const CTxMemPool& pool, | |
const CTransaction& tx, | |
int flags, | |
LockPoints* lp, | |
bool useExistingLockPoints) | |
{ | |
AssertLockHeld(cs_main); | |
AssertLockHeld(pool.cs); | |
assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); | |
CBlockIndex* tip = active_chainstate.m_chain.Tip(); | |
assert(tip != nullptr); | |
CBlockIndex index; | |
index.pprev = tip; | |
// CheckSequenceLocks() uses active_chainstate.m_chain.Height()+1 to evaluate | |
// height based locks because when SequenceLocks() is called within | |
// ConnectBlock(), the height of the block *being* | |
// evaluated is what is used. | |
// Thus if we want to know if a transaction can be part of the | |
// *next* block, we need to use one more than active_chainstate.m_chain.Height() | |
index.nHeight = tip->nHeight + 1; | |
std::pair<int, int64_t> lockPair; | |
if (useExistingLockPoints) { | |
assert(lp); | |
lockPair.first = lp->height; | |
lockPair.second = lp->time; | |
} | |
else { | |
// CoinsTip() contains the UTXO set for active_chainstate.m_chain.Tip() | |
CCoinsViewMemPool viewMemPool(&active_chainstate.CoinsTip(), pool); | |
std::vector<int> prevheights; | |
prevheights.resize(tx.vin.size()); | |
for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { | |
const CTxIn& txin = tx.vin[txinIndex]; | |
Coin coin; | |
if (!viewMemPool.GetCoin(txin.prevout, coin)) { | |
return error("%s: Missing input", __func__); | |
} | |
if (coin.nHeight == MEMPOOL_HEIGHT) { | |
// Assume all mempool transactions confirm in the next block
prevheights[txinIndex] = tip->nHeight + 1; | |
} else { | |
prevheights[txinIndex] = coin.nHeight; | |
} | |
} | |
lockPair = CalculateSequenceLocks(tx, flags, prevheights, index); | |
if (lp) { | |
lp->height = lockPair.first; | |
lp->time = lockPair.second; | |
// Also store the hash of the block with the highest height of | |
// all the blocks which have sequence locked prevouts. | |
// This hash needs to still be on the chain | |
// for these LockPoint calculations to be valid | |
// Note: It is impossible to correctly calculate a maxInputBlock
// if any of the sequence locked inputs depend on unconfirmed txs,
// except in the special case where the relative lock time/height
// is 0, which is equivalent to no sequence lock. Since we assume
// an input height of tip+1 for mempool txs and test the resulting
// lockPair from CalculateSequenceLocks against tip+1, we know
// EvaluateSequenceLocks will fail if there was a non-zero sequence
// lock on a mempool input, so we can use the return value of
// CheckSequenceLocks to indicate the LockPoints' validity.
int maxInputHeight = 0; | |
for (const int height : prevheights) { | |
// Can ignore mempool inputs since we'll fail if they had non-zero locks | |
if (height != tip->nHeight+1) { | |
maxInputHeight = std::max(maxInputHeight, height); | |
} | |
} | |
lp->maxInputBlock = tip->GetAncestor(maxInputHeight); | |
} | |
} | |
return EvaluateSequenceLocks(index, lockPair); | |
} | |
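// The lockPair evaluated above is (earliest height, earliest median-time-past)
// at which the transaction's BIP68 relative locks are satisfied. A condensed
// sketch (an assumption about EvaluateSequenceLocks() in consensus/tx_verify.cpp)
// of the final check against the hypothetical next block; compiled out.
#if 0
bool SequenceLocksSatisfiedSketch(const CBlockIndex& next_block, std::pair<int, int64_t> lockPair)
{
    // Relative time locks are measured against the median-time-past of the
    // block preceding the one being evaluated.
    const int64_t block_mtp = next_block.pprev->GetMedianTimePast();
    return lockPair.first < next_block.nHeight && lockPair.second < block_mtp;
}
#endif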
// Returns the script flags which should be checked for a given block | |
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams); | |
static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age) | |
EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main) | |
{ | |
int expired = pool.Expire(GetTime<std::chrono::seconds>() - age); | |
if (expired != 0) { | |
LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); | |
} | |
std::vector<COutPoint> vNoSpendsRemaining; | |
pool.TrimToSize(limit, &vNoSpendsRemaining); | |
assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_cache)); | |
for (const COutPoint& removed : vNoSpendsRemaining) | |
coins_cache.Uncache(removed); | |
} | |
static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main) | |
{ | |
AssertLockHeld(cs_main); | |
assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); | |
if (active_chainstate.IsInitialBlockDownload()) | |
return false; | |
if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE)) | |
return false; | |
if (active_chainstate.m_chain.Height() < pindexBestHeader->nHeight - 1) | |
return false; | |
return true; | |
} | |
/* Make mempool consistent after a reorg, by re-adding or recursively erasing | |
* disconnected block transactions from the mempool, and also removing any | |
* other transactions from the mempool that are no longer valid given the new | |
* tip/height. | |
* | |
* Note: we assume that disconnectpool only contains transactions that are NOT | |
* confirmed in the current chain nor already in the mempool (otherwise, | |
* in-mempool descendants of such transactions would be removed). | |
* | |
* Passing fAddToMempool=false will skip trying to add the transactions back, | |
* and instead just erase from the mempool as needed. | |
*/ | |
static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs) | |
{ | |
AssertLockHeld(cs_main); | |
AssertLockHeld(mempool.cs); | |
assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); | |
std::vector<uint256> vHashUpdate; | |
// disconnectpool's insertion_order index sorts the entries from | |
// oldest to newest, but the oldest entry will be the last tx from the | |
// latest mined block that was disconnected. | |
// Iterate disconnectpool in reverse, so that we add transactions | |
// back to the mempool starting with the earliest transaction that had | |
// been previously seen in a block. | |
auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin(); | |
while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) { | |
// ignore validation errors in resurrected transactions | |
if (!fAddToMempool || (*it)->IsCoinBase() || | |
AcceptToMemoryPool(active_chainstate, mempool, *it, true /* bypass_limits */).m_result_type != MempoolAcceptResult::ResultType::VALID) { | |
// If the transaction doesn't make it in to the mempool, remove any | |
// transactions that depend on it (which would now be orphans). | |
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); | |
} else if (mempool.exists((*it)->GetHash())) { | |
vHashUpdate.push_back((*it)->GetHash()); | |
} | |
++it; | |
} | |
disconnectpool.queuedTx.clear(); | |
// AcceptToMemoryPool/addUnchecked all assume that new mempool entries have | |
// no in-mempool children, which is generally not true when adding | |
// previously-confirmed transactions back to the mempool. | |
// UpdateTransactionsFromBlock finds descendants of any transactions in | |
// the disconnectpool that were added back and cleans up the mempool state. | |
mempool.UpdateTransactionsFromBlock(vHashUpdate); | |
// We also need to remove any now-immature transactions | |
mempool.removeForReorg(active_chainstate, STANDARD_LOCKTIME_VERIFY_FLAGS); | |
// Re-limit mempool size, in case we added any transactions | |
LimitMempoolSize(mempool, active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); | |
} | |
/** | |
 * Checks to avoid the mempool polluting consensus-critical paths, since cached
 * signature and script validity results will be reused if we validate this
 * transaction again during block validation.
 */
static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, | |
const CCoinsViewCache& view, const CTxMemPool& pool, | |
unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip) | |
EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) | |
{ | |
AssertLockHeld(cs_main); | |
AssertLockHeld(pool.cs); | |
assert(!tx.IsCoinBase()); | |
for (const CTxIn& txin : tx.vin) { | |
const Coin& coin = view.AccessCoin(txin.prevout); | |
// This coin was checked in PreChecks and MemPoolAccept | |
// has been holding cs_main since then. | |
Assume(!coin.IsSpent()); | |
if (coin.IsSpent()) return false; | |
// If the Coin is available, there are 2 possibilities: | |
// it is available in our current ChainstateActive UTXO set, | |
// or it's a UTXO provided by a transaction in our mempool. | |
// Ensure the scriptPubKeys in Coins from CoinsView are correct. | |
const CTransactionRef& txFrom = pool.get(txin.prevout.hash); | |
if (txFrom) { | |
assert(txFrom->GetHash() == txin.prevout.hash); | |
assert(txFrom->vout.size() > txin.prevout.n); | |
assert(txFrom->vout[txin.prevout.n] == coin.out); | |
} else { | |
assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_tip)); | |
const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout); | |
assert(!coinFromUTXOSet.IsSpent()); | |
assert(coinFromUTXOSet.out == coin.out); | |
} | |
} | |
// Call CheckInputScripts() to cache signature and script validity against current tip consensus rules. | |
return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullScriptStore = */ true, txdata);
} | |
namespace { | |
class MemPoolAccept | |
{ | |
public: | |
explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate), | |
m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)), | |
m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000), | |
m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)), | |
m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) { | |
assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); | |
} | |
// We put the arguments we're handed into a struct, so we can pass them
// around more easily.
struct ATMPArgs { | |
const CChainParams& m_chainparams; | |
const int64_t m_accept_time; | |
const bool m_bypass_limits; | |
/* | |
* Return any outpoints which were not previously present in the coins | |
* cache, but were added as a result of validating the tx for mempool | |
* acceptance. This allows the caller to optionally remove the cache | |
* additions if the associated transaction ends up being rejected by | |
* the mempool. | |
*/ | |
std::vector<COutPoint>& m_coins_to_uncache; | |
const bool m_test_accept; | |
}; | |
// Single transaction acceptance | |
MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | |
private: | |
// All the intermediate state that gets passed between the various levels | |
// of checking a given transaction. | |
struct Workspace { | |
explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {} | |
std::set<uint256> m_conflicts; | |
CTxMemPool::setEntries m_all_conflicting; | |
CTxMemPool::setEntries m_ancestors; | |
std::unique_ptr<CTxMemPoolEntry> m_entry; | |
std::list<CTransactionRef> m_replaced_transactions; | |
bool m_replacement_transaction; | |
CAmount m_base_fees; | |
CAmount m_modified_fees; | |
CAmount m_conflicting_fees; | |
size_t m_conflicting_size; | |
const CTransactionRef& m_ptx; | |
const uint256& m_hash; | |
TxValidationState m_state; | |
}; | |
// Run the policy checks on a given transaction, excluding any script checks. | |
// Looks up inputs, calculates feerate, considers replacement, evaluates | |
// package limits, etc. As this function can be invoked for "free" by a peer, | |
// only tests that are fast should be done here (to avoid CPU DoS). | |
bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | |
// Run the script checks using our policy flags. As this can be slow, we should | |
// only invoke this on transactions that have otherwise passed policy checks. | |
bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | |
// Re-run the script checks, using consensus flags, and try to cache the | |
// result in the scriptcache. This should be done after | |
// PolicyScriptChecks(). This requires that all inputs either be in our | |
// utxo set or in the mempool. | |
bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | |
// Try to add the transaction to the mempool, removing any conflicts first. | |
// Returns true if the transaction is in the mempool after any size | |
// limiting is performed, false otherwise. | |
bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | |
// Compare a package's feerate against minimum allowed. | |
bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs) | |
{ | |
CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size); | |
if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) { | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee)); | |
} | |
if (package_fee < ::minRelayTxFee.GetFee(package_size)) { | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size))); | |
} | |
return true; | |
} | |
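// Worked example (assuming the default minRelayTxFee of 1000 sat/kvB): a
// 250-vbyte package must pay at least ::minRelayTxFee.GetFee(250) = 250
// satoshis, and more than that whenever the dynamic mempool minimum fee
// returned by GetMinFee() is above the relay floor.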
private: | |
CTxMemPool& m_pool; | |
CCoinsViewCache m_view; | |
CCoinsViewMemPool m_viewmempool; | |
CCoinsView m_dummy; | |
CChainState& m_active_chainstate; | |
// The package limits in effect at the time of invocation. | |
const size_t m_limit_ancestors; | |
const size_t m_limit_ancestor_size; | |
// These may be modified while evaluating a transaction (eg to account for | |
// in-mempool conflicts; see below). | |
size_t m_limit_descendants; | |
size_t m_limit_descendant_size; | |
}; | |
bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) | |
{ | |
const CTransactionRef& ptx = ws.m_ptx; | |
const CTransaction& tx = *ws.m_ptx; | |
const uint256& hash = ws.m_hash; | |
// Copy/alias what we need out of args | |
const int64_t nAcceptTime = args.m_accept_time; | |
const bool bypass_limits = args.m_bypass_limits; | |
std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache; | |
// Alias what we need out of ws | |
TxValidationState& state = ws.m_state; | |
std::set<uint256>& setConflicts = ws.m_conflicts; | |
CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting; | |
CTxMemPool::setEntries& setAncestors = ws.m_ancestors; | |
std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry; | |
bool& fReplacementTransaction = ws.m_replacement_transaction; | |
CAmount& nModifiedFees = ws.m_modified_fees; | |
CAmount& nConflictingFees = ws.m_conflicting_fees; | |
size_t& nConflictingSize = ws.m_conflicting_size; | |
if (!CheckTransaction(tx, state)) { | |
return false; // state filled in by CheckTransaction | |
} | |
// Coinbase is only valid in a block, not as a loose transaction | |
if (tx.IsCoinBase()) | |
return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase"); | |
// Rather not work on nonstandard transactions (unless -testnet/-regtest) | |
std::string reason; | |
if (fRequireStandard && !IsStandardTx(tx, reason)) | |
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason); | |
// Do not work on transactions that are too small. | |
// A transaction with 1 segwit input and 1 P2WPKH output has a non-witness size of 82 bytes.
// Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying | |
// 64-byte transactions. | |
if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE) | |
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small"); | |
// Only accept nLockTime-using transactions that can be mined in the next | |
// block; we don't want our mempool filled up with transactions that can't | |
// be mined yet. | |
assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); | |
if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) | |
return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final"); | |
// is it already in the memory pool? | |
if (m_pool.exists(hash)) { | |
return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool"); | |
} | |
// Check for conflicts with in-memory transactions | |
for (const CTxIn &txin : tx.vin) | |
{ | |
const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout); | |
if (ptxConflicting) { | |
if (!setConflicts.count(ptxConflicting->GetHash())) | |
{ | |
// Allow opt-out of transaction replacement by setting | |
// nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs. | |
// | |
// SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by | |
// non-replaceable transactions. All inputs rather than just one | |
// is for the sake of multi-party protocols, where we don't | |
// want a single party to be able to disable replacement. | |
// | |
// The opt-out ignores descendants as anyone relying on | |
// first-seen mempool behavior should be checking all | |
// unconfirmed ancestors anyway; doing otherwise is hopelessly | |
// insecure. | |
bool fReplacementOptOut = true; | |
for (const CTxIn &_txin : ptxConflicting->vin) | |
{ | |
if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE) | |
{ | |
fReplacementOptOut = false; | |
break; | |
} | |
} | |
if (fReplacementOptOut) { | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict"); | |
} | |
setConflicts.insert(ptxConflicting->GetHash()); | |
} | |
} | |
} | |
LockPoints lp; | |
m_view.SetBackend(m_viewmempool); | |
assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); | |
const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip(); | |
// do all inputs exist? | |
for (const CTxIn& txin : tx.vin) { | |
if (!coins_cache.HaveCoinInCache(txin.prevout)) { | |
coins_to_uncache.push_back(txin.prevout); | |
} | |
// Note: this call may add txin.prevout to the coins cache | |
// (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed | |
// later (via coins_to_uncache) if this tx turns out to be invalid. | |
if (!m_view.HaveCoin(txin.prevout)) { | |
// Are inputs missing because we already have the tx? | |
for (size_t out = 0; out < tx.vout.size(); out++) { | |
// Optimistically just do efficient check of cache for outputs | |
if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) { | |
return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known"); | |
} | |
} | |
// Otherwise assume this might be an orphan tx for which we just haven't seen parents yet | |
return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent"); | |
} | |
} | |
// Bring the best block into scope | |
m_view.GetBestBlock(); | |
// we have all inputs cached now, so switch back to dummy (to protect | |
// against bugs where we pull more inputs from disk that miss being added | |
// to coins_to_uncache) | |
m_view.SetBackend(m_dummy); | |
// Only accept BIP68 sequence locked transactions that can be mined in the next | |
// block; we don't want our mempool filled up with transactions that can't | |
// be mined yet. | |
// Must keep pool.cs for this unless we change CheckSequenceLocks to take a
// CoinsViewCache instead of creating its own.
assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); | |
if (!CheckSequenceLocks(m_active_chainstate, m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) | |
return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final"); | |
assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_active_chainstate.m_blockman)); | |
if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) { | |
return false; // state filled in by CheckTxInputs | |
} | |
// Check for non-standard pay-to-script-hash in inputs | |
const auto& params = args.m_chainparams.GetConsensus(); | |
assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); | |
auto taproot_state = VersionBitsState(m_active_chainstate.m_chain.Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache); | |
if (fRequireStandard && !AreInputsStandard(tx, m_view, taproot_state == ThresholdState::ACTIVE)) { | |
return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs"); | |
} | |
// Check for non-standard witnesses. | |
if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view)) | |
return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard"); | |
int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS); | |
// nModifiedFees includes any fee deltas from PrioritiseTransaction | |
nModifiedFees = ws.m_base_fees; | |
m_pool.ApplyDelta(hash, nModifiedFees); | |
// Keep track of transactions that spend a coinbase, which we re-scan | |
// during reorgs to ensure COINBASE_MATURITY is still met. | |
bool fSpendsCoinbase = false; | |
for (const CTxIn &txin : tx.vin) { | |
const Coin &coin = m_view.AccessCoin(txin.prevout); | |
if (coin.IsCoinBase()) { | |
fSpendsCoinbase = true; | |
break; | |
} | |
} | |
assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); | |
entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(), | |
fSpendsCoinbase, nSigOpsCost, lp)); | |
unsigned int nSize = entry->GetTxSize(); | |
if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST) | |
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops", | |
strprintf("%d", nSigOpsCost)); | |
// No transactions are allowed below minRelayTxFee except from disconnected | |
// blocks | |
if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false; | |
const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts); | |
// Calculate in-mempool ancestors, up to a limit. | |
if (setConflicts.size() == 1) { | |
// In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we | |
// would meet the chain limits after the conflicts have been removed. However, there isn't a practical | |
// way to do this short of calculating the ancestor and descendant sets with an overlay cache of | |
// changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't | |
// very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool | |
// conflicts here. Importantly, we need to ensure that some transactions which were accepted using | |
// the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides | |
// for off-chain contract systems (see link in the comment below). | |
// | |
// Specifically, the subset of RBF transactions which we allow despite chain limits are those which | |
// conflict directly with exactly one other transaction (but may evict children of said transaction), | |
// and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies" | |
// check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is | |
// amended, we may need to move that check to here instead of removing it wholesale. | |
// | |
// Such transactions are clearly not merging any existing packages, so we are only concerned with | |
// ensuring that (a) no package is growing past the package size (not count) limits and (b) we are | |
// not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed | |
// to. | |
// | |
// To check these we first check if we meet the RBF criteria, above, and increment the descendant | |
// limits by the direct conflict and its descendants (as these are recalculated in | |
// CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no | |
// removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as | |
// the ancestor limits should be the same for both our new transaction and any conflicts). | |
// We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes | |
// into force here (as we're only adding a single transaction). | |
assert(setIterConflicting.size() == 1); | |
CTxMemPool::txiter conflict = *setIterConflicting.begin(); | |
m_limit_descendants += 1; | |
m_limit_descendant_size += conflict->GetSizeWithDescendants(); | |
} | |
std::string errString; | |
if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) { | |
setAncestors.clear(); | |
// If CalculateMemPoolAncestors fails a second time, we want the original error string.
std::string dummy_err_string; | |
// Contracting/payment channels CPFP carve-out: | |
// If the new transaction is relatively small (up to 40k weight) | |
// and has at most one ancestor (ie ancestor limit of 2, including | |
// the new transaction), allow it if its parent has exactly the | |
// descendant limit descendants. | |
// | |
// This allows protocols which rely on distrusting counterparties | |
// being able to broadcast descendants of an unconfirmed transaction | |
// to be secure by simply only having two immediately-spendable | |
// outputs - one for each counterparty. For more info on the uses for | |
// this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html | |
if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT || | |
!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) { | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString); | |
} | |
} | |
// A transaction that spends outputs that would be replaced by it is invalid. Now | |
// that we have the set of all ancestors we can detect this | |
// pathological case by making sure setConflicts and setAncestors don't | |
// intersect. | |
for (CTxMemPool::txiter ancestorIt : setAncestors) | |
{ | |
const uint256 &hashAncestor = ancestorIt->GetTx().GetHash(); | |
if (setConflicts.count(hashAncestor)) | |
{ | |
return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx", | |
strprintf("%s spends conflicting transaction %s", | |
hash.ToString(), | |
hashAncestor.ToString())); | |
} | |
} | |
// Check if it's economically rational to mine this transaction rather | |
// than the ones it replaces. | |
nConflictingFees = 0; | |
nConflictingSize = 0; | |
uint64_t nConflictingCount = 0; | |
// If we don't hold the lock, allConflicting might be incomplete; the
// subsequent RemoveStaged() and addUnchecked() calls don't guarantee
// mempool consistency for us.
fReplacementTransaction = setConflicts.size(); | |
if (fReplacementTransaction) | |
{ | |
CFeeRate newFeeRate(nModifiedFees, nSize); | |
std::set<uint256> setConflictsParents; | |
const int maxDescendantsToVisit = 100; | |
for (const auto& mi : setIterConflicting) { | |
// Don't allow the replacement to reduce the feerate of the | |
// mempool. | |
// | |
// We usually don't want to accept replacements with lower | |
// feerates than what they replaced as that would lower the | |
// feerate of the next block. Requiring that the feerate always | |
// be increased is also an easy-to-reason about way to prevent | |
// DoS attacks via replacements. | |
// | |
// We only consider the feerates of transactions being directly | |
// replaced, not their indirect descendants. While that does | |
// mean high feerate children are ignored when deciding whether | |
// or not to replace, we do require the replacement to pay more | |
// overall fees too, mitigating most cases. | |
CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize()); | |
if (newFeeRate <= oldFeeRate) | |
{ | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", | |
strprintf("rejecting replacement %s; new feerate %s <= old feerate %s", | |
hash.ToString(), | |
newFeeRate.ToString(), | |
oldFeeRate.ToString())); | |
} | |
for (const CTxIn &txin : mi->GetTx().vin) | |
{ | |
setConflictsParents.insert(txin.prevout.hash); | |
} | |
nConflictingCount += mi->GetCountWithDescendants(); | |
} | |
// This potentially overestimates the number of actual descendants | |
// but we just want to be conservative to avoid doing too much | |
// work. | |
if (nConflictingCount <= maxDescendantsToVisit) { | |
// If not too many to replace, then calculate the set of | |
// transactions that would have to be evicted | |
for (CTxMemPool::txiter it : setIterConflicting) { | |
m_pool.CalculateDescendants(it, allConflicting); | |
} | |
for (CTxMemPool::txiter it : allConflicting) { | |
nConflictingFees += it->GetModifiedFee(); | |
nConflictingSize += it->GetTxSize(); | |
} | |
} else { | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements", | |
strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n", | |
hash.ToString(), | |
nConflictingCount, | |
maxDescendantsToVisit)); | |
} | |
for (unsigned int j = 0; j < tx.vin.size(); j++) | |
{ | |
// We don't want to accept replacements that require low | |
// feerate junk to be mined first. Ideally we'd keep track of | |
// the ancestor feerates and make the decision based on that, | |
// but for now requiring all new inputs to be confirmed works. | |
// | |
// Note that if you relax this to make RBF a little more useful, | |
// this may break the CalculateMempoolAncestors RBF relaxation, | |
// above. See the comment above the first CalculateMempoolAncestors | |
// call for more info. | |
if (!setConflictsParents.count(tx.vin[j].prevout.hash)) | |
{ | |
// Rather than check the UTXO set - potentially expensive - | |
// it's cheaper to just check if the new input refers to a | |
// tx that's in the mempool. | |
if (m_pool.exists(tx.vin[j].prevout.hash)) { | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-adds-unconfirmed", | |
strprintf("replacement %s adds unconfirmed input, idx %d", | |
hash.ToString(), j)); | |
} | |
} | |
} | |
// The replacement must pay greater fees than the transactions it
// replaces - otherwise the bandwidth used by those conflicting
// transactions would not be paid for.
if (nModifiedFees < nConflictingFees) | |
{ | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", | |
strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s", | |
hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees))); | |
} | |
// Finally in addition to paying more fees than the conflicts the | |
// new transaction must pay for its own bandwidth. | |
CAmount nDeltaFees = nModifiedFees - nConflictingFees; | |
if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize)) | |
{ | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", | |
strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s", | |
hash.ToString(), | |
FormatMoney(nDeltaFees), | |
FormatMoney(::incrementalRelayFee.GetFee(nSize)))); | |
} | |
} | |
return true; | |
} | |
bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) | |
{ | |
const CTransaction& tx = *ws.m_ptx; | |
TxValidationState& state = ws.m_state; | |
constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; | |
// Check input scripts and signatures. | |
// This is done last to help prevent CPU exhaustion denial-of-service attacks. | |
if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) { | |
// SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we | |
// need to turn both off, and compare against just turning off CLEANSTACK | |
// to see if the failure is specifically due to witness validation. | |
TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts | |
if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) && | |
!CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) { | |
// Only the witness is missing, so the transaction itself may be fine. | |
state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED, | |
state.GetRejectReason(), state.GetDebugMessage()); | |
} | |
return false; // state filled in by CheckInputScripts | |
} | |
return true; | |
} | |
bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) | |
{ | |
const CTransaction& tx = *ws.m_ptx; | |
const uint256& hash = ws.m_hash; | |
TxValidationState& state = ws.m_state; | |
const CChainParams& chainparams = args.m_chainparams; | |
// Check again against the current block tip's script verification | |
// flags to cache our script execution flags. This is, of course, | |
// useless if the next block has different script flags from the | |
// previous one, but because the cache tracks script flags for us it | |
// will auto-invalidate and we'll just have a few blocks of extra | |
// misses on soft-fork activation. | |
// | |
// This is also useful in case of bugs in the standard flags that cause | |
// transactions to pass as valid when they're actually invalid. For | |
// instance the STRICTENC flag was incorrectly allowing certain | |
// CHECKSIG NOT scripts to pass, even though they were invalid. | |
// | |
// There is a similar check in CreateNewBlock() to prevent creating | |
// invalid blocks (using TestBlockValidity), however allowing such | |
// transactions into the mempool can be exploited as a DoS attack. | |
assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); | |
unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus()); | |
assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); | |
if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata, m_active_chainstate.CoinsTip())) { | |
return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s", | |
__func__, hash.ToString(), state.ToString()); | |
} | |
return true; | |
} | |
bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws) | |
{ | |
const CTransaction& tx = *ws.m_ptx; | |
const uint256& hash = ws.m_hash; | |
TxValidationState& state = ws.m_state; | |
const bool bypass_limits = args.m_bypass_limits; | |
CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting; | |
CTxMemPool::setEntries& setAncestors = ws.m_ancestors; | |
const CAmount& nModifiedFees = ws.m_modified_fees; | |
const CAmount& nConflictingFees = ws.m_conflicting_fees; | |
const size_t& nConflictingSize = ws.m_conflicting_size; | |
const bool fReplacementTransaction = ws.m_replacement_transaction; | |
std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry; | |
// Remove conflicting transactions from the mempool | |
for (CTxMemPool::txiter it : allConflicting) | |
{ | |
LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n", | |
it->GetTx().GetHash().ToString(), | |
hash.ToString(), | |
FormatMoney(nModifiedFees - nConflictingFees), | |
(int)entry->GetTxSize() - (int)nConflictingSize); | |
ws.m_replaced_transactions.push_back(it->GetSharedTx()); | |
} | |
m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED); | |
// This transaction should only count for fee estimation if: | |
// - it isn't a BIP 125 replacement transaction (may not be widely supported) | |
// - it's not being re-added during a reorg which bypasses typical mempool fee limits | |
// - the node is not behind | |
// - the transaction is not dependent on any other transactions in the mempool | |
assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); | |
bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx); | |
// Store transaction in memory | |
m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation); | |
// trim mempool and check if tx was trimmed | |
if (!bypass_limits) { | |
assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); | |
LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); | |
if (!m_pool.exists(hash)) | |
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full"); | |
} | |
return true; | |
} | |
MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) | |
{ | |
AssertLockHeld(cs_main); | |
LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool()) | |
Workspace ws(ptx); | |
if (!PreChecks(args, ws)) return MempoolAcceptResult(ws.m_state); | |
// Only compute the precomputed transaction data if we need to verify | |
// scripts (ie, other policy checks pass). We perform the inexpensive | |
// checks first and avoid hashing and signature verification unless those | |
// checks pass, to mitigate CPU exhaustion denial-of-service attacks. | |
PrecomputedTransactionData txdata; | |
if (!PolicyScriptChecks(args, ws, txdata)) return MempoolAcceptResult(ws.m_state); | |
if (!ConsensusScriptChecks(args, ws, txdata)) return MempoolAcceptResult(ws.m_state); | |
// Tx was accepted, but not added | |
if (args.m_test_accept) { | |
return MempoolAcceptResult(std::move(ws.m_replaced_transactions), ws.m_base_fees); | |
} | |
if (!Finalize(args, ws)) return MempoolAcceptResult(ws.m_state); | |
GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence()); | |
return MempoolAcceptResult(std::move(ws.m_replaced_transactions), ws.m_base_fees); | |
} | |
} // anon namespace | |
/** (Try to) add a transaction to the memory pool with a specified acceptance time. */
static MempoolAcceptResult AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, | |
CChainState& active_chainstate, | |
const CTransactionRef &tx, int64_t nAcceptTime, | |
bool bypass_limits, bool test_accept) | |
EXCLUSIVE_LOCKS_REQUIRED(cs_main) | |
{ | |
std::vector<COutPoint> coins_to_uncache; | |
MemPoolAccept::ATMPArgs args { chainparams, nAcceptTime, bypass_limits, coins_to_uncache, test_accept }; | |
assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); | |
const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args); | |
if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { | |
// Remove coins that were not present in the coins cache before calling ATMPW; | |
// this is to prevent memory DoS in case we receive a large number of | |
// invalid transactions that attempt to overrun the in-memory coins cache | |
// (`CCoinsViewCache::cacheCoins`). | |
for (const COutPoint& hashTx : coins_to_uncache) | |
active_chainstate.CoinsTip().Uncache(hashTx); | |
} | |
// After we've (potentially) uncached entries, ensure our coins cache is still within its size limits | |
BlockValidationState state_dummy; | |
active_chainstate.FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC); | |
return result; | |
} | |
MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx, | |
bool bypass_limits, bool test_accept) | |
{ | |
assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); | |
return AcceptToMemoryPoolWithTime(Params(), pool, active_chainstate, tx, GetTime(), bypass_limits, test_accept); | |
} | |
CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock) | |
{ | |
LOCK(cs_main); | |
if (block_index) { | |
CBlock block; | |
if (ReadBlockFromDisk(block, block_index, consensusParams)) { | |
for (const auto& tx : block.vtx) { | |
if (tx->GetHash() == hash) { | |
hashBlock = block_index->GetBlockHash(); | |
return tx; | |
} | |
} | |
} | |
return nullptr; | |
} | |
if (mempool) { | |
CTransactionRef ptx = mempool->get(hash); | |
if (ptx) return ptx; | |
} | |
if (g_txindex) { | |
CTransactionRef tx; | |
if (g_txindex->FindTx(hash, hashBlock, tx)) return tx; | |
} | |
return nullptr; | |
} | |
////////////////////////////////////////////////////////////////////////////// | |
// | |
// CBlock and CBlockIndex | |
// | |
static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart) | |
{ | |
// Open history file to append | |
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); | |
if (fileout.IsNull()) | |
return error("WriteBlockToDisk: OpenBlockFile failed"); | |
// Write index header | |
unsigned int nSize = GetSerializeSize(block, fileout.GetVersion()); | |
fileout << messageStart << nSize; | |
// Write block | |
long fileOutPos = ftell(fileout.Get()); | |
if (fileOutPos < 0) | |
return error("WriteBlockToDisk: ftell failed"); | |
pos.nPos = (unsigned int)fileOutPos; | |
fileout << block; | |
return true; | |
} | |
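// Resulting on-disk layout of each record in a blk?????.dat file (this is why
// ReadRawBlockFromDisk() below seeks back 8 bytes from a block's FlatFilePos):
//   [4 bytes]     network message start (magic)
//   [4 bytes]     serialized block size (nSize)
//   [nSize bytes] the serialized block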
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams) | |
{ | |
block.SetNull(); | |
// Open history file to read | |
CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); | |
if (filein.IsNull()) | |
return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); | |
// Read block | |
try { | |
filein >> block; | |
} | |
catch (const std::exception& e) { | |
return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); | |
} | |
// Check the header | |
if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams)) | |
return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); | |
// Signet only: check block solution | |
if (consensusParams.signet_blocks && !CheckSignetBlockSolution(block, consensusParams)) { | |
return error("ReadBlockFromDisk: Errors in block solution at %s", pos.ToString()); | |
} | |
return true; | |
} | |
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams) | |
{ | |
FlatFilePos blockPos; | |
{ | |
LOCK(cs_main); | |
blockPos = pindex->GetBlockPos(); | |
} | |
if (!ReadBlockFromDisk(block, blockPos, consensusParams)) | |
return false; | |
if (block.GetHash() != pindex->GetBlockHash()) | |
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s", | |
pindex->ToString(), pindex->GetBlockPos().ToString()); | |
return true; | |
} | |
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start) | |
{ | |
FlatFilePos hpos = pos; | |
hpos.nPos -= 8; // Seek back 8 bytes for meta header | |
CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION); | |
if (filein.IsNull()) { | |
return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString()); | |
} | |
try { | |
CMessageHeader::MessageStartChars blk_start; | |
unsigned int blk_size; | |
filein >> blk_start >> blk_size; | |
if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) { | |
return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(), | |
HexStr(blk_start), | |
HexStr(message_start)); | |
} | |
if (blk_size > MAX_SIZE) { | |
return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(), | |
blk_size, MAX_SIZE); | |
} | |
block.resize(blk_size); // Zeroing of memory is intentional here | |
filein.read((char*)block.data(), blk_size); | |
} catch(const std::exception& e) { | |
return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString()); | |
} | |
return true; | |
} | |
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start) | |
{ | |
FlatFilePos block_pos; | |
{ | |
LOCK(cs_main); | |
block_pos = pindex->GetBlockPos(); | |
} | |
return ReadRawBlockFromDisk(block, block_pos, message_start); | |
} | |
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams) | |
{ | |
int halvings = nHeight / consensusParams.nSubsidyHalvingInterval; | |
// Force block reward to zero when right shift is undefined. | |
if (halvings >= 64) | |
return 0; | |
CAmount nSubsidy = 50 * COIN; | |
// Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years. | |
nSubsidy >>= halvings; | |
return nSubsidy; | |
} | |
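// Worked example: at height 709,632 the subsidy has halved 709632 / 210000 = 3
// times, so GetBlockSubsidy returns (50 * COIN) >> 3 = 6.25 BTC (625,000,000
// satoshis). The halvings >= 64 early return avoids an undefined right shift.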
CoinsViews::CoinsViews( | |
std::string ldb_name, | |
size_t cache_size_bytes, | |
bool in_memory, | |
bool should_wipe) : m_dbview( | |
GetDataDir() / ldb_name, cache_size_bytes, in_memory, should_wipe), | |
m_catcherview(&m_dbview) {} | |
void CoinsViews::InitCache() | |
{ | |
m_cacheview = MakeUnique<CCoinsViewCache>(&m_catcherview); | |
} | |
CChainState::CChainState(CTxMemPool& mempool, BlockManager& blockman, uint256 from_snapshot_blockhash) | |
: m_mempool(mempool), | |
m_blockman(blockman), | |
m_from_snapshot_blockhash(from_snapshot_blockhash) {} | |
void CChainState::InitCoinsDB( | |
size_t cache_size_bytes, | |
bool in_memory, | |
bool should_wipe, | |
std::string leveldb_name) | |
{ | |
if (!m_from_snapshot_blockhash.IsNull()) { | |
leveldb_name += "_" + m_from_snapshot_blockhash.ToString(); | |
} | |
m_coins_views = MakeUnique<CoinsViews>( | |
leveldb_name, cache_size_bytes, in_memory, should_wipe); | |
} | |
void CChainState::InitCoinsCache(size_t cache_size_bytes) | |
{ | |
assert(m_coins_views != nullptr); | |
m_coinstip_cache_size_bytes = cache_size_bytes; | |
m_coins_views->InitCache(); | |
} | |
// Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which | |
// is a performance-related implementation detail. This function must be marked | |
// `const` so that `CValidationInterface` clients (which are given a `const CChainState*`) | |
// can call it. | |
// | |
bool CChainState::IsInitialBlockDownload() const | |
{ | |
// Optimization: pre-test latch before taking the lock. | |
if (m_cached_finished_ibd.load(std::memory_order_relaxed)) | |
return false; | |
LOCK(cs_main); | |
if (m_cached_finished_ibd.load(std::memory_order_relaxed)) | |
return false; | |
if (fImporting || fReindex) | |
return true; | |
if (m_chain.Tip() == nullptr) | |
return true; | |
if (m_chain.Tip()->nChainWork < nMinimumChainWork) | |
return true; | |
if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) | |
return true; | |
LogPrintf("Leaving InitialBlockDownload (latching to false)\n"); | |
m_cached_finished_ibd.store(true, std::memory_order_relaxed); | |
return false; | |
} | |
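// The function above is a double-checked latch: a relaxed atomic is tested
// before and after taking cs_main so that, once IBD has completed, later
// callers return without contending on the lock and the answer can never
// flip back. A generic sketch of the pattern (illustrative only, not code
// from this file; `Expensive()` is a hypothetical slow predicate):
//
//   std::atomic<bool> g_latch{false};
//
//   bool CachedCheck(RecursiveMutex& mutex)
//   {
//       if (g_latch.load(std::memory_order_relaxed)) return false;
//       LOCK(mutex);
//       if (g_latch.load(std::memory_order_relaxed)) return false;
//       if (Expensive()) return true;           // still in the "true" state
//       g_latch.store(true, std::memory_order_relaxed);
//       return false;                           // latched; stays false forever
//   }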
static void AlertNotify(const std::string& strMessage) | |
{ | |
uiInterface.NotifyAlertChanged(); | |
#if HAVE_SYSTEM | |
std::string strCmd = gArgs.GetArg("-alertnotify", ""); | |
if (strCmd.empty()) return; | |
// Alert text should be plain ascii coming from a trusted source, but to | |
// be safe we first strip anything not in safeChars, then add single quotes around | |
// the whole string before passing it to the shell: | |
std::string singleQuote("'"); | |
std::string safeStatus = SanitizeString(strMessage); | |
safeStatus = singleQuote+safeStatus+singleQuote; | |
boost::replace_all(strCmd, "%s", safeStatus); | |
std::thread t(runCommand, strCmd); | |
t.detach(); // thread runs free | |
#endif | |
} | |
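// Example: with `-alertnotify="echo %s >> /tmp/alerts.txt"` (a hypothetical
// command), an alert such as "Warning: unknown new rules activated" is first
// sanitized, wrapped in single quotes, substituted for %s, and then run in a
// detached thread, i.e. the shell sees:
//
//   echo 'Warning: unknown new rules activated' >> /tmp/alerts.txt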
void CChainState::CheckForkWarningConditions() | |
{ | |
AssertLockHeld(cs_main); | |
assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); | |
// Before we get past initial download, we cannot reliably alert about forks | |
// (we assume we don't get stuck on a fork before finishing our initial sync) | |
if (IsInitialBlockDownload()) { | |
return; | |
} | |
if (pindexBestInvalid && pindexBestInvalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) { | |
LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__); | |
SetfLargeWorkInvalidChainFound(true); | |
} else { | |
SetfLargeWorkInvalidChainFound(false); | |
} | |
} | |
// Called both upon regular invalid block discovery *and* InvalidateBlock | |
void CChainState::InvalidChainFound(CBlockIndex* pindexNew) | |
{ | |
assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); | |
if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork) | |
pindexBestInvalid = pindexNew; | |
if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) { | |
pindexBestHeader = m_chain.Tip(); | |
} | |
LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__, | |
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, | |
log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime())); | |
CBlockIndex *tip = m_chain.Tip(); | |
assert (tip); | |
LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__, | |
tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0), | |
FormatISO8601DateTime(tip->GetBlockTime())); | |
CheckForkWarningConditions(); | |
} | |
// Same as InvalidChainFound, above, except not called directly from InvalidateBlock, | |
// which does its own setBlockIndexCandidates management.
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const BlockValidationState &state) { | |
if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) { | |
pindex->nStatus |= BLOCK_FAILED_VALID; | |
m_blockman.m_failed_blocks.insert(pindex); | |
setDirtyBlockIndex.insert(pindex); | |
setBlockIndexCandidates.erase(pindex); | |
InvalidChainFound(pindex); | |
} | |
} | |
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight) | |
{ | |
// mark inputs spent | |
if (!tx.IsCoinBase()) { | |
txundo.vprevout.reserve(tx.vin.size()); | |
for (const CTxIn &txin : tx.vin) { | |
txundo.vprevout.emplace_back(); | |
bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back()); | |
assert(is_spent); | |
} | |
} | |
// add outputs | |
AddCoins(inputs, tx, nHeight); | |
} | |
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight) | |
{ | |
CTxUndo txundo; | |
UpdateCoins(tx, inputs, txundo, nHeight); | |
} | |
bool CScriptCheck::operator()() { | |
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; | |
const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness; | |
return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error); | |
} | |
int BlockManager::GetSpendHeight(const CCoinsViewCache& inputs) | |
{ | |
AssertLockHeld(cs_main); | |
assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this)); | |
CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock()); | |
return pindexPrev->nHeight + 1; | |
} | |
static CuckooCache::cache<uint256, SignatureCacheHasher> g_scriptExecutionCache; | |
static CSHA256 g_scriptExecutionCacheHasher; | |
void InitScriptExecutionCache() { | |
// Setup the salted hasher | |
uint256 nonce = GetRandHash(); | |
// We want the nonce to be 64 bytes long to force the hasher to process | |
// this chunk, which makes later hash computations more efficient. We | |
// just write our 32-byte entropy twice to fill the 64 bytes. | |
g_scriptExecutionCacheHasher.Write(nonce.begin(), 32); | |
g_scriptExecutionCacheHasher.Write(nonce.begin(), 32); | |
// nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero, | |
// setup_bytes creates the minimum possible cache (2 elements). | |
size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20); | |
size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize); | |
LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n", | |
(nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems); | |
} | |
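// Worked example of the sizing above: with `-maxsigcachesize=32` (MiB) the
// value is halved (the other half is reserved for the signature cache),
// giving nMaxCacheSize = 16 MiB = 16,777,216 bytes. Each entry is a 32-byte
// uint256, so at most 16,777,216 / 32 = 524,288 entries fit; setup_bytes may
// adjust that count to suit the cuckoo cache's internal layout, and the
// actual numbers are reported by the LogPrintf above.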
/** | |
* Check whether all of this transaction's input scripts succeed. | |
* | |
 * This involves ECDSA signature checks, so it can be computationally intensive. This function should
 * only be called after the cheap sanity checks in CheckTxInputs have passed.
* | |
* If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any | |
* script checks which are not necessary (eg due to script execution cache hits) are, obviously, | |
* not pushed onto pvChecks/run. | |
* | |
* Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache | |
* which are matched. This is useful for checking blocks where we will likely never need the cache | |
* entry again. | |
* | |
 * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags; block-checking
 * callers should probably reset it to CONSENSUS in such cases.
* | |
* Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp | |
*/ | |
bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, | |
const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, | |
bool cacheFullScriptStore, PrecomputedTransactionData& txdata, | |
std::vector<CScriptCheck>* pvChecks) | |
{ | |
if (tx.IsCoinBase()) return true; | |
if (pvChecks) { | |
pvChecks->reserve(tx.vin.size()); | |
} | |
// First check if script executions have been cached with the same | |
// flags. Note that this assumes that the inputs provided are | |
// correct (ie that the transaction hash which is in tx's prevouts | |
// properly commits to the scriptPubKey in the inputs view of that | |
// transaction). | |
uint256 hashCacheEntry; | |
CSHA256 hasher = g_scriptExecutionCacheHasher; | |
hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin()); | |
AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks | |
if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) { | |
return true; | |
} | |
if (!txdata.m_spent_outputs_ready) { | |
std::vector<CTxOut> spent_outputs; | |
spent_outputs.reserve(tx.vin.size()); | |
for (const auto& txin : tx.vin) { | |
const COutPoint& prevout = txin.prevout; | |
const Coin& coin = inputs.AccessCoin(prevout); | |
assert(!coin.IsSpent()); | |
spent_outputs.emplace_back(coin.out); | |
} | |
txdata.Init(tx, std::move(spent_outputs)); | |
} | |
assert(txdata.m_spent_outputs.size() == tx.vin.size()); | |
for (unsigned int i = 0; i < tx.vin.size(); i++) { | |
// We very carefully only pass in things to CScriptCheck which | |
// are clearly committed to by the tx's witness hash. This provides
// a sanity check that our caching is not introducing consensus | |
// failures through additional data in, eg, the coins being | |
// spent being checked as a part of CScriptCheck. | |
// Verify signature | |
CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata); | |
if (pvChecks) { | |
pvChecks->push_back(CScriptCheck()); | |
check.swap(pvChecks->back()); | |
} else if (!check()) { | |
if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { | |
// Check whether the failure was caused by a | |
// non-mandatory script verification check, such as | |
// non-standard DER encodings or non-null dummy | |
// arguments; if so, ensure we return NOT_STANDARD | |
// instead of CONSENSUS to avoid downstream users | |
// splitting the network between upgraded and | |
// non-upgraded nodes by banning CONSENSUS-failing | |
// data providers. | |
CScriptCheck check2(txdata.m_spent_outputs[i], tx, i, | |
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata); | |
if (check2()) | |
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); | |
} | |
// MANDATORY flag failures correspond to | |
// TxValidationResult::TX_CONSENSUS. Because CONSENSUS | |
// failures are the most serious case of validation | |
// failures, we may need to consider using | |
// RECENT_CONSENSUS_CHANGE for any script failure that | |
// could be due to non-upgraded nodes which we may want to | |
// support, to avoid splitting the network (but this | |
// depends on the details of how net_processing handles | |
// such errors). | |
return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError()))); | |
} | |
} | |
if (cacheFullScriptStore && !pvChecks) { | |
// We executed all of the provided scripts, and were told to | |
// cache the result. Do so now. | |
g_scriptExecutionCache.insert(hashCacheEntry); | |
} | |
return true; | |
} | |
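// The cache key computed near the top of CheckInputScripts commits to both
// the transaction and the flags it was validated under:
//
//   hashCacheEntry = SHA256(64-byte salt || wtxid (32 bytes) || flags (4 bytes))
//
// so a transaction seen again under the identical flags (e.g. mempool
// acceptance followed by block connection) hits the cache, while validation
// under a different flag set recomputes the scripts from scratch.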
static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart) | |
{ | |
// Open history file to append | |
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); | |
if (fileout.IsNull()) | |
return error("%s: OpenUndoFile failed", __func__); | |
// Write index header | |
unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion()); | |
fileout << messageStart << nSize; | |
// Write undo data | |
long fileOutPos = ftell(fileout.Get()); | |
if (fileOutPos < 0) | |
return error("%s: ftell failed", __func__); | |
pos.nPos = (unsigned int)fileOutPos; | |
fileout << blockundo; | |
// calculate & write checksum | |
CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); | |
hasher << hashBlock; | |
hasher << blockundo; | |
fileout << hasher.GetHash(); | |
return true; | |
} | |
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex) | |
{ | |
FlatFilePos pos = pindex->GetUndoPos(); | |
if (pos.IsNull()) { | |
return error("%s: no undo data available", __func__); | |
} | |
// Open history file to read | |
CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); | |
if (filein.IsNull()) | |
return error("%s: OpenUndoFile failed", __func__); | |
// Read block | |
uint256 hashChecksum; | |
CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data | |
try { | |
verifier << pindex->pprev->GetBlockHash(); | |
verifier >> blockundo; | |
filein >> hashChecksum; | |
} | |
catch (const std::exception& e) { | |
return error("%s: Deserialize or I/O error - %s", __func__, e.what()); | |
} | |
// Verify checksum | |
if (hashChecksum != verifier.GetHash()) | |
return error("%s: Checksum mismatch", __func__); | |
return true; | |
} | |
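// Undo (rev?????.dat) records written and read by the two functions above use
// the layout:
//
//   [4 bytes]   network magic (messageStart)
//   [4 bytes]   serialized size of the undo data
//   [n bytes]   the CBlockUndo itself
//   [32 bytes]  checksum = double-SHA256(previous block hash || undo data)
//
// Keying the checksum on the previous block's hash is why UndoReadFromDisk
// feeds pindex->pprev->GetBlockHash() into the CHashVerifier before
// deserializing, and why WriteUndoDataForBlock below passes that same hash
// to UndoWriteToDisk.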
/** Abort with a message */ | |
static bool AbortNode(const std::string& strMessage, bilingual_str user_message = bilingual_str()) | |
{ | |
SetMiscWarning(Untranslated(strMessage)); | |
LogPrintf("*** %s\n", strMessage); | |
if (user_message.empty()) { | |
user_message = _("A fatal internal error occurred, see debug.log for details"); | |
} | |
AbortError(user_message); | |
StartShutdown(); | |
return false; | |
} | |
static bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str()) | |
{ | |
AbortNode(strMessage, userMessage); | |
return state.Error(strMessage); | |
} | |
/** | |
* Restore the UTXO in a Coin at a given COutPoint | |
* @param undo The Coin to be restored. | |
* @param view The coins view to which to apply the changes. | |
* @param out The out point that corresponds to the tx input. | |
* @return A DisconnectResult as an int | |
*/ | |
int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out) | |
{ | |
bool fClean = true; | |
if (view.HaveCoin(out)) fClean = false; // overwriting transaction output | |
if (undo.nHeight == 0) { | |
// Missing undo metadata (height and coinbase). Older versions included this | |
// information only in undo records for the last spend of a transaction's
// outputs. This implies that it must be present for some other output of the same tx. | |
const Coin& alternate = AccessByTxid(view, out.hash); | |
if (!alternate.IsSpent()) { | |
undo.nHeight = alternate.nHeight; | |
undo.fCoinBase = alternate.fCoinBase; | |
} else { | |
return DISCONNECT_FAILED; // adding output for transaction without known metadata | |
} | |
} | |
// If the coin already exists as an unspent coin in the cache, then the | |
// possible_overwrite parameter to AddCoin must be set to true. We have | |
// already checked whether an unspent coin exists above using HaveCoin, so | |
// we don't need to guess. When fClean is false, an unspent coin already | |
// existed and it is an overwrite. | |
view.AddCoin(out, std::move(undo), !fClean); | |
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN; | |
} | |
/** Undo the effects of this block (with given index) on the UTXO set represented by coins. | |
* When FAILED is returned, view is left in an indeterminate state. */ | |
DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view) | |
{ | |
bool fClean = true; | |
CBlockUndo blockUndo; | |
if (!UndoReadFromDisk(blockUndo, pindex)) { | |
error("DisconnectBlock(): failure reading undo data"); | |
return DISCONNECT_FAILED; | |
} | |
if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) { | |
error("DisconnectBlock(): block and undo data inconsistent"); | |
return DISCONNECT_FAILED; | |
} | |
// undo transactions in reverse order | |
for (int i = block.vtx.size() - 1; i >= 0; i--) { | |
const CTransaction &tx = *(block.vtx[i]); | |
uint256 hash = tx.GetHash(); | |
bool is_coinbase = tx.IsCoinBase(); | |
// Check that all outputs are available and match the outputs in the block itself | |
// exactly. | |
for (size_t o = 0; o < tx.vout.size(); o++) { | |
if (!tx.vout[o].scriptPubKey.IsUnspendable()) { | |
COutPoint out(hash, o); | |
Coin coin; | |
bool is_spent = view.SpendCoin(out, &coin); | |
if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) { | |
fClean = false; // transaction output mismatch | |
} | |
} | |
} | |
// restore inputs | |
if (i > 0) { // not coinbases | |
CTxUndo &txundo = blockUndo.vtxundo[i-1]; | |
if (txundo.vprevout.size() != tx.vin.size()) { | |
error("DisconnectBlock(): transaction and undo data inconsistent"); | |
return DISCONNECT_FAILED; | |
} | |
for (unsigned int j = tx.vin.size(); j-- > 0;) { | |
const COutPoint &out = tx.vin[j].prevout; | |
int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out); | |
if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED; | |
fClean = fClean && res != DISCONNECT_UNCLEAN; | |
} | |
// At this point, all of txundo.vprevout should have been moved out. | |
} | |
} | |
// move best block pointer to prevout block | |
view.SetBestBlock(pindex->pprev->GetBlockHash()); | |
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN; | |
} | |
static void FlushUndoFile(int block_file, bool finalize = false) | |
{ | |
FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize); | |
if (!UndoFileSeq().Flush(undo_pos_old, finalize)) { | |
AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error."); | |
} | |
} | |
static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false) | |
{ | |
LOCK(cs_LastBlockFile); | |
FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize); | |
if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) { | |
AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error."); | |
} | |
// we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks, | |
// e.g. during IBD or a sync after a node going offline | |
if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo); | |
} | |
static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize); | |
static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams) | |
{ | |
// Write undo information to disk | |
if (pindex->GetUndoPos().IsNull()) { | |
FlatFilePos _pos; | |
if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) | |
return error("ConnectBlock(): FindUndoPos failed"); | |
if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart())) | |
return AbortNode(state, "Failed to write undo data"); | |
// rev files are written in block height order, whereas blk files are written as blocks come in (often out of order) | |
// we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height | |
// in the block file info as below; note that this does not catch the case where the undo writes are keeping up | |
// with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in | |
// the FindBlockPos function | |
if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) { | |
FlushUndoFile(_pos.nFile, true); | |
} | |
// update nUndoPos in block index | |
pindex->nUndoPos = _pos.nPos; | |
pindex->nStatus |= BLOCK_HAVE_UNDO; | |
setDirtyBlockIndex.insert(pindex); | |
} | |
return true; | |
} | |
static CCheckQueue<CScriptCheck> scriptcheckqueue(128); | |
void StartScriptCheckWorkerThreads(int threads_num) | |
{ | |
scriptcheckqueue.StartWorkerThreads(threads_num); | |
} | |
void StopScriptCheckWorkerThreads() | |
{ | |
scriptcheckqueue.StopWorkerThreads(); | |
} | |
VersionBitsCache versionbitscache GUARDED_BY(cs_main); | |
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params) | |
{ | |
LOCK(cs_main); | |
int32_t nVersion = VERSIONBITS_TOP_BITS; | |
for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) { | |
ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache); | |
if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) { | |
nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i)); | |
} | |
} | |
return nVersion; | |
} | |
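// Example: VERSIONBITS_TOP_BITS is 0x20000000. If a single deployment
// signalling on bit 2 is in the STARTED or LOCKED_IN state, the block version
// computed above is 0x20000000 | (1 << 2) = 0x20000004; with no active
// signalling it is just 0x20000000.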
/** | |
* Threshold condition checker that triggers when unknown versionbits are seen on the network. | |
*/ | |
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker | |
{ | |
private: | |
int bit; | |
public: | |
explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {} | |
int64_t BeginTime(const Consensus::Params& params) const override { return 0; } | |
int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); } | |
int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; } | |
int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; } | |
bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override | |
{ | |
return pindex->nHeight >= params.MinBIP9WarningHeight && | |
((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) && | |
((pindex->nVersion >> bit) & 1) != 0 && | |
((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0; | |
} | |
}; | |
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main); | |
// 0.13.0 was shipped with a segwit deployment defined for testnet, but not for | |
// mainnet. We no longer need to support disabling the segwit deployment | |
// except for testing purposes, due to limitations of the functional test | |
// environment. See test/functional/p2p-segwit.py. | |
static bool IsScriptWitnessEnabled(const Consensus::Params& params) | |
{ | |
return params.SegwitHeight != std::numeric_limits<int>::max(); | |
} | |
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { | |
AssertLockHeld(cs_main); | |
unsigned int flags = SCRIPT_VERIFY_NONE; | |
// BIP16 didn't become active until Apr 1 2012 (on mainnet, and | |
// retroactively applied to testnet) | |
// However, only one historical block violated the P2SH rules (on both | |
// mainnet and testnet), so for simplicity, always leave P2SH | |
// on except for the one violating block. | |
if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain | |
pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity() | |
*pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception | |
{ | |
flags |= SCRIPT_VERIFY_P2SH; | |
} | |
// Enforce WITNESS rules whenever P2SH is in effect (and the segwit | |
// deployment is defined). | |
if (flags & SCRIPT_VERIFY_P2SH && IsScriptWitnessEnabled(consensusparams)) { | |
flags |= SCRIPT_VERIFY_WITNESS; | |
} | |
// Start enforcing the DERSIG (BIP66) rule | |
if (pindex->nHeight >= consensusparams.BIP66Height) { | |
flags |= SCRIPT_VERIFY_DERSIG; | |
} | |
// Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule | |
if (pindex->nHeight >= consensusparams.BIP65Height) { | |
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; | |
} | |
// Start enforcing BIP112 (CHECKSEQUENCEVERIFY) | |
if (pindex->nHeight >= consensusparams.CSVHeight) { | |
flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; | |
} | |
// Start enforcing Taproot using versionbits logic. | |
if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_TAPROOT, versionbitscache) == ThresholdState::ACTIVE) { | |
flags |= SCRIPT_VERIFY_TAPROOT; | |
} | |
// Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit) | |
if (IsWitnessEnabled(pindex->pprev, consensusparams)) { | |
flags |= SCRIPT_VERIFY_NULLDUMMY; | |
} | |
return flags; | |
} | |
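// Example: for a recent mainnet block (past the BIP66, BIP65, CSV and segwit
// activation heights, and not the single historical BIP16 exception block)
// the flags above accumulate to SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS |
// SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY |
// SCRIPT_VERIFY_CHECKSEQUENCEVERIFY | SCRIPT_VERIFY_NULLDUMMY, plus
// SCRIPT_VERIFY_TAPROOT once the Taproot deployment reports ACTIVE.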
static int64_t nTimeCheck = 0; | |
static int64_t nTimeForks = 0; | |
static int64_t nTimeVerify = 0; | |
static int64_t nTimeConnect = 0; | |
static int64_t nTimeIndex = 0; | |
static int64_t nTimeCallbacks = 0; | |
static int64_t nTimeTotal = 0; | |
static int64_t nBlocksTotal = 0; | |
/** Apply the effects of this block (with given index) on the UTXO set represented by coins. | |
* Validity checks that depend on the UTXO set are also done; ConnectBlock() | |
* can fail if those validity checks fail (among other reasons). */ | |
bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex, | |
CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck) | |
{ | |
AssertLockHeld(cs_main); | |
assert(pindex); | |
assert(*pindex->phashBlock == block.GetHash()); | |
int64_t nTimeStart = GetTimeMicros(); | |
// Check it again in case a previous version let a bad block in | |
// NOTE: We don't currently (re-)invoke ContextualCheckBlock() or | |
// ContextualCheckBlockHeader() here. This means that if we add a new | |
// consensus rule that is enforced in one of those two functions, then we | |
// may have let in a block that violates the rule prior to updating the | |
// software, and we would NOT be enforcing the rule here. Fully solving | |
// upgrade from one software version to the next after a consensus rule | |
// change is potentially tricky and issue-specific (see RewindBlockIndex() | |
// for one general approach that was used for BIP 141 deployment). | |
// Also, currently the rule against blocks more than 2 hours in the future | |
// is enforced in ContextualCheckBlockHeader(); we wouldn't want to | |
// re-enforce that rule here (at least until we make it impossible for | |
// GetAdjustedTime() to go backward). | |
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) { | |
if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) { | |
// We don't write down blocks to disk if they may have been | |
// corrupted, so this should be impossible unless we're having hardware | |
// problems. | |
return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down"); | |
} | |
return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString()); | |
} | |
// verify that the view's current state corresponds to the previous block | |
uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash(); | |
assert(hashPrevBlock == view.GetBestBlock()); | |
nBlocksTotal++; | |
// Special case for the genesis block, skipping connection of its transactions | |
// (its coinbase is unspendable) | |
if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) { | |
if (!fJustCheck) | |
view.SetBestBlock(pindex->GetBlockHash()); | |
return true; | |
} | |
bool fScriptChecks = true; | |
if (!hashAssumeValid.IsNull()) { | |
// We've been configured with the hash of a block which has been externally verified to have a valid history. | |
// A suitable default value is included with the software and updated from time to time. Because validity | |
// relative to a piece of software is an objective fact these defaults can be easily reviewed. | |
// This setting doesn't force the selection of any particular chain but makes validating some faster by | |
// effectively caching the result of part of the verification. | |
BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid); | |
if (it != m_blockman.m_block_index.end()) { | |
if (it->second->GetAncestor(pindex->nHeight) == pindex && | |
pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && | |
pindexBestHeader->nChainWork >= nMinimumChainWork) { | |
// This block is a member of the assumed verified chain and an ancestor of the best header. | |
// Script verification is skipped when connecting blocks under the | |
// assumevalid block. Assuming the assumevalid block is valid this | |
// is safe because block merkle hashes are still computed and checked.
// Of course, if an assumed valid block is invalid due to false scriptSigs | |
// this optimization would allow an invalid chain to be accepted. | |
// The equivalent time check discourages hash power from extorting the network via DOS attack | |
// into accepting an invalid block through telling users they must manually set assumevalid. | |
// Requiring a software change or burying the invalid block, regardless of the setting, makes | |
// it hard to hide the implication of the demand. This also avoids having release candidates | |
// that are hardly doing any signature verification at all in testing without having to | |
// artificially set the default assumed verified block further back. | |
// The test against nMinimumChainWork prevents the skipping when denied access to any chain at | |
// least as good as the expected chain. | |
fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2); | |
} | |
} | |
} | |
int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart; | |
LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal); | |
// Do not allow blocks that contain transactions which 'overwrite' older transactions, | |
// unless those are already completely spent. | |
// If such overwrites are allowed, coinbases and transactions depending upon those | |
// can be duplicated to remove the ability to spend the first instance -- even after | |
// being sent to another address. | |
// See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information. | |
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool | |
// already refuses previously-known transaction ids entirely. | |
// This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC. | |
// Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the | |
// two in the chain that violate it. This prevents exploiting the issue against nodes during their | |
// initial block download. | |
bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) || | |
(pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"))); | |
// Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting | |
// with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the | |
// time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first | |
// before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further | |
// duplicate transactions descending from the known pairs either. | |
// If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check. | |
// BIP34 requires that a block at height X (block X) has its coinbase | |
// scriptSig start with a CScriptNum of X (indicated height X). The above | |
// logic of no longer requiring BIP30 once BIP34 activates is flawed in the | |
// case that there is a block X before the BIP34 height of 227,931 which has | |
// an indicated height Y where Y is greater than X. The coinbase for block | |
// X would also be a valid coinbase for block Y, which could be a BIP30 | |
// violation. An exhaustive search of all mainnet coinbases before the | |
// BIP34 height which have an indicated height greater than the block height | |
// reveals many occurrences. The 3 lowest indicated heights found are | |
// 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3 | |
// heights would be the first opportunity for BIP30 to be violated. | |
// The search reveals a great many blocks which have an indicated height | |
// greater than 1,983,702, so we simply remove the optimization to skip | |
// BIP30 checking for blocks at height 1,983,702 or higher. Before we reach | |
// that block in another 25 years or so, we should take advantage of a | |
// future consensus change to do a new and improved version of BIP34 that | |
// will actually prevent ever creating any duplicate coinbases in the | |
// future. | |
static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702; | |
// There is no potential to create a duplicate coinbase at block 209,921 | |
// because this is still before the BIP34 height and so explicit BIP30 | |
// checking is still active. | |
// The final case is block 176,684 which has an indicated height of | |
// 490,897. Unfortunately, this issue was not discovered until about 2 weeks | |
// before block 490,897 so there was not much opportunity to address this | |
// case other than to carefully analyze it and determine it would not be a | |
// problem. Block 490,897 was, in fact, mined with a different coinbase than | |
// block 176,684, but it is important to note that even if it hadn't been or | |
// is remined on an alternate fork with a duplicate coinbase, we would still | |
// not run into a BIP30 violation. This is because the coinbase for 176,684 | |
// is spent in block 185,956 in transaction | |
// d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This | |
// spending transaction can't be duplicated because it also spends coinbase | |
// 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This | |
// coinbase has an indicated height of over 4.2 billion, and wouldn't be | |
// duplicatable until that height, and it's currently impossible to create a | |
// chain that long. Nevertheless we may wish to consider a future soft fork | |
// which retroactively prevents block 490,897 from creating a duplicate | |
// coinbase. The two historical BIP30 violations often provide a confusing | |
// edge case when manipulating the UTXO and it would be simpler not to have | |
// another edge case to deal with. | |
// testnet3 has no blocks before the BIP34 height with indicated heights | |
// post BIP34 before approximately height 486,000,000 and presumably will | |
// be reset before it reaches block 1,983,702 and starts doing unnecessary | |
// BIP30 checking again. | |
assert(pindex->pprev); | |
CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height); | |
//Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond. | |
fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash)); | |
// TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a | |
// consensus change that ensures coinbases at those heights can not | |
// duplicate earlier coinbases. | |
if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) { | |
for (const auto& tx : block.vtx) { | |
for (size_t o = 0; o < tx->vout.size(); o++) { | |
if (view.HaveCoin(COutPoint(tx->GetHash(), o))) { | |
LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n"); | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30"); | |
} | |
} | |
} | |
} | |
// Start enforcing BIP68 (sequence locks) | |
int nLockTimeFlags = 0; | |
if (pindex->nHeight >= chainparams.GetConsensus().CSVHeight) { | |
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE; | |
} | |
// Get the script flags for this block | |
unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus()); | |
int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1; | |
LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal); | |
CBlockUndo blockundo; | |
// Precomputed transaction data pointers must not be invalidated | |
// until after `control` has run the script checks (potentially | |
// in multiple threads). Preallocate the vector size so a new allocation | |
// doesn't invalidate pointers into the vector, and keep txsdata in scope | |
// for as long as `control`. | |
CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr); | |
std::vector<PrecomputedTransactionData> txsdata(block.vtx.size()); | |
std::vector<int> prevheights; | |
CAmount nFees = 0; | |
int nInputs = 0; | |
int64_t nSigOpsCost = 0; | |
blockundo.vtxundo.reserve(block.vtx.size() - 1); | |
for (unsigned int i = 0; i < block.vtx.size(); i++) | |
{ | |
const CTransaction &tx = *(block.vtx[i]); | |
nInputs += tx.vin.size(); | |
if (!tx.IsCoinBase()) | |
{ | |
CAmount txfee = 0; | |
TxValidationState tx_state; | |
if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) { | |
// Any transaction validation failure in ConnectBlock is a block consensus failure | |
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, | |
tx_state.GetRejectReason(), tx_state.GetDebugMessage()); | |
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString()); | |
} | |
nFees += txfee; | |
if (!MoneyRange(nFees)) { | |
LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__); | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange"); | |
} | |
// Check that transaction is BIP68 final | |
// BIP68 lock checks (as opposed to nLockTime checks) must | |
// be in ConnectBlock because they require the UTXO set | |
prevheights.resize(tx.vin.size()); | |
for (size_t j = 0; j < tx.vin.size(); j++) { | |
prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight; | |
} | |
if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) { | |
LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__); | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal"); | |
} | |
} | |
// GetTransactionSigOpCost counts 3 types of sigops: | |
// * legacy (always) | |
// * p2sh (when P2SH enabled in flags and excludes coinbase) | |
// * witness (when witness enabled in flags and excludes coinbase) | |
nSigOpsCost += GetTransactionSigOpCost(tx, view, flags); | |
if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) { | |
LogPrintf("ERROR: ConnectBlock(): too many sigops\n"); | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops"); | |
} | |
if (!tx.IsCoinBase()) | |
{ | |
std::vector<CScriptCheck> vChecks; | |
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */ | |
TxValidationState tx_state; | |
if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) { | |
// Any transaction validation failure in ConnectBlock is a block consensus failure | |
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, | |
tx_state.GetRejectReason(), tx_state.GetDebugMessage()); | |
return error("ConnectBlock(): CheckInputScripts on %s failed with %s", | |
tx.GetHash().ToString(), state.ToString()); | |
} | |
control.Add(vChecks); | |
} | |
CTxUndo undoDummy; | |
if (i > 0) { | |
blockundo.vtxundo.push_back(CTxUndo()); | |
} | |
UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight); | |
} | |
int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2; | |
LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal); | |
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus()); | |
if (block.vtx[0]->GetValueOut() > blockReward) { | |
LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward); | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount"); | |
} | |
if (!control.Wait()) { | |
LogPrintf("ERROR: %s: CheckQueue failed\n", __func__); | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed"); | |
} | |
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2; | |
LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal); | |
if (fJustCheck) | |
return true; | |
if (!WriteUndoDataForBlock(blockundo, state, pindex, chainparams)) | |
return false; | |
if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) { | |
pindex->RaiseValidity(BLOCK_VALID_SCRIPTS); | |
setDirtyBlockIndex.insert(pindex); | |
} | |
assert(pindex->phashBlock); | |
// add this block to the view's block chain | |
view.SetBestBlock(pindex->GetBlockHash()); | |
int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4; | |
LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal); | |
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5; | |
LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal); | |
return true; | |
} | |
CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool) | |
{ | |
return this->GetCoinsCacheSizeState( | |
tx_pool, | |
m_coinstip_cache_size_bytes, | |
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000); | |
} | |
CoinsCacheSizeState CChainState::GetCoinsCacheSizeState( | |
const CTxMemPool* tx_pool, | |
size_t max_coins_cache_size_bytes, | |
size_t max_mempool_size_bytes) | |
{ | |
const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0; | |
int64_t cacheSize = CoinsTip().DynamicMemoryUsage(); | |
int64_t nTotalSpace = | |
max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0); | |
//! No need to flush periodically if at least this much space is still available.
static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB | |
int64_t large_threshold = | |
std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES); | |
if (cacheSize > nTotalSpace) { | |
LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace); | |
return CoinsCacheSizeState::CRITICAL; | |
} else if (cacheSize > large_threshold) { | |
return CoinsCacheSizeState::LARGE; | |
} | |
return CoinsCacheSizeState::OK; | |
} | |
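// Worked example of the thresholds above: with nTotalSpace = 100 MB, the
// large-cache threshold is max(90 MB, 100 MB - 10 MiB) = 90 MB, so the
// returned state is OK below 90 MB of cache usage, LARGE between 90 MB and
// 100 MB, and CRITICAL once usage exceeds the total space itself.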
bool CChainState::FlushStateToDisk( | |
const CChainParams& chainparams, | |
BlockValidationState &state, | |
FlushStateMode mode, | |
int nManualPruneHeight) | |
{ | |
LOCK(cs_main); | |
assert(this->CanFlushToDisk()); | |
static std::chrono::microseconds nLastWrite{0}; | |
static std::chrono::microseconds nLastFlush{0}; | |
std::set<int> setFilesToPrune; | |
bool full_flush_completed = false; | |
const size_t coins_count = CoinsTip().GetCacheSize(); | |
const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage(); | |
try { | |
{ | |
bool fFlushForPrune = false; | |
bool fDoFullFlush = false; | |
CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&m_mempool); | |
LOCK(cs_LastBlockFile); | |
if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { | |
// make sure we don't prune above the block filter indexes' best blocks
// pruning is height-based | |
int last_prune = m_chain.Height(); // last height we can prune | |
ForEachBlockFilterIndex([&](BlockFilterIndex& index) { | |
last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height)); | |
}); | |
if (nManualPruneHeight > 0) { | |
LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH); | |
m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height()); | |
} else { | |
LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH); | |
m_blockman.FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload()); | |
fCheckForPruning = false; | |
} | |
if (!setFilesToPrune.empty()) { | |
fFlushForPrune = true; | |
if (!fHavePruned) { | |
pblocktree->WriteFlag("prunedblockfiles", true); | |
fHavePruned = true; | |
} | |
} | |
} | |
const auto nNow = GetTime<std::chrono::microseconds>(); | |
// Avoid writing/flushing immediately after startup. | |
if (nLastWrite.count() == 0) { | |
nLastWrite = nNow; | |
} | |
if (nLastFlush.count() == 0) { | |
nLastFlush = nNow; | |
} | |
// The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of processing a block).
bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE; | |
// The cache is over the limit, we have to write now. | |
bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL; | |
// It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. | |
bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL; | |
// It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. | |
bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL; | |
// Combine all conditions that result in a full cache flush. | |
fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; | |
// Write blocks and block index to disk. | |
if (fDoFullFlush || fPeriodicWrite) { | |
// Depend on nMinDiskSpace to ensure we can write block index | |
if (!CheckDiskSpace(GetBlocksDir())) { | |
return AbortNode(state, "Disk space is too low!", _("Disk space is too low!")); | |
} | |
{ | |
LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH); | |
// First make sure all block and undo data is flushed to disk. | |
FlushBlockFile(); | |
} | |
// Then update all block file information (which may refer to block and undo files). | |
{ | |
LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH); | |
std::vector<std::pair<int, const CBlockFileInfo*> > vFiles; | |
vFiles.reserve(setDirtyFileInfo.size()); | |
for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { | |
vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it])); | |
setDirtyFileInfo.erase(it++); | |
} | |
std::vector<const CBlockIndex*> vBlocks; | |
vBlocks.reserve(setDirtyBlockIndex.size()); | |
for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { | |
vBlocks.push_back(*it); | |
setDirtyBlockIndex.erase(it++); | |
} | |
if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { | |
return AbortNode(state, "Failed to write to block index database"); | |
} | |
} | |
// Finally remove any pruned files | |
if (fFlushForPrune) { | |
LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH); | |
UnlinkPrunedFiles(setFilesToPrune); | |
} | |
nLastWrite = nNow; | |
} | |
// Flush best chain related state. This can only be done if the blocks / block index write was also done. | |
if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) { | |
LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)", | |
coins_count, coins_mem_usage / 1000)); | |
// Typical Coin structures on disk are around 48 bytes in size. | |
// Pushing a new one to the database can cause it to be written | |
// twice (once in the log, and once in the tables). This is already | |
// an overestimation, as most will delete an existing entry or | |
// overwrite one. Still, use a conservative safety factor of 2. | |
if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) { | |
return AbortNode(state, "Disk space is too low!", _("Disk space is too low!")); | |
} | |
// Flush the chainstate (which may refer to block index entries). | |
if (!CoinsTip().Flush()) | |
return AbortNode(state, "Failed to write to coin database"); | |
nLastFlush = nNow; | |
full_flush_completed = true; | |
} | |
} | |
if (full_flush_completed) { | |
// Update best block in wallet (so we can detect restored wallets). | |
GetMainSignals().ChainStateFlushed(m_chain.GetLocator()); | |
} | |
} catch (const std::runtime_error& e) { | |
return AbortNode(state, std::string("System error while flushing: ") + e.what()); | |
} | |
return true; | |
} | |
void CChainState::ForceFlushStateToDisk() { | |
BlockValidationState state; | |
const CChainParams& chainparams = Params(); | |
if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) { | |
LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); | |
} | |
} | |
void CChainState::PruneAndFlush() { | |
BlockValidationState state; | |
fCheckForPruning = true; | |
const CChainParams& chainparams = Params(); | |
if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) { | |
LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); | |
} | |
} | |
static void DoWarning(const bilingual_str& warning) | |
{ | |
static bool fWarned = false; | |
SetMiscWarning(warning); | |
if (!fWarned) { | |
AlertNotify(warning.original); | |
fWarned = true; | |
} | |
} | |
/** Private helper function that concatenates warning messages. */ | |
static void AppendWarning(bilingual_str& res, const bilingual_str& warn) | |
{ | |
if (!res.empty()) res += Untranslated(", "); | |
res += warn; | |
} | |
/** Check warning conditions and do some notifications on new chain tip set. */ | |
static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const CChainParams& chainParams, CChainState& active_chainstate) | |
EXCLUSIVE_LOCKS_REQUIRED(::cs_main) | |
{ | |
// New best block | |
mempool.AddTransactionsUpdated(1); | |
{ | |
LOCK(g_best_block_mutex); | |
g_best_block = pindexNew->GetBlockHash(); | |
g_best_block_cv.notify_all(); | |
} | |
bilingual_str warning_messages; | |
assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); | |
if (!active_chainstate.IsInitialBlockDownload()) { | |
const CBlockIndex* pindex = pindexNew; | |
for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) { | |
WarningBitsConditionChecker checker(bit); | |
ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]); | |
if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) { | |
const bilingual_str warning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit); | |
if (state == ThresholdState::ACTIVE) { | |
DoWarning(warning); | |
} else { | |
AppendWarning(warning_messages, warning); | |
} | |
} | |
} | |
} | |
assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); | |
LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__, | |
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion, | |
log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx, | |
FormatISO8601DateTime(pindexNew->GetBlockTime()), | |
GuessVerificationProgress(chainParams.TxData(), pindexNew), active_chainstate.CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), active_chainstate.CoinsTip().GetCacheSize(), | |
!warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : ""); | |
} | |
/** Disconnect m_chain's tip. | |
* After calling, the mempool will be in an inconsistent state, with | |
* transactions from disconnected blocks being added to disconnectpool. You | |
 * should make the mempool consistent again by calling UpdateMempoolForReorg,
 * with cs_main held.
* | |
* If disconnectpool is nullptr, then no disconnected transactions are added to | |
* disconnectpool (note that the caller is responsible for mempool consistency | |
* in any case). | |
*/ | |
bool CChainState::DisconnectTip(BlockValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions* disconnectpool) | |
{ | |
AssertLockHeld(cs_main); | |
AssertLockHeld(m_mempool.cs); | |
CBlockIndex *pindexDelete = m_chain.Tip(); | |
assert(pindexDelete); | |
// Read block from disk. | |
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); | |
CBlock& block = *pblock; | |
if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus())) | |
return error("DisconnectTip(): Failed to read block"); | |
// Apply the block atomically to the chain state. | |
int64_t nStart = GetTimeMicros(); | |
{ | |
CCoinsViewCache view(&CoinsTip()); | |
assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); | |
if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) | |
return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); | |
bool flushed = view.Flush(); | |
assert(flushed); | |
} | |
LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI); | |
// Write the chain state to disk, if necessary. | |
if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED)) | |
return false; | |
if (disconnectpool) { | |
// Save transactions to re-add to mempool at end of reorg | |
for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) { | |
disconnectpool->addTransaction(*it); | |
} | |
while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) { | |
// Drop the earliest entry, and remove its children from the mempool. | |
auto it = disconnectpool->queuedTx.get<insertion_order>().begin(); | |
m_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); | |
disconnectpool->removeEntry(it); | |
} | |
} | |
m_chain.SetTip(pindexDelete->pprev); | |
UpdateTip(m_mempool, pindexDelete->pprev, chainparams, *this); | |
// Let wallets know transactions went from 1-confirmed to | |
// 0-confirmed or conflicted: | |
GetMainSignals().BlockDisconnected(pblock, pindexDelete); | |
return true; | |
} | |
static int64_t nTimeReadFromDisk = 0; | |
static int64_t nTimeConnectTotal = 0; | |
static int64_t nTimeFlush = 0; | |
static int64_t nTimeChainState = 0; | |
static int64_t nTimePostConnect = 0; | |
struct PerBlockConnectTrace { | |
CBlockIndex* pindex = nullptr; | |
std::shared_ptr<const CBlock> pblock; | |
PerBlockConnectTrace() {} | |
}; | |
/** | |
* Used to track blocks whose transactions were applied to the UTXO state as a | |
* part of a single ActivateBestChainStep call. | |
* | |
 * This class is single-use; once you call GetBlocksConnected() you have to throw
* it away and make a new one. | |
*/ | |
class ConnectTrace { | |
private: | |
std::vector<PerBlockConnectTrace> blocksConnected; | |
public: | |
explicit ConnectTrace() : blocksConnected(1) {} | |
void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) { | |
assert(!blocksConnected.back().pindex); | |
assert(pindex); | |
assert(pblock); | |
blocksConnected.back().pindex = pindex; | |
blocksConnected.back().pblock = std::move(pblock); | |
blocksConnected.emplace_back(); | |
} | |
std::vector<PerBlockConnectTrace>& GetBlocksConnected() { | |
// We always keep one extra block at the end of our list because | |
// blocks are added after all the conflicted transactions have | |
// been filled in. Thus, the last entry should always be an empty | |
// one waiting for the transactions from the next block. We pop | |
// the last entry here to make sure the list we return is sane. | |
assert(!blocksConnected.back().pindex); | |
blocksConnected.pop_back(); | |
return blocksConnected; | |
} | |
}; | |
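// Typical shape of a ConnectTrace round trip (illustrative sketch of how
// ActivateBestChainStep and ActivateBestChain use the class; not new code):
//
//   ConnectTrace connectTrace;
//   // ... each successfully connected block is recorded:
//   //         connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
//   for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
//       GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
//   }
//   // GetBlocksConnected() may be called only once; construct a fresh
//   // ConnectTrace for the next ActivateBestChainStep call.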
/** | |
* Connect a new block to m_chain. pblock is either nullptr or a pointer to a CBlock | |
* corresponding to pindexNew, to bypass loading it again from disk. | |
* | |
* The block is added to connectTrace if connection succeeds. | |
*/ | |
bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool) | |
{ | |
AssertLockHeld(cs_main); | |
AssertLockHeld(m_mempool.cs); | |
assert(pindexNew->pprev == m_chain.Tip()); | |
// Read block from disk. | |
int64_t nTime1 = GetTimeMicros(); | |
std::shared_ptr<const CBlock> pthisBlock; | |
if (!pblock) { | |
std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>(); | |
if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus())) | |
return AbortNode(state, "Failed to read block"); | |
pthisBlock = pblockNew; | |
} else { | |
pthisBlock = pblock; | |
} | |
const CBlock& blockConnecting = *pthisBlock; | |
// Apply the block atomically to the chain state. | |
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; | |
int64_t nTime3; | |
LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO); | |
{ | |
CCoinsViewCache view(&CoinsTip()); | |
bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams); | |
GetMainSignals().BlockChecked(blockConnecting, state); | |
if (!rv) { | |
if (state.IsInvalid()) | |
InvalidBlockFound(pindexNew, state); | |
return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString()); | |
} | |
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; | |
assert(nBlocksTotal > 0); | |
LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal); | |
bool flushed = view.Flush(); | |
assert(flushed); | |
} | |
int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; | |
LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal); | |
// Write the chain state to disk, if necessary. | |
if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED)) | |
return false; | |
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; | |
LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal); | |
// Remove conflicting transactions from the mempool. | |
m_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight); | |
disconnectpool.removeForBlock(blockConnecting.vtx); | |
// Update m_chain & related variables. | |
m_chain.SetTip(pindexNew); | |
UpdateTip(m_mempool, pindexNew, chainparams, *this); | |
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; | |
LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal); | |
LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal); | |
connectTrace.BlockConnected(pindexNew, std::move(pthisBlock)); | |
return true; | |
} | |
/** | |
 * Return the tip of the chain with the most work in it that isn't | |
 * known to be invalid (though it is far from certain to be valid). | |
*/ | |
CBlockIndex* CChainState::FindMostWorkChain() { | |
do { | |
CBlockIndex *pindexNew = nullptr; | |
// Find the best candidate header. | |
{ | |
std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin(); | |
if (it == setBlockIndexCandidates.rend()) | |
return nullptr; | |
pindexNew = *it; | |
} | |
// Check whether all blocks on the path between the currently active chain and the candidate are valid. | |
// Only walking back until we reach the active chain is an optimization, as we know all blocks in it are valid already. | |
CBlockIndex *pindexTest = pindexNew; | |
bool fInvalidAncestor = false; | |
while (pindexTest && !m_chain.Contains(pindexTest)) { | |
assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0); | |
// Pruned nodes may have entries in setBlockIndexCandidates for | |
// which block files have been deleted. Remove those as candidates | |
// for the most work chain if we come across them; we can't switch | |
// to a chain unless we have all the non-active-chain parent blocks. | |
bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK; | |
bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); | |
if (fFailedChain || fMissingData) { | |
// Candidate chain is not usable (either invalid or missing data) | |
if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork)) | |
pindexBestInvalid = pindexNew; | |
CBlockIndex *pindexFailed = pindexNew; | |
// Remove the entire chain from the set. | |
while (pindexTest != pindexFailed) { | |
if (fFailedChain) { | |
pindexFailed->nStatus |= BLOCK_FAILED_CHILD; | |
} else if (fMissingData) { | |
// If we're missing data, then add back to m_blocks_unlinked, | |
// so that if the block arrives in the future we can try adding | |
// to setBlockIndexCandidates again. | |
m_blockman.m_blocks_unlinked.insert( | |
std::make_pair(pindexFailed->pprev, pindexFailed)); | |
} | |
setBlockIndexCandidates.erase(pindexFailed); | |
pindexFailed = pindexFailed->pprev; | |
} | |
setBlockIndexCandidates.erase(pindexTest); | |
fInvalidAncestor = true; | |
break; | |
} | |
pindexTest = pindexTest->pprev; | |
} | |
if (!fInvalidAncestor) | |
return pindexNew; | |
} while(true); | |
} | |
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */ | |
void CChainState::PruneBlockIndexCandidates() { | |
// Note that we can't delete the current block itself, as we may need to return to it later in case a | |
// reorganization to a better block fails. | |
std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin(); | |
while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) { | |
setBlockIndexCandidates.erase(it++); | |
} | |
// Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates. | |
assert(!setBlockIndexCandidates.empty()); | |
} | |
/** | |
* Try to make some progress towards making pindexMostWork the active block. | |
* pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork. | |
* | |
* @returns true unless a system error occurred | |
*/ | |
bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) | |
{ | |
AssertLockHeld(cs_main); | |
AssertLockHeld(m_mempool.cs); | |
assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); | |
const CBlockIndex* pindexOldTip = m_chain.Tip(); | |
const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork); | |
// Disconnect active blocks which are no longer in the best chain. | |
bool fBlocksDisconnected = false; | |
DisconnectedBlockTransactions disconnectpool; | |
while (m_chain.Tip() && m_chain.Tip() != pindexFork) { | |
if (!DisconnectTip(state, chainparams, &disconnectpool)) { | |
// This is likely a fatal error, but keep the mempool consistent, | |
// just in case. Only remove from the mempool in this case. | |
UpdateMempoolForReorg(*this, m_mempool, disconnectpool, false); | |
// If we're unable to disconnect a block during normal operation, | |
// then that is a failure of our local system -- we should abort | |
// rather than stay on a less work chain. | |
AbortNode(state, "Failed to disconnect block; see debug.log for details"); | |
return false; | |
} | |
fBlocksDisconnected = true; | |
} | |
// Build list of new blocks to connect (in descending height order). | |
std::vector<CBlockIndex*> vpindexToConnect; | |
bool fContinue = true; | |
int nHeight = pindexFork ? pindexFork->nHeight : -1; | |
while (fContinue && nHeight != pindexMostWork->nHeight) { | |
// Don't iterate the entire list of potential improvements toward the best tip, as we likely only need | |
// a few blocks along the way. | |
int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight); | |
vpindexToConnect.clear(); | |
vpindexToConnect.reserve(nTargetHeight - nHeight); | |
CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight); | |
while (pindexIter && pindexIter->nHeight != nHeight) { | |
vpindexToConnect.push_back(pindexIter); | |
pindexIter = pindexIter->pprev; | |
} | |
nHeight = nTargetHeight; | |
// Connect new blocks. | |
for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) { | |
if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) { | |
if (state.IsInvalid()) { | |
// The block violates a consensus rule. | |
if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) { | |
InvalidChainFound(vpindexToConnect.front()); | |
} | |
state = BlockValidationState(); | |
fInvalidFound = true; | |
fContinue = false; | |
break; | |
} else { | |
// A system error occurred (disk space, database error, ...). | |
// Make the mempool consistent with the current tip, just in case | |
// any observers try to use it before shutdown. | |
UpdateMempoolForReorg(*this, m_mempool, disconnectpool, false); | |
return false; | |
} | |
} else { | |
PruneBlockIndexCandidates(); | |
if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) { | |
// We're in a better position than we were. Return temporarily to release the lock. | |
fContinue = false; | |
break; | |
} | |
} | |
} | |
} | |
if (fBlocksDisconnected) { | |
// If any blocks were disconnected, disconnectpool may be non-empty. Add | |
// any disconnected transactions back to the mempool. | |
UpdateMempoolForReorg(*this, m_mempool, disconnectpool, true); | |
} | |
m_mempool.check(*this); | |
CheckForkWarningConditions(); | |
return true; | |
} | |
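/** Return the SynchronizationState to report to the UI: post-init, reindexing, or still in initial block download. */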
static SynchronizationState GetSynchronizationState(bool init) | |
{ | |
if (!init) return SynchronizationState::POST_INIT; | |
if (::fReindex) return SynchronizationState::INIT_REINDEX; | |
return SynchronizationState::INIT_DOWNLOAD; | |
} | |
static bool NotifyHeaderTip(CChainState& chainstate) LOCKS_EXCLUDED(cs_main) { | |
bool fNotify = false; | |
bool fInitialBlockDownload = false; | |
static CBlockIndex* pindexHeaderOld = nullptr; | |
CBlockIndex* pindexHeader = nullptr; | |
{ | |
LOCK(cs_main); | |
pindexHeader = pindexBestHeader; | |
if (pindexHeader != pindexHeaderOld) { | |
fNotify = true; | |
assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate)); | |
fInitialBlockDownload = chainstate.IsInitialBlockDownload(); | |
pindexHeaderOld = pindexHeader; | |
} | |
} | |
// Send header tip changed notifications without holding cs_main | |
if (fNotify) { | |
uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader); | |
} | |
return fNotify; | |
} | |
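/**
 * If more than a handful of callbacks are already queued on the validation
 * interface, block until the queue drains. This bounds memory usage when blocks
 * are connected faster than notifications can be processed (e.g. during reindex).
 */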
static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) { | |
AssertLockNotHeld(cs_main); | |
if (GetMainSignals().CallbacksPending() > 10) { | |
SyncWithValidationInterfaceQueue(); | |
} | |
} | |
bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) { | |
// Note that while we're often called here from ProcessNewBlock, this is | |
// far from a guarantee. Things in the P2P/RPC layers will often end up calling | |
// us in the middle of ProcessNewBlock - do not assume pblock is set | |
// sanely for performance or correctness! | |
AssertLockNotHeld(cs_main); | |
// ABC maintains a fair degree of expensive-to-calculate internal state. | |
// This function periodically releases cs_main so that it does not lock up other threads for too long | |
// during large connects, and to allow e.g. the callback queue to drain. | |
// We use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time. | |
LOCK(m_cs_chainstate); | |
CBlockIndex *pindexMostWork = nullptr; | |
CBlockIndex *pindexNewTip = nullptr; | |
int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT); | |
do { | |
// Block until the validation queue drains. This should largely | |
// never happen in normal operation, but may happen during | |
// reindex, causing memory blowup if we run too far ahead. | |
// Note that if a validationinterface callback ends up calling | |
// ActivateBestChain this may lead to a deadlock! We should | |
// probably have a DEBUG_LOCKORDER test for this in the future. | |
LimitValidationInterfaceQueue(); | |
{ | |
LOCK(cs_main); | |
LOCK(m_mempool.cs); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed | |
CBlockIndex* starting_tip = m_chain.Tip(); | |
bool blocks_connected = false; | |
do { | |
// We absolutely may not unlock cs_main until we've made forward progress | |
// (with the exception of shutdown due to hardware issues, low disk space, etc). | |
ConnectTrace connectTrace; // Destructed before cs_main is unlocked | |
if (pindexMostWork == nullptr) { | |
pindexMostWork = FindMostWorkChain(); | |
} | |
// Whether we have anything to do at all. | |
if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) { | |
break; | |
} | |
bool fInvalidFound = false; | |
std::shared_ptr<const CBlock> nullBlockPtr; | |
if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) { | |
// A system error occurred | |
return false; | |
} | |
blocks_connected = true; | |
if (fInvalidFound) { | |
// Wipe cache, we may need another branch now. | |
pindexMostWork = nullptr; | |
} | |
pindexNewTip = m_chain.Tip(); | |
for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) { | |
assert(trace.pblock && trace.pindex); | |
GetMainSignals().BlockConnected(trace.pblock, trace.pindex); | |
} | |
} while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip))); | |
if (!blocks_connected) return true; | |
const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip); | |
bool fInitialDownload = IsInitialBlockDownload(); | |
// Notify external listeners about the new tip. | |
// Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected | |
if (pindexFork != pindexNewTip) { | |
// Notify ValidationInterface subscribers | |
GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); | |
// Always notify the UI if a new block tip was connected | |
uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip); | |
} | |
} | |
// When we reach this point, we switched to a new tip (stored in pindexNewTip). | |
if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown(); | |
// We check shutdown only after giving ActivateBestChainStep a chance to run once so that we | |
// never shutdown before connecting the genesis block during LoadChainTip(). Previously this | |
// caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks | |
// that the best block hash is non-null. | |
if (ShutdownRequested()) break; | |
} while (pindexNewTip != pindexMostWork); | |
CheckBlockIndex(chainparams.GetConsensus()); | |
// Write changes periodically to disk, after relay. | |
if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) { | |
return false; | |
} | |
return true; | |
} | |
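/**
 * Mark a block as precious: treat it as if it had been received earlier than any
 * competing chain tip of equal work (by assigning it a lower, negative nSequenceId),
 * then try to make it the new tip via ActivateBestChain.
 */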
bool CChainState::PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex) | |
{ | |
{ | |
LOCK(cs_main); | |
if (pindex->nChainWork < m_chain.Tip()->nChainWork) { | |
// Nothing to do, this block is not at the tip. | |
return true; | |
} | |
if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) { | |
// The chain has been extended since the last call, reset the counter. | |
nBlockReverseSequenceId = -1; | |
} | |
nLastPreciousChainwork = m_chain.Tip()->nChainWork; | |
setBlockIndexCandidates.erase(pindex); | |
pindex->nSequenceId = nBlockReverseSequenceId; | |
if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) { | |
// We can't keep reducing the counter if somebody really wants to | |
// call preciousblock 2**31-1 times on the same set of tips... | |
nBlockReverseSequenceId--; | |
} | |
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) { | |
setBlockIndexCandidates.insert(pindex); | |
PruneBlockIndexCandidates(); | |
} | |
} | |
return ActivateBestChain(state, params, std::shared_ptr<const CBlock>()); | |
} | |
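/**
 * Mark a block as invalid, disconnecting it and all of its descendants from the
 * active chain if necessary, and repopulate setBlockIndexCandidates with the best
 * remaining alternative tips.
 */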
bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) | |
{ | |
CBlockIndex* to_mark_failed = pindex; | |
bool pindex_was_in_chain = false; | |
int disconnected = 0; | |
// We do not allow ActivateBestChain() to run while InvalidateBlock() is | |
// running, as that could cause the tip to change while we disconnect | |
// blocks. | |
LOCK(m_cs_chainstate); | |
// We'll be acquiring and releasing cs_main below, to allow the validation | |
// callbacks to run. However, we should keep the block index in a | |
// consistent state as we disconnect blocks -- in particular we need to | |
// add equal-work blocks to setBlockIndexCandidates as we disconnect. | |
// To avoid walking the block index repeatedly in search of candidates, | |
// build a map once so that we can look up candidate blocks by chain | |
// work as we go. | |
std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work; | |
{ | |
LOCK(cs_main); | |
for (const auto& entry : m_blockman.m_block_index) { | |
CBlockIndex *candidate = entry.second; | |
// We don't need to put anything in our active chain into the | |
// multimap, because those candidates will be found and considered | |
// as we disconnect. | |
// Instead, consider only non-active-chain blocks that have at | |
// least as much work as where we expect the new tip to end up. | |
if (!m_chain.Contains(candidate) && | |
!CBlockIndexWorkComparator()(candidate, pindex->pprev) && | |
candidate->IsValid(BLOCK_VALID_TRANSACTIONS) && | |
candidate->HaveTxsDownloaded()) { | |
candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate)); | |
} | |
} | |
} | |
// Disconnect (descendants of) pindex, and mark them invalid. | |
while (true) { | |
if (ShutdownRequested()) break; | |
// Make sure the queue of validation callbacks doesn't grow unboundedly. | |
LimitValidationInterfaceQueue(); | |
LOCK(cs_main); | |
LOCK(m_mempool.cs); // Lock for as long as disconnectpool is in scope to make sure UpdateMempoolForReorg is called after DisconnectTip without unlocking in between | |
if (!m_chain.Contains(pindex)) break; | |
pindex_was_in_chain = true; | |
CBlockIndex *invalid_walk_tip = m_chain.Tip(); | |
// ActivateBestChain considers blocks already in m_chain | |
// unconditionally valid, so force disconnect away from it. | |
DisconnectedBlockTransactions disconnectpool; | |
bool ret = DisconnectTip(state, chainparams, &disconnectpool); | |
// DisconnectTip will add transactions to disconnectpool. | |
// Adjust the mempool to be consistent with the new tip, adding | |
// transactions back to the mempool if disconnecting was successful, | |
// and we're not doing a very deep invalidation (in which case | |
// keeping the mempool up to date is probably futile anyway). | |
assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); | |
UpdateMempoolForReorg(*this, m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret); | |
if (!ret) return false; | |
assert(invalid_walk_tip->pprev == m_chain.Tip()); | |
// We immediately mark the disconnected blocks as invalid. | |
// This prevents a case where pruned nodes may fail to invalidateblock | |
// and be left unable to start as they have no tip candidates (as there | |
// are no blocks that meet the "have data and are not invalid per | |
// nStatus" criteria for inclusion in setBlockIndexCandidates). | |
invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID; | |
setDirtyBlockIndex.insert(invalid_walk_tip); | |
setBlockIndexCandidates.erase(invalid_walk_tip); | |
setBlockIndexCandidates.insert(invalid_walk_tip->pprev); | |
if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) { | |
// We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children | |
// need to be BLOCK_FAILED_CHILD instead. | |
to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD; | |
setDirtyBlockIndex.insert(to_mark_failed); | |
} | |
// Add any equal or more work headers to setBlockIndexCandidates | |
auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork); | |
while (candidate_it != candidate_blocks_by_work.end()) { | |
if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) { | |
setBlockIndexCandidates.insert(candidate_it->second); | |
candidate_it = candidate_blocks_by_work.erase(candidate_it); | |
} else { | |
++candidate_it; | |
} | |
} | |
// Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future | |
// iterations, or, if it's the last one, call InvalidChainFound on it. | |
to_mark_failed = invalid_walk_tip; | |
} | |
CheckBlockIndex(chainparams.GetConsensus()); | |
{ | |
LOCK(cs_main); | |
if (m_chain.Contains(to_mark_failed)) { | |
// If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed. | |
return false; | |
} | |
// Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain | |
to_mark_failed->nStatus |= BLOCK_FAILED_VALID; | |
setDirtyBlockIndex.insert(to_mark_failed); | |
setBlockIndexCandidates.erase(to_mark_failed); | |
m_blockman.m_failed_blocks.insert(to_mark_failed); | |
// If any new blocks somehow arrived while we were disconnecting | |
// (above), then the pre-calculation of what should go into | |
// setBlockIndexCandidates may have missed entries. This would | |
// technically be an inconsistency in the block index, but if we clean | |
// it up here, this should be an essentially unobservable error. | |
// Loop back over all block index entries and add any missing entries | |
// to setBlockIndexCandidates. | |
BlockMap::iterator it = m_blockman.m_block_index.begin(); | |
while (it != m_blockman.m_block_index.end()) { | |
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) { | |
setBlockIndexCandidates.insert(it->second); | |
} | |
it++; | |
} | |
InvalidChainFound(to_mark_failed); | |
} | |
// Only notify about a new block tip if the active chain was modified. | |
if (pindex_was_in_chain) { | |
uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev); | |
} | |
return true; | |
} | |
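/** Clear BLOCK_FAILED_* flags from pindex, all of its descendants, and all of its ancestors, so they can be reconsidered as chain tips. */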
void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) { | |
AssertLockHeld(cs_main); | |
int nHeight = pindex->nHeight; | |
// Remove the invalidity flag from this block and all its descendants. | |
BlockMap::iterator it = m_blockman.m_block_index.begin(); | |
while (it != m_blockman.m_block_index.end()) { | |
if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) { | |
it->second->nStatus &= ~BLOCK_FAILED_MASK; | |
setDirtyBlockIndex.insert(it->second); | |
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) { | |
setBlockIndexCandidates.insert(it->second); | |
} | |
if (it->second == pindexBestInvalid) { | |
// Reset invalid block marker if it was pointing to one of those. | |
pindexBestInvalid = nullptr; | |
} | |
m_blockman.m_failed_blocks.erase(it->second); | |
} | |
it++; | |
} | |
// Remove the invalidity flag from all ancestors too. | |
while (pindex != nullptr) { | |
if (pindex->nStatus & BLOCK_FAILED_MASK) { | |
pindex->nStatus &= ~BLOCK_FAILED_MASK; | |
setDirtyBlockIndex.insert(pindex); | |
m_blockman.m_failed_blocks.erase(pindex); | |
} | |
pindex = pindex->pprev; | |
} | |
} | |
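/**
 * Create a new CBlockIndex entry for the given header (or return the existing one),
 * link it to its parent, compute its cumulative chain work, and update
 * pindexBestHeader if it now has the most work.
 */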
CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block) | |
{ | |
AssertLockHeld(cs_main); | |
// Check for duplicate | |
uint256 hash = block.GetHash(); | |
BlockMap::iterator it = m_block_index.find(hash); | |
if (it != m_block_index.end()) | |
return it->second; | |
// Construct new block index object | |
CBlockIndex* pindexNew = new CBlockIndex(block); | |
// We assign the sequence id to blocks only when the full data is available, | |
// to avoid miners gaining a competitive advantage by withholding blocks | |
// while broadcasting only their headers. | |
pindexNew->nSequenceId = 0; | |
BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first; | |
pindexNew->phashBlock = &((*mi).first); | |
BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock); | |
if (miPrev != m_block_index.end()) | |
{ | |
pindexNew->pprev = (*miPrev).second; | |
pindexNew->nHeight = pindexNew->pprev->nHeight + 1; | |
pindexNew->BuildSkip(); | |
} | |
pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime); | |
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); | |
pindexNew->RaiseValidity(BLOCK_VALID_TREE); | |
if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork) | |
pindexBestHeader = pindexNew; | |
setDirtyBlockIndex.insert(pindexNew); | |
return pindexNew; | |
} | |
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */ | |
void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams) | |
{ | |
pindexNew->nTx = block.vtx.size(); | |
pindexNew->nChainTx = 0; | |
pindexNew->nFile = pos.nFile; | |
pindexNew->nDataPos = pos.nPos; | |
pindexNew->nUndoPos = 0; | |
pindexNew->nStatus |= BLOCK_HAVE_DATA; | |
if (IsWitnessEnabled(pindexNew->pprev, consensusParams)) { | |
pindexNew->nStatus |= BLOCK_OPT_WITNESS; | |
} | |
pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS); | |
setDirtyBlockIndex.insert(pindexNew); | |
if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) { | |
// If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS. | |
std::deque<CBlockIndex*> queue; | |
queue.push_back(pindexNew); | |
// Recursively process any descendant blocks that now may be eligible to be connected. | |
while (!queue.empty()) { | |
CBlockIndex *pindex = queue.front(); | |
queue.pop_front(); | |
pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx; | |
{ | |
LOCK(cs_nBlockSequenceId); | |
pindex->nSequenceId = nBlockSequenceId++; | |
} | |
if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) { | |
setBlockIndexCandidates.insert(pindex); | |
} | |
auto range = m_blockman.m_blocks_unlinked.equal_range(pindex); | |
while (range.first != range.second) { | |
auto it = range.first; | |
queue.push_back(it->second); | |
range.first++; | |
m_blockman.m_blocks_unlinked.erase(it); | |
} | |
} | |
} else { | |
if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) { | |
m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew)); | |
} | |
} | |
} | |
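/**
 * Find the position in the current blk?????.dat file at which to write nAddSize bytes,
 * rolling over to a new block file once the current one would exceed its maximum size,
 * and pre-allocating disk space in BLOCKFILE_CHUNK_SIZE increments as needed.
 */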
static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false) | |
{ | |
LOCK(cs_LastBlockFile); | |
unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile; | |
if (vinfoBlockFile.size() <= nFile) { | |
vinfoBlockFile.resize(nFile + 1); | |
} | |
bool finalize_undo = false; | |
if (!fKnown) { | |
while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64 KiB */ : MAX_BLOCKFILE_SIZE)) { | |
// When the undo file is keeping up with the block file, we want to flush it explicitly; | |
// when it is lagging behind (more blocks arrive than are being connected), we let the | |
// undo block write case handle it. | |
assert(std::addressof(::ChainActive()) == std::addressof(active_chain)); | |
finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight); | |
nFile++; | |
if (vinfoBlockFile.size() <= nFile) { | |
vinfoBlockFile.resize(nFile + 1); | |
} | |
} | |
pos.nFile = nFile; | |
pos.nPos = vinfoBlockFile[nFile].nSize; | |
} | |
if ((int)nFile != nLastBlockFile) { | |
if (!fKnown) { | |
LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString()); | |
} | |
FlushBlockFile(!fKnown, finalize_undo); | |
nLastBlockFile = nFile; | |
} | |
vinfoBlockFile[nFile].AddBlock(nHeight, nTime); | |
if (fKnown) | |
vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize); | |
else | |
vinfoBlockFile[nFile].nSize += nAddSize; | |
if (!fKnown) { | |
bool out_of_space; | |
size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space); | |
if (out_of_space) { | |
return AbortNode("Disk space is too low!", _("Disk space is too low!")); | |
} | |
if (bytes_allocated != 0 && fPruneMode) { | |
fCheckForPruning = true; | |
} | |
} | |
setDirtyFileInfo.insert(nFile); | |
return true; | |
} | |
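/** Reserve nAddSize bytes in the undo (rev?????.dat) file paired with block file nFile, pre-allocating disk space as needed. */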
static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize) | |
{ | |
pos.nFile = nFile; | |
LOCK(cs_LastBlockFile); | |
pos.nPos = vinfoBlockFile[nFile].nUndoSize; | |
vinfoBlockFile[nFile].nUndoSize += nAddSize; | |
setDirtyFileInfo.insert(nFile); | |
bool out_of_space; | |
size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space); | |
if (out_of_space) { | |
return AbortNode(state, "Disk space is too low!", _("Disk space is too low!")); | |
} | |
if (bytes_allocated != 0 && fPruneMode) { | |
fCheckForPruning = true; | |
} | |
return true; | |
} | |
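/** Context-free header checks: currently only that the header's hash satisfies the proof-of-work target claimed in nBits. */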
static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true) | |
{ | |
// Check proof of work matches claimed amount | |
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams)) | |
return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed"); | |
return true; | |
} | |
bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot) | |
{ | |
// These are checks that are independent of context. | |
if (block.fChecked) | |
return true; | |
// Check that the header is valid (particularly PoW). This is mostly | |
// redundant with the call in AcceptBlockHeader. | |
if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW)) | |
return false; | |
// Signet only: check block solution | |
if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) { | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure"); | |
} | |
// Check the merkle root. | |
if (fCheckMerkleRoot) { | |
bool mutated; | |
uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated); | |
if (block.hashMerkleRoot != hashMerkleRoot2) | |
return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch"); | |
// Check for merkle tree malleability (CVE-2012-2459): sequences of | |
// transactions in a block can be repeated without affecting the merkle | |
// root of the block, while still invalidating it. | |
if (mutated) | |
return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction"); | |
} | |
// All potential-corruption validation must be done before we do any | |
// transaction validation, as otherwise we may mark the header as invalid | |
// because we receive the wrong transactions for it. | |
// Note that witness malleability is checked in ContextualCheckBlock, so no | |
// checks that use witness data may be performed here. | |
// Size limits | |
if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT) | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed"); | |
// First transaction must be coinbase, the rest must not be | |
if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase"); | |
for (unsigned int i = 1; i < block.vtx.size(); i++) | |
if (block.vtx[i]->IsCoinBase()) | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase"); | |
// Check transactions | |
// Must check for duplicate inputs (see CVE-2018-17144) | |
for (const auto& tx : block.vtx) { | |
TxValidationState tx_state; | |
if (!CheckTransaction(*tx, tx_state)) { | |
// CheckBlock() does context-free validation checks. The only | |
// possible failures are consensus failures. | |
assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS); | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), | |
strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage())); | |
} | |
} | |
unsigned int nSigOps = 0; | |
for (const auto& tx : block.vtx) | |
{ | |
nSigOps += GetLegacySigOpCount(*tx); | |
} | |
if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST) | |
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount"); | |
if (fCheckPOW && fCheckMerkleRoot) | |
block.fChecked = true; | |
return true; | |
} | |
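/** Whether segwit rules are active (and therefore enforced) for a block whose parent is pindexPrev. */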
bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params) | |
{ | |
int height = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1; | |
return (height >= params.SegwitHeight); | |
} | |
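/**
 * If the block has a witness commitment but its coinbase carries no witness data,
 * add the required 32-byte all-zero witness nonce to the coinbase input.
 */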
void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams) | |
{ | |
int commitpos = GetWitnessCommitmentIndex(block); | |
static const std::vector<unsigned char> nonce(32, 0x00); | |
if (commitpos != NO_WITNESS_COMMITMENT && IsWitnessEnabled(pindexPrev, consensusParams) && !block.vtx[0]->HasWitness()) { | |
CMutableTransaction tx(*block.vtx[0]); | |
tx.vin[0].scriptWitness.stack.resize(1); | |
tx.vin[0].scriptWitness.stack[0] = nonce; | |
block.vtx[0] = MakeTransactionRef(std::move(tx)); | |
} | |
} | |
std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams) | |
{ | |
std::vector<unsigned char> commitment; | |
int |