// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <validation.h>
#include <arith_uint256.h>
#include <chain.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <checkqueue.h>
#include <claimscriptop.h>
#include <consensus/consensus.h>
#include <consensus/merkle.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <cuckoocache.h>
#include <hash.h>
#include <index/txindex.h>
#include <nameclaim.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/rbf.h>
#include <pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <protocol.h>
#include <random.h>
#include <reverse_iterator.h>
#include <script/script.h>
#include <script/sigcache.h>
#include <script/standard.h>
#include <shutdown.h>
#include <timedata.h>
#include <tinyformat.h>
#include <txdb.h>
#include <txmempool.h>
#include <ui_interface.h>
#include <undo.h>
#include <util.h>
#include <utilmoneystr.h>
#include <utilstrencodings.h>
#include <validationinterface.h>
#include <warnings.h>
#include <future>
#include <sstream>
#include <boost/algorithm/string/replace.hpp>
#include <boost/thread.hpp>
#if defined(NDEBUG)
# error "LBRYcrd cannot be compiled without assertions."
#endif
#define MICRO 0.000001
#define MILLI 0.001
CChainState g_chainstate;
CCriticalSection cs_main;
BlockMap& mapBlockIndex = g_chainstate.mapBlockIndex;
CChain& chainActive = g_chainstate.chainActive;
CBlockIndex *pindexBestHeader = nullptr;
CWaitableCriticalSection g_best_block_mutex;
CConditionVariable g_best_block_cv;
uint256 g_best_block;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;
uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
CAmount minFeePerNameClaimChar = MIN_FEE_PER_NAMECLAIM_CHAR;
CBlockPolicyEstimator feeEstimator;
CTxMemPool mempool(&feeEstimator);
std::atomic_bool g_is_mempool_loaded{false};
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;
const std::string strMessageMagic = "LBRYcrd Signed Message:\n";
// Internal stuff
namespace {
CBlockIndex *&pindexBestInvalid = g_chainstate.pindexBestInvalid;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
* Pruned nodes may have entries where B is missing data.
*/
std::multimap<CBlockIndex*, CBlockIndex*>& mapBlocksUnlinked = g_chainstate.mapBlocksUnlinked;
CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
* block/undo files that should be deleted. Set on startup
* or if we allocate more file space when we're in prune mode.
*/
bool fCheckForPruning = false;
/** Dirty block index entries. */
std::set<CBlockIndex*> setDirtyBlockIndex;
/** Dirty block file entries. */
std::set<int> setDirtyFileInfo;
} // anon namespace
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
AssertLockHeld(cs_main);
// Find the latest block common to locator and chain - we expect that
// locator.vHave is sorted descending by height.
for (const uint256& hash : locator.vHave) {
CBlockIndex* pindex = LookupBlockIndex(hash);
if (pindex) {
if (chain.Contains(pindex))
return pindex;
if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
return chain.Tip();
}
}
}
return chain.Genesis();
}
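// Illustrative note on the walk above (descriptive only; heights are
// hypothetical): a CBlockLocator lists hashes from the peer's tip backwards
// with exponentially growing gaps, e.g. heights 1000, 999, ..., 993, 991,
// 987, 979, .... The first listed hash found on our active chain is the fork
// point; if a listed block instead extends our tip (the
// GetAncestor(chain.Height()) == chain.Tip() case), our tip itself is the
// last common block; if nothing matches, only genesis is known to be shared.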
std::unique_ptr<CCoinsViewDB> pcoinsdbview;
std::unique_ptr<CCoinsViewCache> pcoinsTip;
std::unique_ptr<CBlockTreeDB> pblocktree;
// FIXME: make unique_ptr
CClaimTrie *pclaimTrie = nullptr;
enum class FlushStateMode {
NONE,
IF_NEEDED,
PERIODIC,
ALWAYS
};
// See definition for documentation
static bool FlushStateToDisk(const CChainParams& chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0);
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
bool CheckFinalTx(const CTransaction &tx, int flags)
{
AssertLockHeld(cs_main);
// By convention a negative value for flags indicates that the
// current network-enforced consensus rules should be used. In
// a future soft-fork scenario that would mean checking which
// rules would be enforced for the next block and setting the
// appropriate flags. At the present time no soft-forks are
// scheduled, so no flags are set.
flags = std::max(flags, 0);
// CheckFinalTx() uses chainActive.Height()+1 to evaluate
// nLockTime because when IsFinalTx() is called within
// CBlock::AcceptBlock(), the height of the block *being*
// evaluated is what is used. Thus if we want to know if a
// transaction can be part of the *next* block, we need to call
// IsFinalTx() with one more than chainActive.Height().
const int nBlockHeight = chainActive.Height() + 1;
// BIP113 requires that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
// When the next block is created its previous block will be the current
// chain tip, so we use that to calculate the median time passed to
// IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
? chainActive.Tip()->GetMedianTimePast()
: GetAdjustedTime();
return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
bool TestLockPointValidity(const LockPoints* lp)
{
AssertLockHeld(cs_main);
assert(lp);
// If there are relative lock times then the maxInputBlock will be set.
// If there are no relative lock times, the LockPoints don't depend on the chain.
if (lp->maxInputBlock) {
// Check whether chainActive is an extension of the block at which the LockPoints
// calculation was valid. If not, the LockPoints are no longer valid.
if (!chainActive.Contains(lp->maxInputBlock)) {
return false;
}
}
// LockPoints still valid
return true;
}
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
AssertLockHeld(cs_main);
AssertLockHeld(mempool.cs);
CBlockIndex* tip = chainActive.Tip();
assert(tip != nullptr);
CBlockIndex index;
index.pprev = tip;
// CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
// height based locks because when SequenceLocks() is called within
// ConnectBlock(), the height of the block *being*
// evaluated is what is used.
// Thus if we want to know if a transaction can be part of the
// *next* block, we need to use one more than chainActive.Height().
index.nHeight = tip->nHeight + 1;
std::pair<int, int64_t> lockPair;
if (useExistingLockPoints) {
assert(lp);
lockPair.first = lp->height;
lockPair.second = lp->time;
}
else {
// pcoinsTip contains the UTXO set for chainActive.Tip()
CCoinsViewMemPool viewMemPool(pcoinsTip.get(), mempool);
std::vector<int> prevheights;
prevheights.resize(tx.vin.size());
for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
const CTxIn& txin = tx.vin[txinIndex];
Coin coin;
if (!viewMemPool.GetCoin(txin.prevout, coin)) {
return error("%s: Missing input", __func__);
}
if (coin.nHeight == MEMPOOL_HEIGHT) {
// Assume all mempool transactions confirm in the next block.
prevheights[txinIndex] = tip->nHeight + 1;
} else {
prevheights[txinIndex] = coin.nHeight;
}
}
lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
if (lp) {
lp->height = lockPair.first;
lp->time = lockPair.second;
// Also store the hash of the block with the highest height of
// all the blocks which have sequence locked prevouts.
// This hash needs to still be on the chain
// for these LockPoint calculations to be valid.
// Note: It is impossible to correctly calculate a maxInputBlock
// if any of the sequence locked inputs depend on unconfirmed txs,
// except in the special case where the relative lock time/height
// is 0, which is equivalent to no sequence lock. Since we assume
// an input height of tip+1 for mempool txs and test the resulting
// lockPair from CalculateSequenceLocks against tip+1, we know
// EvaluateSequenceLocks will fail if there was a non-zero sequence
// lock on a mempool input, so we can use the return value of
// CheckSequenceLocks to indicate the LockPoints' validity.
int maxInputHeight = 0;
for (int height : prevheights) {
// Can ignore mempool inputs since we'll fail if they had non-zero locks.
if (height != tip->nHeight+1) {
maxInputHeight = std::max(maxInputHeight, height);
}
}
lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
}
}
return EvaluateSequenceLocks(index, lockPair);
}
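// Descriptive note on lockPair (not part of the original flow): lockPair.first
// is the minimum block height and lockPair.second the minimum median-time-past
// that the block containing this tx must exceed. EvaluateSequenceLocks(index,
// lockPair) then checks, roughly,
//   lockPair.first < index.nHeight && lockPair.second < index.pprev->GetMedianTimePast()
// so a tx whose inputs all carry zero relative locks yields the sentinel pair
// (-1, -1) from CalculateSequenceLocks and always passes.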
// Returns the script flags which should be checked for a given block
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
int expired = pool.Expire(GetTime() - age);
if (expired != 0) {
LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
}
std::vector<COutPoint> vNoSpendsRemaining;
pool.TrimToSize(limit, &vNoSpendsRemaining);
for (const COutPoint& removed : vNoSpendsRemaining)
pcoinsTip->Uncache(removed);
}
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state)
{
return strprintf("%s%s (code %i)",
state.GetRejectReason(),
state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
state.GetRejectCode());
}
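// Example output (illustrative values): a mempool rejection might format as
//   "mempool min fee not met, 124 < 500 (code 66)"
// where "mempool min fee not met" is the reject reason, "124 < 500" the debug
// message, and 66 (REJECT_INSUFFICIENTFEE) the reject code.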
static bool IsCurrentForFeeEstimation()
{
AssertLockHeld(cs_main);
if (IsInitialBlockDownload())
return false;
if (chainActive.Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE))
return false;
if (chainActive.Height() < pindexBestHeader->nHeight - 1)
return false;
return true;
}
/* Make mempool consistent after a reorg, by re-adding or recursively erasing
* disconnected block transactions from the mempool, and also removing any
* other transactions from the mempool that are no longer valid given the new
* tip/height.
*
* Note: we assume that disconnectpool only contains transactions that are NOT
* confirmed in the current chain nor already in the mempool (otherwise,
* in-mempool descendants of such transactions would be removed).
*
* Passing fAddToMempool=false will skip trying to add the transactions back,
* and instead just erase from the mempool as needed.
*/
static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool fAddToMempool)
{
AssertLockHeld(cs_main);
std::vector<uint256> vHashUpdate;
// disconnectpool's insertion_order index sorts the entries from
// oldest to newest, but the oldest entry will be the last tx from the
// latest mined block that was disconnected.
// Iterate disconnectpool in reverse, so that we add transactions
// back to the mempool starting with the earliest transaction that had
// been previously seen in a block.
auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
// ignore validation errors in resurrected transactions
CValidationState stateDummy;
if (!fAddToMempool || (*it)->IsCoinBase() ||
!AcceptToMemoryPool(mempool, stateDummy, *it, nullptr /* pfMissingInputs */,
nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) {
// If the transaction doesn't make it in to the mempool, remove any
// transactions that depend on it (which would now be orphans).
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
} else if (mempool.exists((*it)->GetHash())) {
vHashUpdate.push_back((*it)->GetHash());
}
++it;
}
disconnectpool.queuedTx.clear();
// AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
// no in-mempool children, which is generally not true when adding
// previously-confirmed transactions back to the mempool.
// UpdateTransactionsFromBlock finds descendants of any transactions in
// the disconnectpool that were added back and cleans up the mempool state.
mempool.UpdateTransactionsFromBlock(vHashUpdate);
// We also need to remove any now-immature transactions
mempool.removeForReorg(pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
// Re-limit mempool size, in case we added any transactions
LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
// were somehow broken and returning the wrong scriptPubKeys
static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool,
unsigned int flags, bool cacheSigStore, PrecomputedTransactionData& txdata) {
AssertLockHeld(cs_main);
// pool.cs should be locked already, but go ahead and re-take the lock here
// to enforce that mempool doesn't change between when we check the view
// and when we actually call through to CheckInputs
LOCK(pool.cs);
assert(!tx.IsCoinBase());
for (const CTxIn& txin : tx.vin) {
const Coin& coin = view.AccessCoin(txin.prevout);
// At this point we haven't actually checked if the coins are all
// available (or shouldn't assume we have, since CheckInputs does).
// So we just return failure if the inputs are not available here,
// and then only have to check equivalence for available inputs.
if (coin.IsSpent()) return false;
const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
if (txFrom) {
assert(txFrom->GetHash() == txin.prevout.hash);
assert(txFrom->vout.size() > txin.prevout.n);
assert(txFrom->vout[txin.prevout.n] == coin.out);
} else {
const Coin& coinFromDisk = pcoinsTip->AccessCoin(txin.prevout);
assert(!coinFromDisk.IsSpent());
assert(coinFromDisk.out == coin.out);
}
}
return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata);
}
static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache, bool test_accept)
{
const CTransaction& tx = *ptx;
const uint256 hash = tx.GetHash();
AssertLockHeld(cs_main);
LOCK(pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
if (pfMissingInputs) {
*pfMissingInputs = false;
}
if (!CheckTransaction(tx, state))
return false; // state filled in by CheckTransaction
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "coinbase");
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
std::string reason;
if (fRequireStandard && !IsStandardTx(tx, reason))
return state.DoS(0, false, (reason == "dust" ? REJECT_DUST : REJECT_NONSTANDARD), reason);
// Do not work on transactions that are too small.
// A transaction with 1 segwit input and 1 P2WPKH output has non-witness size of 82 bytes.
// Transactions smaller than this are not relayed to reduce unnecessary malloc overhead.
//
// NOTE: LBRY does not honor this node rule.
/* if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE) */
/* return state.DoS(0, false, REJECT_NONSTANDARD, "tx-size-small"); */
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
// is it already in the memory pool?
if (pool.exists(hash)) {
return state.Invalid(false, REJECT_DUPLICATE, "txn-already-in-mempool");
}
// Check for conflicts with in-memory transactions
std::set<uint256> setConflicts;
for (const CTxIn &txin : tx.vin)
{
auto itConflicting = pool.mapNextTx.find(txin.prevout);
if (itConflicting != pool.mapNextTx.end())
{
const CTransaction *ptxConflicting = itConflicting->second;
if (!setConflicts.count(ptxConflicting->GetHash()))
{
// Allow opt-out of transaction replacement by setting
// nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
//
// SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
// non-replaceable transactions. Checking all inputs rather than
// just one is for the sake of multi-party protocols, where we don't
// want a single party to be able to disable replacement.
//
// The opt-out ignores descendants as anyone relying on
// first-seen mempool behavior should be checking all
// unconfirmed ancestors anyway; doing otherwise is hopelessly
// insecure.
bool fReplacementOptOut = true;
if (fEnableReplacement)
{
for (const CTxIn &_txin : ptxConflicting->vin)
{
if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
{
fReplacementOptOut = false;
break;
}
}
}
if (fReplacementOptOut) {
return state.Invalid(false, REJECT_DUPLICATE, "txn-mempool-conflict");
}
setConflicts.insert(ptxConflicting->GetHash());
}
}
}
{
CCoinsView dummy;
CCoinsViewCache view(&dummy);
LockPoints lp;
CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool);
view.SetBackend(viewMemPool);
// do all inputs exist?
for (const CTxIn& txin : tx.vin) {
if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
coins_to_uncache.push_back(txin.prevout);
}
if (!view.HaveCoin(txin.prevout)) {
// Are inputs missing because we already have the tx?
for (size_t out = 0; out < tx.vout.size(); out++) {
// Optimistically just do efficient check of cache for outputs
if (pcoinsTip->HaveCoinInCache(COutPoint(hash, out))) {
return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known");
}
}
// Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
if (pfMissingInputs) {
*pfMissingInputs = true;
}
return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
}
}
// Bring the best block into scope
view.GetBestBlock();
// we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
view.SetBackend(dummy);
// Only accept BIP68 sequence locked transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
// Must keep pool.cs for this unless we change CheckSequenceLocks to take a
// CoinsViewCache instead of create its own
if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
CAmount nFees = 0;
if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
}
// Check for non-standard pay-to-script-hash in inputs
if (fRequireStandard && !AreInputsStandard(tx, view))
return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
// Check for non-standard witness in P2WSH
if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, view))
return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);
int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
// nModifiedFees includes any fee deltas from PrioritiseTransaction
CAmount nModifiedFees = nFees;
pool.ApplyDelta(hash, nModifiedFees);
// Keep track of transactions that spend a coinbase, which we re-scan
// during reorgs to ensure COINBASE_MATURITY is still met.
bool fSpendsCoinbase = false;
for (const CTxIn &txin : tx.vin) {
const Coin &coin = view.AccessCoin(txin.prevout);
if (coin.IsCoinBase()) {
fSpendsCoinbase = true;
break;
}
}
CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, chainActive.Height(),
fSpendsCoinbase, nSigOpsCost, lp);
unsigned int nSize = entry.GetTxSize();
// Check that the transaction doesn't have an excessive number of
// sigops, making it impossible to mine. Since the coinbase transaction
// itself can contain sigops, MAX_STANDARD_TX_SIGOPS_COST is less than
// MAX_BLOCK_SIGOPS_COST; we still consider this an invalid rather than
// merely non-standard transaction.
if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
strprintf("%d", nSigOpsCost));
CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
}
// No transactions are allowed below minRelayTxFee except from disconnected blocks
if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", false, strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
}
if (nAbsurdFee && nFees > nAbsurdFee)
return state.Invalid(false,
REJECT_HIGHFEE, "absurdly-high-fee",
strprintf("%d > %d", nFees, nAbsurdFee));
// Calculate in-mempool ancestors, up to a limit.
CTxMemPool::setEntries setAncestors;
size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
std::string errString;
if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
}
// A transaction that spends outputs that would be replaced by it is invalid. Now
// that we have the set of all ancestors we can detect this
// pathological case by making sure setConflicts and setAncestors don't
// intersect.
for (CTxMemPool::txiter ancestorIt : setAncestors)
{
const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
if (setConflicts.count(hashAncestor))
{
return state.DoS(10, false,
REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
strprintf("%s spends conflicting transaction %s",
hash.ToString(),
hashAncestor.ToString()));
}
}
// Check if it's economically rational to mine this transaction rather
// than the ones it replaces.
CAmount nConflictingFees = 0;
size_t nConflictingSize = 0;
uint64_t nConflictingCount = 0;
CTxMemPool::setEntries allConflicting;
// If we don't hold the lock, allConflicting might be incomplete; the
// subsequent RemoveStaged() and addUnchecked() calls don't guarantee
// mempool consistency for us.
const bool fReplacementTransaction = setConflicts.size();
if (fReplacementTransaction)
{
CFeeRate newFeeRate(nModifiedFees, nSize);
std::set<uint256> setConflictsParents;
const int maxDescendantsToVisit = 100;
CTxMemPool::setEntries setIterConflicting;
for (const uint256 &hashConflicting : setConflicts)
{
CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
if (mi == pool.mapTx.end())
continue;
// Save these to avoid repeated lookups
setIterConflicting.insert(mi);
// Don't allow the replacement to reduce the feerate of the
// mempool.
//
// We usually don't want to accept replacements with lower
// feerates than what they replaced as that would lower the
// feerate of the next block. Requiring that the feerate always
// be increased is also an easy-to-reason-about way to prevent
// DoS attacks via replacements.
//
// We only consider the feerates of transactions being directly
// replaced, not their indirect descendants. While that does
// mean high feerate children are ignored when deciding whether
// or not to replace, we do require the replacement to pay more
// overall fees too, mitigating most cases.
CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
if (newFeeRate <= oldFeeRate)
{
return state.DoS(0, false,
REJECT_INSUFFICIENTFEE, "insufficient fee", false,
strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
hash.ToString(),
newFeeRate.ToString(),
oldFeeRate.ToString()));
}
for (const CTxIn &txin : mi->GetTx().vin)
{
setConflictsParents.insert(txin.prevout.hash);
}
nConflictingCount += mi->GetCountWithDescendants();
}
// This potentially overestimates the number of actual descendants
// but we just want to be conservative to avoid doing too much
// work.
if (nConflictingCount <= maxDescendantsToVisit) {
// If not too many to replace, then calculate the set of
// transactions that would have to be evicted
for (CTxMemPool::txiter it : setIterConflicting) {
pool.CalculateDescendants(it, allConflicting);
}
for (CTxMemPool::txiter it : allConflicting) {
nConflictingFees += it->GetModifiedFee();
nConflictingSize += it->GetTxSize();
}
} else {
return state.DoS(0, false,
REJECT_NONSTANDARD, "too many potential replacements", false,
strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
hash.ToString(),
nConflictingCount,
maxDescendantsToVisit));
}
for (unsigned int j = 0; j < tx.vin.size(); j++)
{
// We don't want to accept replacements that require low
// feerate junk to be mined first. Ideally we'd keep track of
// the ancestor feerates and make the decision based on that,
// but for now requiring all new inputs to be confirmed works.
if (!setConflictsParents.count(tx.vin[j].prevout.hash))
{
// Rather than check the UTXO set - potentially expensive -
// it's cheaper to just check if the new input refers to a
// tx that's in the mempool.
if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
return state.DoS(0, false,
REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
strprintf("replacement %s adds unconfirmed input, idx %d",
hash.ToString(), j));
}
}
// The replacement must pay greater fees than the transactions it
// replaces - otherwise the bandwidth used by those conflicting
// transactions would not be paid for.
if (nModifiedFees < nConflictingFees)
{
return state.DoS(0, false,
REJECT_INSUFFICIENTFEE, "insufficient fee", false,
strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
}
// Finally, in addition to paying more fees than the conflicts, the
// new transaction must pay for its own bandwidth.
CAmount nDeltaFees = nModifiedFees - nConflictingFees;
if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
{
return state.DoS(0, false,
REJECT_INSUFFICIENTFEE, "insufficient fee", false,
strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
hash.ToString(),
FormatMoney(nDeltaFees),
FormatMoney(::incrementalRelayFee.GetFee(nSize))));
}
}
constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
PrecomputedTransactionData txdata(tx);
if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) {
// SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
// need to turn both off, and compare against just turning off CLEANSTACK
// to see if the failure is specifically due to witness validation.
CValidationState stateDummy; // Want reported failures to be from first CheckInputs
if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
!CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
// Only the witness is missing, so the transaction itself may be fine.
state.SetCorruptionPossible();
}
return false; // state filled in by CheckInputs
}
// Check again against the current block tip's script verification
// flags to cache our script execution flags. This is, of course,
// useless if the next block has different script flags from the
// previous one, but because the cache tracks script flags for us it
// will auto-invalidate and we'll just have a few blocks of extra
// misses on soft-fork activation.
//
// This is also useful in case of bugs in the standard flags that cause
// transactions to pass as valid when they're actually invalid. For
// instance the STRICTENC flag was incorrectly allowing certain
// CHECKSIG NOT scripts to pass, even though they were invalid.
//
// There is a similar check in CreateNewBlock() to prevent creating
// invalid blocks (using TestBlockValidity), however allowing such
// transactions into the mempool can be exploited as a DoS attack.
unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(chainActive.Tip(), Params().GetConsensus());
if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) {
return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
__func__, hash.ToString(), FormatStateMessage(state));
}
if (test_accept) {
// Tx was accepted, but not added
return true;
}
// Remove conflicting transactions from the mempool
for (CTxMemPool::txiter it : allConflicting)
{
LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s LBC additional fees, %d delta bytes\n",
it->GetTx().GetHash().ToString(),
hash.ToString(),
FormatMoney(nModifiedFees - nConflictingFees),
(int)nSize - (int)nConflictingSize);
if (plTxnReplaced)
plTxnReplaced->push_back(it->GetSharedTx());
}
pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
// This transaction should only count for fee estimation if:
// - it isn't a BIP 125 replacement transaction (may not be widely supported)
// - it's not being re-added during a reorg which bypasses typical mempool fee limits
// - the node is not behind
// - the transaction is not dependent on any other transactions in the mempool
bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
// Store transaction in memory
pool.addUnchecked(hash, entry, setAncestors, validForFeeEstimation);
// trim mempool and check if tx was trimmed
if (!bypass_limits) {
LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
if (!pool.exists(hash))
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
}
}
GetMainSignals().TransactionAddedToMempool(ptx);
return true;
}
/** (Try to) add a transaction to the memory pool with a specified acceptance time. */
static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
{
std::vector<COutPoint> coins_to_uncache;
bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept);
if (!res) {
for (const COutPoint& hashTx : coins_to_uncache)
pcoinsTip->Uncache(hashTx);
}
// After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
CValidationState stateDummy;
FlushStateToDisk(chainparams, stateDummy, FlushStateMode::PERIODIC);
return res;
}
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
bool* pfMissingInputs, std::list<CTransactionRef>* plTxnReplaced,
bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
{
const CChainParams& chainparams = Params();
return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, pfMissingInputs, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee, test_accept);
}
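// Typical call pattern (hypothetical caller shown for illustration; the real
// call sites live in net_processing.cpp and the RPC layer):
//   CValidationState state;
//   bool fMissingInputs = false;
//   std::list<CTransactionRef> lTxnReplaced;
//   if (!AcceptToMemoryPool(mempool, state, ptx, &fMissingInputs, &lTxnReplaced,
//                           false /* bypass_limits */, 0 /* nAbsurdFee */)) {
//       // state holds the reject reason, unless fMissingInputs is set, in
//       // which case the tx is a candidate orphan rather than invalid.
//   }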
/**
* Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock.
* If blockIndex is provided, the transaction is fetched from the corresponding block.
*/
bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, bool fAllowSlow, CBlockIndex* blockIndex)
{
CBlockIndex* pindexSlow = blockIndex;
LOCK(cs_main);
if (!blockIndex) {
CTransactionRef ptx = mempool.get(hash);
if (ptx) {
txOut = ptx;
return true;
}
if (g_txindex) {
return g_txindex->FindTx(hash, hashBlock, txOut);
}
if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
const Coin& coin = AccessByTxid(*pcoinsTip, hash);
if (!coin.IsSpent()) pindexSlow = chainActive[coin.nHeight];
}
}
if (pindexSlow) {
CBlock block;
if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
for (const auto& tx : block.vtx) {
if (tx->GetHash() == hash) {
txOut = tx;
hashBlock = pindexSlow->GetBlockHash();
return true;
}
}
}
}
return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
if (fileout.IsNull())
return error("WriteBlockToDisk: OpenBlockFile failed");
// Write index header
unsigned int nSize = GetSerializeSize(fileout, block);
fileout << messageStart << nSize;
// Write block
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("WriteBlockToDisk: ftell failed");
pos.nPos = (unsigned int)fileOutPos;
fileout << block;
return true;
}
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
block.SetNull();
// Open history file to read
CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull())
return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
// Read block
try {
filein >> block;
}
catch (const std::exception& e) {
return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
}
// Check the header
if (!CheckProofOfWork(block.GetPoWHash(), block.nBits, consensusParams))
return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
return true;
}
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
CDiskBlockPos blockPos;
{
LOCK(cs_main);
blockPos = pindex->GetBlockPos();
}
if (!ReadBlockFromDisk(block, blockPos, consensusParams))
return false;
if (block.GetHash() != pindex->GetBlockHash())
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
pindex->ToString(), pindex->GetBlockPos().ToString());
return true;
}
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& message_start)
{
CDiskBlockPos hpos = pos;
hpos.nPos -= 8; // Seek back 8 bytes for meta header
CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull()) {
return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
}
try {
CMessageHeader::MessageStartChars blk_start;
unsigned int blk_size;
filein >> blk_start >> blk_size;
if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) {
return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
HexStr(blk_start, blk_start + CMessageHeader::MESSAGE_START_SIZE),
HexStr(message_start, message_start + CMessageHeader::MESSAGE_START_SIZE));
}
if (blk_size > MAX_SIZE) {
return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
blk_size, MAX_SIZE);
}
block.resize(blk_size); // Zeroing of memory is intentional here
filein.read((char*)block.data(), blk_size);
} catch(const std::exception& e) {
return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
}
return true;
}
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
{
CDiskBlockPos block_pos;
{
LOCK(cs_main);
block_pos = pindex->GetBlockPos();
}
return ReadRawBlockFromDisk(block, block_pos, message_start);
}
bool withinLevelBounds(int nReduction, int nLevel)
{
if (((nReduction * nReduction + nReduction) >> 1) > nLevel)
return false;
nReduction += 1;
if (((nReduction * nReduction + nReduction) >> 1) <= nLevel)
return false;
return true;
}
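// Descriptive note: withinLevelBounds(r, L) is true exactly when
//   T(r) <= L < T(r+1), where T(n) = n*(n+1)/2 = (n*n + n) >> 1
// is the n-th triangular number. GetBlockSubsidy below uses it to find the
// reduction level r for a given interval count L, so reduction level r is
// held for r+1 consecutive intervals.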
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
if (nHeight == 0)
{
return 400000000 * COIN;
}
else if (nHeight <= 5100)
{
return 1 * COIN;
}
else if (nHeight <= 55000)
{
int l = nHeight - 5000;
int nLevel = 0;
for (int i = 0; i < l; i+=100)
{
nLevel++;
}
return nLevel * COIN;
}
CAmount nStartingSubsidy = 500 * COIN;
int nLevel = (nHeight - 55001) / consensusParams.nSubsidyLevelInterval;
int nReduction = ((-1 + (int)sqrt((8 * nLevel) + 1)) / 2);
while (!(withinLevelBounds(nReduction, nLevel)))
{
if (((nReduction * nReduction + nReduction) >> 1) > nLevel)
{
nReduction--;
}
else
{
nReduction++;
}
}
CAmount nSubsidyReduction = nReduction * COIN;
if (nSubsidyReduction >= nStartingSubsidy)
return 0;
return nStartingSubsidy - nSubsidyReduction;
}
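// Worked schedule for the function above (illustrative, derived from the
// branches; the post-55000 interval comes from consensusParams.nSubsidyLevelInterval):
//   height 0            -> 400,000,000 COIN (genesis)
//   heights 1..5100     -> 1 COIN
//   heights 5101..55000 -> ceil((height - 5000) / 100) COIN, i.e. the subsidy
//                          grows by 1 COIN every 100 blocks, reaching 500 COIN
//                          at height 55000
//   heights > 55000     -> (500 - r) COIN, floored at 0, where r is the
//                          triangular reduction level with
//                          T(r) <= (height - 55001) / interval < T(r+1);
//                          the sqrt() call is just a fast initial guess for r
//                          that the loop corrects for rounding error.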
bool IsInitialBlockDownload()
{
// Once this function has returned false, it must remain false.
static std::atomic<bool> latchToFalse{false};
// Optimization: pre-test latch before taking the lock.
if (latchToFalse.load(std::memory_order_relaxed))
return false;
LOCK(cs_main);
if (latchToFalse.load(std::memory_order_relaxed))
return false;
if (fImporting || fReindex)
return true;
if (chainActive.Tip() == nullptr)
return true;
if (chainActive.Tip()->nChainWork < nMinimumChainWork)
return true;
if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
return true;
LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
latchToFalse.store(true, std::memory_order_relaxed);
return false;
}
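// Descriptive note on the latch above: this is the double-checked locking
// pattern. The relaxed pre-test is safe because the flag is monotonic (it
// only ever flips false -> true) and the actual decision is serialized under
// cs_main; once latched, later callers return without taking the lock at all.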
CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
static void AlertNotify(const std::string& strMessage)
{
uiInterface.NotifyAlertChanged();
std::string strCmd = gArgs.GetArg("-alertnotify", "");
if (strCmd.empty()) return;
// Alert text should be plain ASCII coming from a trusted source, but to
// be safe we first strip anything not in safeChars, then add single quotes around
// the whole string before passing it to the shell:
std::string singleQuote("'");
std::string safeStatus = SanitizeString(strMessage);
safeStatus = singleQuote+safeStatus+singleQuote;
boost::replace_all(strCmd, "%s", safeStatus);
std::thread t(runCommand, strCmd);
t.detach(); // thread runs free
}
static void CheckForkWarningConditions()
{
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
// (we assume we don't get stuck on a fork before finishing our initial sync)
if (IsInitialBlockDownload())
return;
// If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
// of our head, drop it
if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
pindexBestForkTip = nullptr;
if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
{
if (!GetfLargeWorkForkFound() && pindexBestForkBase)
{
std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
pindexBestForkBase->phashBlock->ToString() + std::string("'");
AlertNotify(warning);
}
if (pindexBestForkTip && pindexBestForkBase)
{
LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
SetfLargeWorkForkFound(true);
}
else
{
LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
SetfLargeWorkInvalidChainFound(true);
}
}
else
{
SetfLargeWorkForkFound(false);
SetfLargeWorkInvalidChainFound(false);
}
}
static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
AssertLockHeld(cs_main);
// If we are on a fork that is sufficiently large, set a warning flag
CBlockIndex* pfork = pindexNewForkTip;
CBlockIndex* plonger = chainActive.Tip();
while (pfork && pfork != plonger)
{
while (plonger && plonger->nHeight > pfork->nHeight)
plonger = plonger->pprev;
if (pfork == plonger)
break;
pfork = pfork->pprev;
}
// We define a condition we should warn the user about: a fork of at least 7 blocks
// with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours,
// or a chain that is entirely longer than ours and invalid (note that this should be detected by both).
// We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
// hash rate operating on the fork.
// We define it this way because it allows us to only store the highest fork tip (and base) that meets
// the 7-block condition, and from this always have the most-likely-to-cause-warning fork.
if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
chainActive.Height() - pindexNewForkTip->nHeight < 72)
{
pindexBestForkTip = pindexNewForkTip;
pindexBestForkBase = pfork;
}
CheckForkWarningConditions();
}
void static InvalidChainFound(CBlockIndex* pindexNew)
{
if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
pindexBestInvalid = pindexNew;
LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
CBlockIndex *tip = chainActive.Tip();
assert (tip);
LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
FormatISO8601DateTime(tip->GetBlockTime()));
CheckForkWarningConditions();
}
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
if (!state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
m_failed_blocks.insert(pindex);
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
InvalidChainFound(pindex);
}
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
{
// mark inputs spent
if (!tx.IsCoinBase()) {
txundo.vprevout.reserve(tx.vin.size());
Coin coin;
for (const CTxIn &txin : tx.vin) {
bool is_spent = inputs.SpendCoin(txin.prevout, &coin);
assert(is_spent);
txundo.vprevout.emplace_back(coin.out, coin.IsCoinBase(), int(coin.nHeight));
}
}
// add outputs
AddCoins(inputs, tx, nHeight);
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
{
CTxUndo txundo;
UpdateCoins(tx, inputs, txundo, nHeight);
}
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
}
int GetSpendHeight(const CCoinsViewCache& inputs)
{
LOCK(cs_main);
CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
return pindexPrev->nHeight + 1;
}
static CuckooCache::cache<uint256, SignatureCacheHasher> scriptExecutionCache;
static uint256 scriptExecutionCacheNonce(GetRandHash());
void InitScriptExecutionCache() {
// nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
// setup_bytes creates the minimum possible cache (2 elements).
size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
size_t nElems = scriptExecutionCache.setup_bytes(nMaxCacheSize);
LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
(nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
}
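// Sizing note (descriptive, assuming the conventional split): -maxsigcachesize
// is divided evenly between the signature cache (InitSignatureCache) and this
// script execution cache, hence the "/ 2" above and the "%zu/2" in the log
// line. Each element is a 32-byte uint256, so a 16 MiB budget stores roughly
// 16 MiB / 32 B = 2^19 entries (setup_bytes rounds down to a power of two).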
/**
* Check whether all inputs of this transaction are valid (no double spends, scripts & sigs, amounts)
* This does not modify the UTXO set.
*
* If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
* script checks which are not necessary (e.g. due to script execution cache hits) are, obviously,
* not pushed onto pvChecks/run.
*
* Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
* which are matched. This is useful for checking blocks where we will likely never need the cache
* entry again.
*
* Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
*/
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
if (!tx.IsCoinBase())
{
if (pvChecks)
pvChecks->reserve(tx.vin.size());
// The inexpensive input checks have already been performed by the caller
// (see Consensus::CheckTxInputs). Only if ALL inputs pass do we perform
// the expensive ECDSA signature checks, which helps prevent CPU
// exhaustion attacks.
// Skip script verification when connecting blocks under the
// assumevalid block. Assuming the assumevalid block is valid this
// is safe because block merkle hashes are still computed and checked.
// Of course, if an assumed valid block is invalid due to false scriptSigs
// this optimization would allow an invalid chain to be accepted.
if (fScriptChecks) {
// First check if script executions have been cached with the same
// flags. Note that this assumes that the inputs provided are
// correct (i.e. that the transaction hash which is in tx's prevouts
// properly commits to the scriptPubKey in the inputs view of that
// transaction).
uint256 hashCacheEntry;
// We only use the first 19 bytes of nonce to avoid a second SHA
// round - giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64)
static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache");
CSHA256().Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32).Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
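// Byte-count check for the single-round claim above: 19 nonce bytes
// + 32-byte witness hash + 4-byte flags = 55 bytes; SHA-256 padding
// (one 0x80 byte + an 8-byte length) brings that to exactly 64 bytes,
// a single compression-function block. The static_assert guarantees
// at least 16 of the nonce bytes (128 bits) remain in the key.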
AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
return true;
}
for (unsigned int i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
const Coin& coin = inputs.AccessCoin(prevout);
assert(!coin.IsSpent());
// We very carefully only pass in things to CScriptCheck which
// are clearly committed to by tx's witness hash. This provides
// a sanity check that our caching is not introducing consensus
// failures through additional data in, e.g., the coins being
// spent being checked as a part of CScriptCheck.
// Verify signature
CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
if (pvChecks) {
pvChecks->push_back(CScriptCheck());
check.swap(pvChecks->back());
} else if (!check()) {
if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
// Check whether the failure was caused by a
// non-mandatory script verification check, such as
// non-standard DER encodings or non-null dummy
// arguments; if so, don't trigger DoS protection to
// avoid splitting the network between upgraded and
// non-upgraded nodes.
CScriptCheck check2(coin.out, tx, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
if (check2())
return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
// Failures of other flags indicate a transaction that is
// invalid in new blocks, e.g. an invalid P2SH. We DoS ban
// such nodes as they are not following the protocol. That
// said, during an upgrade careful thought should be taken
// as to the correct behavior - we may want to continue
// peering with non-upgraded nodes even after soft-fork
// super-majority signaling has occurred.
return state.DoS(100, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
}
}
if (cacheFullScriptStore && !pvChecks) {
// We executed all of the provided scripts, and were told to
// cache the result. Do so now.
scriptExecutionCache.insert(hashCacheEntry);
}
}
}
return true;
}
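// Illustrative usage of pvChecks (hypothetical sketch; ConnectBlock uses this
// shape to parallelize script verification across the checkqueue threads):
//   std::vector<CScriptCheck> vChecks;
//   CCheckQueueControl<CScriptCheck> control(nScriptCheckThreads ? &scriptcheckqueue : nullptr);
//   if (!CheckInputs(tx, state, view, fScriptChecks, flags, false, false, txdata, &vChecks))
//       return error("...");
//   control.Add(vChecks);   // checks are dispatched to worker threads
//   ...
//   if (!control.Wait())    // join; false if any deferred script check failed
//       return state.DoS(100, false, REJECT_INVALID, "block-validation-failed");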
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex *pindex) | |
{ | |
CDiskBlockPos pos = pindex->GetUndoPos(); | |
if (pos.IsNull()) { | |
return error("%s: no undo data available", __func__); | |
} | |
// Open history file to read | |
CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); | |
if (filein.IsNull()) | |
return error("%s: OpenUndoFile failed", __func__); | |
// Read block | |
uint256 hashChecksum; | |
CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data | |
try { | |
verifier << pindex->pprev->GetBlockHash(); | |
verifier >> blockundo; | |
filein >> hashChecksum; | |
} | |
catch (const std::exception& e) { | |
return error("%s: Deserialize or I/O error - %s", __func__, e.what()); | |
} | |
// Verify checksum | |
if (hashChecksum != verifier.GetHash()) | |
return error("%s: Checksum mismatch", __func__); | |
return true; | |
} | |
namespace { | |
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart) | |
{ | |
// Open history file to append | |
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); | |
if (fileout.IsNull()) | |
return error("%s: OpenUndoFile failed", __func__); | |
// Write index header | |
unsigned int nSize = GetSerializeSize(fileout, blockundo); | |
fileout << messageStart << nSize; | |
// Write undo data | |
long fileOutPos = ftell(fileout.Get()); | |
if (fileOutPos < 0) | |
return error("%s: ftell failed", __func__); | |
pos.nPos = (unsigned int)fileOutPos; | |
fileout << blockundo; | |
// calculate & write checksum | |
CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); | |
hasher << hashBlock; | |
hasher << blockundo; | |
fileout << hasher.GetHash(); | |
return true; | |
} | |
/** Abort with a message */ | |
static bool AbortNode(const std::string& strMessage, const std::string& userMessage="") | |
{ | |
SetMiscWarning(strMessage); | |
LogPrintf("*** %s\n", strMessage); | |
uiInterface.ThreadSafeMessageBox( | |
userMessage.empty() ? "Error: A fatal internal error occurred, see debug.log for details. System message: " + strMessage : userMessage, | |
"", CClientUIInterface::MSG_ERROR); | |
StartShutdown(); | |
return false; | |
} | |
static bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="") | |
{ | |
AbortNode(strMessage, userMessage); | |
return state.Error(strMessage); | |
} | |
} // namespace | |
/** | |
* Restore the UTXO in a Coin at a given COutPoint | |
* @param index The position in txUndo.vprevout of the undo data to apply. | |
* @param txUndo The transaction undo data containing the Coin to be restored. | |
* @param view The coins view to which to apply the changes. | |
* @param trieCache The claim trie cache to which to apply the changes. | |
* @param out The out point that corresponds to the tx input. | |
* @return A DisconnectResult as an int | |
*/ | |
int ApplyTxInUndo(unsigned int index, CTxUndo& txUndo, CCoinsViewCache& view, CClaimTrieCache& trieCache, const COutPoint& out) | |
{ | |
auto& undo = txUndo.vprevout[index]; | |
bool fClean = true; | |
if (view.HaveCoin(out)) fClean = false; // overwriting transaction output | |
if (undo.nHeight == 0) { | |
// Missing undo metadata (height and coinbase, not txout). Older versions included this | |
// information only in undo records for the last spend of a transaction's | |
// outputs. This implies that it must be present for some other output of the same tx. | |
const Coin& alternate = AccessByTxid(view, out.hash); | |
if (!alternate.IsSpent()) { | |
undo.nHeight = alternate.nHeight; | |
undo.fCoinBase = alternate.fCoinBase; | |
} else { | |
return DISCONNECT_FAILED; // adding output for transaction without known metadata | |
} | |
} | |
// restore claim if applicable | |
if (undo.fIsClaim && !undo.txout.scriptPubKey.empty()) { | |
auto nValidHeight = undo.nClaimValidHeight; | |
if (nValidHeight > 0 && nValidHeight >= undo.nHeight) { | |
CClaimScriptUndoSpendOp undoSpend(COutPoint(out.hash, out.n), undo.txout.nValue, undo.nHeight, nValidHeight); | |
ProcessClaim(undoSpend, trieCache, undo.txout.scriptPubKey); | |
} else { | |
LogPrintf("%s: (txid: %s, nOut: %d) Not restoring claim/support to the claim trie because it expired before it was spent\n", __func__, out.hash.ToString(), out.n); | |
LogPrintf("%s: nValidHeight = %d, undo.nHeight = %d, nCurrentHeight = %d\n", __func__, nValidHeight, undo.nHeight, chainActive.Height()); | |
} | |
} | |
// The potential_overwrite parameter to AddCoin is only allowed to be false if we know for | |
// sure that the coin did not already exist in the cache. As we have queried for that above | |
// using HaveCoin, we don't need to guess. When fClean is false, a coin already existed and | |
// it is an overwrite. | |
Coin coin(undo.txout, int(undo.nHeight), undo.fCoinBase); | |
view.AddCoin(out, std::move(coin), !fClean); | |
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN; | |
} | |
/** Undo the effects of this block (with given index) on the UTXO set represented by coins. | |
* When FAILED is returned, view is left in an indeterminate state. */ | |
DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view, CClaimTrieCache& trieCache) | |
{ | |
assert(pindex->GetBlockHash() == view.GetBestBlock()); | |
if (pindex->hashClaimTrie != trieCache.getMerkleHash()) { | |
LogPrintf("%s: Indexed claim hash doesn't match current: %s vs %s\n", | |
__func__, pindex->hashClaimTrie.ToString(), trieCache.getMerkleHash().ToString()); | |
assert(false); | |
} | |
bool fClean = true; | |
CBlockUndo blockUndo; | |
if (!UndoReadFromDisk(blockUndo, pindex)) { | |
error("DisconnectBlock(): failure reading undo data"); | |
return DISCONNECT_FAILED; | |
} | |
if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) { | |
error("DisconnectBlock(): block and undo data inconsistent"); | |
return DISCONNECT_FAILED; | |
} | |
const bool decremented = trieCache.decrementBlock(blockUndo.insertUndo, blockUndo.expireUndo, blockUndo.insertSupportUndo, blockUndo.expireSupportUndo); | |
assert(decremented); | |
// undo transactions in reverse order | |
for (int i = block.vtx.size() - 1; i >= 0; i--) { | |
const CTransaction &tx = *(block.vtx[i]); | |
uint256 hash = tx.GetHash(); | |
bool is_coinbase = tx.IsCoinBase(); | |
// Check that all outputs are available and match the outputs in the block itself | |
// exactly. | |
for (size_t o = 0; o < tx.vout.size(); o++) { | |
if (!tx.vout[o].scriptPubKey.IsUnspendable()) { | |
COutPoint out(hash, o); | |
Coin coin; | |
bool is_spent = view.SpendCoin(out, &coin); | |
if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) { | |
fClean = false; // transaction output mismatch | |
} | |
} | |
} | |
// remove any claims | |
for (size_t j = 0; j < tx.vout.size(); j++) | |
{ | |
const CTxOut& txout = tx.vout[j]; | |
if (!txout.scriptPubKey.empty()) { | |
CClaimScriptUndoAddOp undoAdd(COutPoint(hash, j), pindex->nHeight); | |
ProcessClaim(undoAdd, trieCache, txout.scriptPubKey); | |
} | |
} | |
// restore inputs | |
if (i > 0) { // not coinbases | |
CTxUndo &txundo = blockUndo.vtxundo[i-1]; | |
if (txundo.vprevout.size() != tx.vin.size()) { | |
error("DisconnectBlock(): transaction and undo data inconsistent"); | |
return DISCONNECT_FAILED; | |
} | |
for (unsigned int j = tx.vin.size(); j-- > 0;) { | |
const COutPoint &out = tx.vin[j].prevout; | |
int res = ApplyTxInUndo(j, txundo, view, trieCache, out); | |
if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED; | |
fClean = fClean && res != DISCONNECT_UNCLEAN; | |
} | |
// Upstream code moved each entry out of txundo.vprevout at this point; | |
// ApplyTxInUndo() above now takes the undo data by reference instead, | |
// which is harmless because vprevout is not accessed again afterwards. | |
} | |
} | |
// move best block pointer to prevout block | |
view.SetBestBlock(pindex->pprev->GetBlockHash()); | |
assert(trieCache.finalizeDecrement(blockUndo.takeoverHeightUndo)); | |
auto merkleHash = trieCache.getMerkleHash(); | |
if (merkleHash != pindex->pprev->hashClaimTrie) { | |
if (!trieCache.empty()) | |
trieCache.dumpToLog(trieCache.find({})); | |
LogPrintf("Hash comparison failure at block %d\n", pindex->nHeight); | |
assert(merkleHash == pindex->pprev->hashClaimTrie); | |
} | |
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN; | |
} | |
void static FlushBlockFile(bool fFinalize = false) | |
{ | |
LOCK(cs_LastBlockFile); | |
CDiskBlockPos posOld(nLastBlockFile, 0); | |
bool status = true; | |
FILE *fileOld = OpenBlockFile(posOld); | |
if (fileOld) { | |
if (fFinalize) | |
status &= TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize); | |
status &= FileCommit(fileOld); | |
fclose(fileOld); | |
} | |
fileOld = OpenUndoFile(posOld); | |
if (fileOld) { | |
if (fFinalize) | |
status &= TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize); | |
status &= FileCommit(fileOld); | |
fclose(fileOld); | |
} | |
if (!status) { | |
AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error."); | |
} | |
} | |
static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize); | |
static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, CValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams) | |
{ | |
// Write undo information to disk | |
if (pindex->GetUndoPos().IsNull()) { | |
CDiskBlockPos _pos; | |
if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40)) | |
return error("WriteUndoDataForBlock(): FindUndoPos failed"); | |
if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart())) | |
return AbortNode(state, "Failed to write undo data"); | |
// update nUndoPos in block index | |
pindex->nUndoPos = _pos.nPos; | |
pindex->nStatus |= BLOCK_HAVE_UNDO; | |
setDirtyBlockIndex.insert(pindex); | |
} | |
return true; | |
} | |
static CCheckQueue<CScriptCheck> scriptcheckqueue(128); | |
void ThreadScriptCheck() { | |
RenameThread("lbrycrd-scriptch"); | |
scriptcheckqueue.Thread(); | |
} | |
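// The queue above is drained by ThreadScriptCheck workers in batches of up to | |
// 128 checks. ConnectBlock() below feeds it through a CCheckQueueControl | |
// (control.Add(vChecks)) and collects the combined verdict with control.Wait(). | |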
// Protected by cs_main | |
VersionBitsCache versionbitscache; | |
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params) | |
{ | |
LOCK(cs_main); | |
int32_t nVersion = VERSIONBITS_TOP_BITS; | |
for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) { | |
ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache); | |
if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) { | |
nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i)); | |
} | |
} | |
return nVersion; | |
} | |
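// Worked example (hypothetical deployment state): if exactly one deployment is | |
// in STARTED or LOCKED_IN and its assigned bit is 0, the loop above yields | |
// nVersion = VERSIONBITS_TOP_BITS | (1 << 0), i.e. 0x20000001 with the usual | |
// top bits of 0x20000000; once that deployment reaches ACTIVE or FAILED, the | |
// bit is dropped and nVersion reverts to 0x20000000. | |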
/** | |
* Threshold condition checker that triggers when unknown versionbits are seen on the network. | |
*/ | |
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker | |
{ | |
private: | |
int bit; | |
public: | |
explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {} | |
int64_t BeginTime(const Consensus::Params& params) const override { return 0; } | |
int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); } | |
int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; } | |
int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; } | |
bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override | |
{ | |
return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) && | |
((pindex->nVersion >> bit) & 1) != 0 && | |
((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0; | |
} | |
}; | |
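// Worked example (assumed values): for bit = 3, a block version of 0x20000008 | |
// passes the first two clauses of Condition() because | |
// (0x20000008 & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS and | |
// ((0x20000008 >> 3) & 1) == 1; the checker then only fires if our own | |
// ComputeBlockVersion() would not have set bit 3, i.e. the bit is unknown | |
// to this software. | |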
// Protected by cs_main | |
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS]; | |
// 0.13.0 was shipped with a segwit deployment defined for testnet, but not for | |
// mainnet. We no longer need to support disabling the segwit deployment | |
// except for testing purposes, due to limitations of the functional test | |
// environment. See test/functional/p2p-segwit.py. | |
static bool IsScriptWitnessEnabled(const Consensus::Params& params) | |
{ | |
return params.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0; | |
} | |
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) { | |
AssertLockHeld(cs_main); | |
unsigned int flags = SCRIPT_VERIFY_NONE; | |
// BIP16 didn't become active until Apr 1 2012 (on mainnet, and | |
// retroactively applied to testnet) | |
// However, only one historical block violated the P2SH rules (on both | |
// mainnet and testnet), so for simplicity, always leave P2SH | |
// on except for the one violating block. | |
if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain | |
pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity() | |
*pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception | |
{ | |
flags |= SCRIPT_VERIFY_P2SH; | |
} | |
// Enforce WITNESS rules whenever P2SH is in effect (and the segwit | |
// deployment is defined). | |
if (flags & SCRIPT_VERIFY_P2SH && IsScriptWitnessEnabled(consensusparams)) { | |
flags |= SCRIPT_VERIFY_WITNESS; | |
} | |
// Start enforcing the DERSIG (BIP66) rule | |
if (pindex->nHeight >= consensusparams.BIP66Height) { | |
flags |= SCRIPT_VERIFY_DERSIG; | |
} | |
// Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule | |
if (pindex->nHeight >= consensusparams.BIP65Height) { | |
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; | |
} | |
// Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. | |
if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { | |
flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; | |
} | |
if (IsNullDummyEnabled(pindex->pprev, consensusparams)) { | |
flags |= SCRIPT_VERIFY_NULLDUMMY; | |
} | |
return flags; | |
} | |
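// Illustrative use (a sketch; coin, tx, nIn and txdata stand in for caller | |
// context): a caller validating an input under block-level rules combines the | |
// returned flags with a script check, e.g. | |
// | |
//     unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus()); | |
//     CScriptCheck check(coin.out, tx, nIn, flags, /* cacheSigStore */ false, &txdata); | |
//     bool ok = check(); | |
// | |
// On a chain with everything above active, flags is the OR of P2SH, WITNESS, | |
// DERSIG, CHECKLOCKTIMEVERIFY, CHECKSEQUENCEVERIFY and NULLDUMMY. | |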
static int64_t nTimeCheck = 0; | |
static int64_t nTimeForks = 0; | |
static int64_t nTimeVerify = 0; | |
static int64_t nTimeConnect = 0; | |
static int64_t nTimeIndex = 0; | |
static int64_t nTimeCallbacks = 0; | |
static int64_t nTimeTotal = 0; | |
static int64_t nBlocksTotal = 0; | |
/** Apply the effects of this block (with given index) on the UTXO set represented by coins. | |
* Validity checks that depend on the UTXO set are also done; ConnectBlock() | |
* can fail if those validity checks fail (among other reasons). */ | |
bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, | |
CCoinsViewCache& view, CClaimTrieCache& trieCache, const CChainParams& chainparams, bool fJustCheck) | |
{ | |
AssertLockHeld(cs_main); | |
assert(pindex); | |
assert(*pindex->phashBlock == block.GetHash()); | |
int64_t nTimeStart = GetTimeMicros(); | |
// Check it again in case a previous version let a bad block in | |
// NOTE: We don't currently (re-)invoke ContextualCheckBlock() or | |
// ContextualCheckBlockHeader() here. This means that if we add a new | |
// consensus rule that is enforced in one of those two functions, then we | |
// may have let in a block that violates the rule prior to updating the | |
// software, and we would NOT be enforcing the rule here. Fully solving | |
// upgrade from one software version to the next after a consensus rule | |
// change is potentially tricky and issue-specific (see RewindBlockIndex() | |
// for one general approach that was used for BIP 141 deployment). | |
// Also, currently the rule against blocks more than 2 hours in the future | |
// is enforced in ContextualCheckBlockHeader(); we wouldn't want to | |
// re-enforce that rule here (at least until we make it impossible for | |
// GetAdjustedTime() to go backward). | |
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) { | |
if (state.CorruptionPossible()) { | |
// We don't write down blocks to disk if they may have been | |
// corrupted, so this should be impossible unless we're having hardware | |
// problems. | |
return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down"); | |
} | |
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); | |
} | |
// verify that the view's current state corresponds to the previous block | |
uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash(); | |
assert(hashPrevBlock == view.GetBestBlock()); | |
// also verify that the trie cache's current state corresponds to the previous block | |
if (pindex->pprev != nullptr && pindex->pprev->hashClaimTrie != trieCache.getMerkleHash()) { | |
LogPrintf("%s: Previous block claim hash doesn't match current: %s vs %s\n", | |
__func__, pindex->pprev->hashClaimTrie.ToString(), trieCache.getMerkleHash().ToString()); | |
assert(false); | |
} | |
// Special case for the genesis block, skipping connection of its transactions | |
// (its coinbase is unspendable) | |
if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) { | |
if (!fJustCheck) | |
{ | |
view.SetBestBlock(pindex->GetBlockHash()); | |
} | |
/* return true; */ | |
} | |
nBlocksTotal++; | |
bool fScriptChecks = true; | |
if (!hashAssumeValid.IsNull()) { | |
// We've been configured with the hash of a block which has been externally verified to have a valid history. | |
// A suitable default value is included with the software and updated from time to time. Because validity | |
// relative to a piece of software is an objective fact these defaults can be easily reviewed. | |
// This setting doesn't force the selection of any particular chain but makes validating some faster by | |
// effectively caching the result of part of the verification. | |
BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid); | |
if (it != mapBlockIndex.end()) { | |
if (it->second->GetAncestor(pindex->nHeight) == pindex && | |
pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && | |
pindexBestHeader->nChainWork >= nMinimumChainWork) { | |
// This block is a member of the assumed verified chain and an ancestor of the best header. | |
// The equivalent time check discourages hash power from extorting the network via DOS attack | |
// into accepting an invalid block through telling users they must manually set assumevalid. | |
// Requiring a software change or burying the invalid block, regardless of the setting, makes | |
// it hard to hide the implication of the demand. This also avoids having release candidates | |
// that are hardly doing any signature verification at all in testing without having to | |
// artificially set the default assumed verified block further back. | |
// The test against nMinimumChainWork prevents the skipping when denied access to any chain at | |
// least as good as the expected chain. | |
fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2); | |
} | |
} | |
} | |
int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart; | |
LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal); | |
// Do not allow blocks that contain transactions which 'overwrite' older transactions, | |
// unless those are already completely spent. | |
// If such overwrites are allowed, coinbases and transactions depending upon those | |
// can be duplicated to remove the ability to spend the first instance -- even after | |
// being sent to another address. | |
// See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information. | |
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool | |
// already refuses previously-known transaction ids entirely. | |
// This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC. | |
// Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the | |
// two in the chain that violate it. This prevents exploiting the issue against nodes during their | |
// initial block download. | |
bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) || | |
(pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"))); | |
// Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting | |
// with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the | |
// time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first | |
// before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further | |
// duplicate transactions descending from the known pairs either. | |
// If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check. | |
// BIP34 requires that a block at height X (block X) has its coinbase | |
// scriptSig start with a CScriptNum of X (indicated height X). The above | |
// logic of no longer requiring BIP30 once BIP34 activates is flawed in the | |
// case that there is a block X before the BIP34 height of 227,931 which has | |
// an indicated height Y where Y is greater than X. The coinbase for block | |
// X would also be a valid coinbase for block Y, which could be a BIP30 | |
// violation. An exhaustive search of all mainnet coinbases before the | |
// BIP34 height which have an indicated height greater than the block height | |
// reveals many occurrences. The 3 lowest indicated heights found are | |
// 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3 | |
// heights would be the first opportunity for BIP30 to be violated. | |
// The search reveals a great many blocks which have an indicated height | |
// greater than 1,983,702, so we simply remove the optimization to skip | |
// BIP30 checking for blocks at height 1,983,702 or higher. Before we reach | |
// that block in another 25 years or so, we should take advantage of a | |
// future consensus change to do a new and improved version of BIP34 that | |
// will actually prevent ever creating any duplicate coinbases in the | |
// future. | |
static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702; | |
// There is no potential to create a duplicate coinbase at block 209,921 | |
// because this is still before the BIP34 height and so explicit BIP30 | |
// checking is still active. | |
// The final case is block 176,684 which has an indicated height of | |
// 490,897. Unfortunately, this issue was not discovered until about 2 weeks | |
// before block 490,897 so there was not much opportunity to address this | |
// case other than to carefully analyze it and determine it would not be a | |
// problem. Block 490,897 was, in fact, mined with a different coinbase than | |
// block 176,684, but it is important to note that even if it hadn't been or | |
// is remined on an alternate fork with a duplicate coinbase, we would still | |
// not run into a BIP30 violation. This is because the coinbase for 176,684 | |
// is spent in block 185,956 in transaction | |
// d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This | |
// spending transaction can't be duplicated because it also spends coinbase | |
// 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This | |
// coinbase has an indicated height of over 4.2 billion, and wouldn't be | |
// duplicatable until that height, and it's currently impossible to create a | |
// chain that long. Nevertheless we may wish to consider a future soft fork | |
// which retroactively prevents block 490,897 from creating a duplicate | |
// coinbase. The two historical BIP30 violations often provide a confusing | |
// edge case when manipulating the UTXO and it would be simpler not to have | |
// another edge case to deal with. | |
// testnet3 has no blocks before the BIP34 height with indicated heights | |
// post BIP34 before approximately height 486,000,000 and presumably will | |
// be reset before it reaches block 1,983,702 and starts doing unnecessary | |
// BIP30 checking again. | |
if (pindex->pprev) | |
{ | |
CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height); | |
// Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond. | |
fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash)); | |
// TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a | |
// consensus change that ensures coinbases at those heights can not | |
// duplicate earlier coinbases. | |
if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) { | |
for (const auto& tx : block.vtx) { | |
for (size_t o = 0; o < tx->vout.size(); o++) { | |
if (view.HaveCoin(COutPoint(tx->GetHash(), o))) { | |
return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"), | |
REJECT_INVALID, "bad-txns-BIP30"); | |
} | |
} | |
} | |
} | |
} | |
// Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. | |
int nLockTimeFlags = 0; | |
if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { | |
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE; | |
} | |
// Get the script flags for this block | |
unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus()); | |
int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1; | |
LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal); | |
CBlockUndo blockundo; | |
CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : nullptr); | |
trieCache.initializeIncrement(); | |
std::vector<int> prevheights; | |
CAmount nFees = 0; | |
int nInputs = 0; | |
int64_t nSigOpsCost = 0; | |
CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size())); | |
std::vector<std::pair<uint256, CDiskTxPos> > vPos; | |
vPos.reserve(block.vtx.size()); | |
blockundo.vtxundo.reserve(block.vtx.size() - 1); | |
std::vector<PrecomputedTransactionData> txdata; | |
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated | |
for (unsigned int i = 0; i < block.vtx.size(); i++) | |
{ | |
const CTransaction &tx = *(block.vtx[i]); | |
std::map<unsigned int, unsigned int> mClaimUndoHeights; | |
nInputs += tx.vin.size(); | |
if (!tx.IsCoinBase()) | |
{ | |
CAmount txfee = 0; | |
if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) { | |
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state)); | |
} | |
nFees += txfee; | |
if (!MoneyRange(nFees)) { | |
return state.DoS(100, error("%s: accumulated fee in the block out of range.", __func__), | |
REJECT_INVALID, "bad-txns-accumulated-fee-outofrange"); | |
} | |
// Check that transaction is BIP68 final | |
// BIP68 lock checks (as opposed to nLockTime checks) must | |
// be in ConnectBlock because they require the UTXO set | |
prevheights.resize(tx.vin.size()); | |
for (size_t j = 0; j < tx.vin.size(); j++) { | |
prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight; | |
} | |
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) { | |
return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__), | |
REJECT_INVALID, "bad-txns-nonfinal"); | |
} | |
} | |
// GetTransactionSigOpCost counts 3 types of sigops: | |
// * legacy (always) | |
// * p2sh (when P2SH enabled in flags and excludes coinbase) | |
// * witness (when witness enabled in flags and excludes coinbase) | |
nSigOpsCost += GetTransactionSigOpCost(tx, view, flags); | |
if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) | |
return state.DoS(100, error("ConnectBlock(): too many sigops"), | |
REJECT_INVALID, "bad-blk-sigops"); | |
txdata.emplace_back(tx); | |
if (!tx.IsCoinBase()) | |
{ | |
std::vector<CScriptCheck> vChecks; | |
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */ | |
if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr)) | |
return error("ConnectBlock(): CheckInputs on %s failed with %s", | |
tx.GetHash().ToString(), FormatStateMessage(state)); | |
control.Add(vChecks); | |
CUpdateCacheCallbacks callbacks = { | |
.findScriptKey = {}, | |
.claimUndoHeights = [&mClaimUndoHeights](int index, int nValidAtHeight) { | |
mClaimUndoHeights.emplace(index, nValidAtHeight); | |
} | |
}; | |
UpdateCache(tx, trieCache, view, pindex->nHeight, callbacks); | |
} | |
CTxUndo undoDummy; | |
if (i > 0) { | |
blockundo.vtxundo.push_back(CTxUndo()); | |
} | |
UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight); | |
if (i > 0 && !mClaimUndoHeights.empty()) | |
{ | |
auto& txinUndos = blockundo.vtxundo.back().vprevout; | |
for (auto itHeight = mClaimUndoHeights.begin(); itHeight != mClaimUndoHeights.end(); ++itHeight) | |
{ | |
txinUndos[itHeight->first].nClaimValidHeight = itHeight->second; | |
txinUndos[itHeight->first].fIsClaim = true; | |
} | |
} | |
// The CTxUndo vector contains the heights at which claims should be put into the trie. | |
// This is necessary because some claims are inserted immediately into the trie, and | |
// others are inserted after a delay, depending on the state of the claim trie at the time | |
// that the claim was originally inserted into the blockchain. That state will not be | |
// available when and if this block is disconnected. | |
// It also contains whether or not any given txin represents a claim that should | |
// be put back into the trie. If we didn't find a claim or support in the trie | |
// or cache when trying to spend it, we shouldn't try to put a claim or support back | |
// in. Some OP_UPDATE_CLAIM operations, for example, may be invalid, and so may never have been | |
// inserted into the trie in the first place. | |
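// Example with hypothetical heights: a claim created at height 900 with a | |
// 100-block activation delay records nClaimValidHeight = 1000 in its undo | |
// entry when spent. If this block is later disconnected, ApplyTxInUndo() | |
// sees nValidHeight (1000) >= undo.nHeight (900) and re-inserts the claim | |
// with its original activation height rather than recomputing the delay | |
// against a claim trie state that no longer exists. | |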
vPos.push_back(std::make_pair(tx.GetHash(), pos)); | |
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION); | |
} | |
// TODO: if the "just check" flag is set, we should reduce the work done here. Incrementing blocks twice per mine is not efficient. | |
const auto incremented = trieCache.incrementBlock(blockundo.insertUndo, blockundo.expireUndo, blockundo.insertSupportUndo, blockundo.expireSupportUndo, blockundo.takeoverHeightUndo); | |
assert(incremented); | |
if (trieCache.getMerkleHash() != block.hashClaimTrie) | |
{ | |
if (!trieCache.empty()) // we could run checkConsistency here, but it would take a while | |
trieCache.dumpToLog(trieCache.find({})); | |
return state.DoS(100, error("ConnectBlock() : the merkle root of the claim trie does not match " | |
"(actual=%s vs block=%s on height=%d)", trieCache.getMerkleHash().GetHex(), | |
block.hashClaimTrie.GetHex(), pindex->nHeight), REJECT_INVALID, "bad-claim-merkle-hash"); | |
} | |
int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2; | |
LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal); | |
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus()); | |
if (block.vtx[0]->GetValueOut() > blockReward) | |
return state.DoS(100, | |
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)", | |
block.vtx[0]->GetValueOut(), blockReward), | |
REJECT_INVALID, "bad-cb-amount"); | |
if (!control.Wait()) | |
return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed"); | |
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2; | |
LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal); | |
if (fJustCheck) | |
return true; | |
if (pindex->pprev != nullptr && !WriteUndoDataForBlock(blockundo, state, pindex, chainparams)) | |
return false; | |
if (!pblocktree->WriteTxIndex(vPos)) | |
return AbortNode(state, "Failed to write transaction index"); | |
if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) { | |
pindex->RaiseValidity(BLOCK_VALID_SCRIPTS); | |
setDirtyBlockIndex.insert(pindex); | |
} | |
assert(pindex->phashBlock); | |
// add this block to the view's block chain | |
view.SetBestBlock(pindex->GetBlockHash()); | |
int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4; | |
LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal); | |
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5; | |
LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal); | |
return true; | |
} | |
/** | |
* Update the on-disk chain state. | |
* The caches and indexes are flushed depending on the mode we're called with | |
* if they're too large, if it's been a while since the last write, | |
* or always and in all cases if we're in prune mode and are deleting files. | |
* | |
* If FlushStateMode::NONE is used, then FlushStateToDisk(...) won't do anything | |
* besides checking if we need to prune. | |
*/ | |
bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) { | |
int64_t nMempoolUsage = mempool.DynamicMemoryUsage(); | |
LOCK(cs_main); | |
static int64_t nLastWrite = 0; | |
static int64_t nLastFlush = 0; | |
std::set<int> setFilesToPrune; | |
bool full_flush_completed = false; | |
try { | |
{ | |
bool fFlushForPrune = false; | |
bool fDoFullFlush = false; | |
LOCK(cs_LastBlockFile); | |
if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { | |
if (nManualPruneHeight > 0) { | |
FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight); | |
} else { | |
FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight()); | |
fCheckForPruning = false; | |
} | |
if (!setFilesToPrune.empty()) { | |
fFlushForPrune = true; | |
if (!fHavePruned) { | |
pblocktree->WriteFlag("prunedblockfiles", true); | |
fHavePruned = true; | |
} | |
} | |
} | |
int64_t nNow = GetTimeMicros(); | |
// Avoid writing/flushing immediately after startup. | |
if (nLastWrite == 0) { | |
nLastWrite = nNow; | |
} | |
if (nLastFlush == 0) { | |
nLastFlush = nNow; | |
} | |
int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; | |
int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); | |
int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0); | |
// The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing). | |
bool fCacheLarge = mode == FlushStateMode::PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); | |
// The cache is over the limit, we have to write now. | |
bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cacheSize > nTotalSpace; | |
// It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. | |
bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; | |
// It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. | |
bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; | |
// Combine all conditions that result in a full cache flush. | |
fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; | |
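// Worked example of fCacheLarge (hypothetical sizes, assuming | |
// MAX_BLOCK_COINSDB_USAGE is expressed in MiB): with nTotalSpace = 1000 MiB | |
// and MAX_BLOCK_COINSDB_USAGE = 10, the threshold is | |
// max(900 MiB, 990 MiB) = 990 MiB, so the periodic flush is forced only once | |
// the cache is both past 90% of its budget and within 10 MiB of it. | |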
// Write blocks and block index to disk. | |
if (fDoFullFlush || fPeriodicWrite) { | |
// Depend on nMinDiskSpace to ensure we can write block index | |
if (!CheckDiskSpace(0, true)) | |
return state.Error("out of disk space"); | |
// First make sure all block and undo data is flushed to disk. | |
FlushBlockFile(); | |
// Then update all block file information (which may refer to block and undo files). | |
{ | |
std::vector<std::pair<int, const CBlockFileInfo*> > vFiles; | |
vFiles.reserve(setDirtyFileInfo.size()); | |
for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { | |
vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it])); | |
setDirtyFileInfo.erase(it++); | |
} | |
std::vector<const CBlockIndex*> vBlocks; | |
vBlocks.reserve(setDirtyBlockIndex.size()); | |
for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { | |
vBlocks.push_back(*it); | |
setDirtyBlockIndex.erase(it++); | |
} | |
if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { | |
return AbortNode(state, "Failed to write to block index database"); | |
} | |
} | |
// Finally remove any pruned files | |
if (fFlushForPrune) | |
UnlinkPrunedFiles(setFilesToPrune); | |
nLastWrite = nNow; | |
} | |
// Flush best chain related state. This can only be done if the blocks / block index write was also done. | |
if (fDoFullFlush && !pcoinsTip->GetBestBlock().IsNull()) { | |
// Typical Coin structures on disk are around 48 bytes in size. | |
// Pushing a new one to the database can cause it to be written | |
// twice (once in the log, and once in the tables). This is already | |
// an overestimation, as most will delete an existing entry or | |
// overwrite one. Still, use a conservative safety factor of 2. | |
if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize())) | |
return state.Error("out of disk space"); | |
if (mode == FlushStateMode::ALWAYS && !pclaimTrie->SyncToDisk()) | |
return state.Error("Failed to write to claim trie database"); | |
// Flush the chainstate (which may refer to block index entries). | |
if (!pcoinsTip->Flush()) | |
return AbortNode(state, "Failed to write to coin database"); | |
nLastFlush = nNow; | |
full_flush_completed = true; | |
} | |
} | |
if (full_flush_completed) { | |
// Update best block in wallet (so we can detect restored wallets). | |
GetMainSignals().ChainStateFlushed(chainActive.GetLocator()); | |
LogPrint(BCLog::BENCH, "Finished full disk flush in %.2fms\n", (GetTimeMicros() - nLastFlush) * MILLI); | |
} | |
} catch (const std::runtime_error& e) { | |
return AbortNode(state, std::string("System error while flushing: ") + e.what()); | |
} | |
return true; | |
} | |
void FlushStateToDisk() { | |
CValidationState state; | |
const CChainParams& chainparams = Params(); | |
if (!FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) { | |
LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state)); | |
} | |
} | |
void PruneAndFlush() { | |
CValidationState state; | |
fCheckForPruning = true; | |
const CChainParams& chainparams = Params(); | |
if (!FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) { | |
LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state)); | |
} | |
} | |
static void DoWarning(const std::string& strWarning) | |
{ | |
static bool fWarned = false; | |
SetMiscWarning(strWarning); | |
if (!fWarned) { | |
AlertNotify(strWarning); | |
fWarned = true; | |
} | |
} | |
/** Private helper function that concatenates warning messages. */ | |
static void AppendWarning(std::string& res, const std::string& warn) | |
{ | |
if (!res.empty()) res += ", "; | |
res += warn; | |
} | |
/** Check warning conditions and do some notifications on new chain tip set. */ | |
void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainParams) { | |
// New best block | |
mempool.AddTransactionsUpdated(1); | |
{ | |
WaitableLock lock(g_best_block_mutex); | |
g_best_block = pindexNew->GetBlockHash(); | |
g_best_block_cv.notify_all(); | |
} | |
std::string warningMessages; | |
auto isInitialBlockDownload = IsInitialBlockDownload(); | |
if (!isInitialBlockDownload) | |
{ | |
int nUpgraded = 0; | |
const CBlockIndex* pindex = pindexNew; | |
for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) { | |
WarningBitsConditionChecker checker(bit); | |
ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]); | |
if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) { | |
const std::string strWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit); | |
if (state == ThresholdState::ACTIVE) { | |
DoWarning(strWarning); | |
} else { | |
AppendWarning(warningMessages, strWarning); | |
} | |
} | |
} | |
// Check the version of the last 100 blocks to see if we need to upgrade: | |
for (int i = 0; i < 100 && pindex != nullptr; i++) | |
{ | |
int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus()); | |
if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0) | |
++nUpgraded; | |
pindex = pindex->pprev; | |
} | |
if (nUpgraded > 0) | |
AppendWarning(warningMessages, strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded)); | |
if (nUpgraded > 100/2) | |
{ | |
std::string strWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect"); | |
// notify GetWarnings(), called by Qt and the JSON-RPC code to warn the user: | |
DoWarning(strWarning); | |
} | |
} | |
static int64_t lastBlockPrintTime = 0; | |
auto currentTime = GetAdjustedTime(); | |
if (!warningMessages.empty() || !isInitialBlockDownload || lastBlockPrintTime < currentTime - 15 || LogAcceptCategory(BCLog::CLAIMS)) { | |
lastBlockPrintTime = currentTime; | |
LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g txb=%lu tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s", | |
__func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion, | |
log(pindexNew->nChainWork.getdouble()) / log(2.0), (unsigned long) pindexNew->nTx, | |
(unsigned long) pindexNew->nChainTx, FormatISO8601DateTime(pindexNew->GetBlockTime()), | |
GuessVerificationProgress(chainParams.TxData(), pindexNew), | |
pcoinsTip->DynamicMemoryUsage() * (1.0 / (1U << 20U)), pcoinsTip->GetCacheSize(), | |
isInitialBlockDownload ? " IBD" : ""); | |
if (!warningMessages.empty()) | |
LogPrintf(" warning='%s'", warningMessages); /* Continued */ | |
LogPrintf("\n"); | |
} | |
} | |
/** Disconnect chainActive's tip. | |
* After calling, the mempool will be in an inconsistent state, with | |
* transactions from disconnected blocks being added to disconnectpool. You | |
* should make the mempool consistent again by calling UpdateMempoolForReorg, | |
* with cs_main held. | |
* | |
* If disconnectpool is nullptr, then no disconnected transactions are added to | |
* disconnectpool (note that the caller is responsible for mempool consistency | |
* in any case). | |
*/ | |
bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool) | |
{ | |
CBlockIndex *pindexDelete = chainActive.Tip(); | |
assert(pindexDelete); | |
// Read block from disk. | |
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); | |
CBlock& block = *pblock; | |
if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus())) | |
return AbortNode(state, "Failed to read block"); | |
// Apply the block atomically to the chain state. | |
int64_t nStart = GetTimeMicros(); | |
{ | |
CCoinsViewCache view(pcoinsTip.get()); | |
CClaimTrieCache trieCache(pclaimTrie); | |
assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); | |
if (DisconnectBlock(block, pindexDelete, view, trieCache) != DISCONNECT_OK) | |
return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); | |
bool flushed = view.Flush(); | |
assert(flushed); | |
assert(trieCache.flush()); | |
assert(pindexDelete->pprev->hashClaimTrie == trieCache.getMerkleHash()); | |
} | |
LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI); | |
// Write the chain state to disk, if necessary. | |
if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED)) | |
return false; | |
if (disconnectpool) { | |
// Save transactions to re-add to mempool at end of reorg | |
for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) { | |
disconnectpool->addTransaction(*it); | |
} | |
while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) { | |
// Drop the earliest entry, and remove its children from the mempool. | |
auto it = disconnectpool->queuedTx.get<insertion_order>().begin(); | |
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); | |
disconnectpool->removeEntry(it); | |
} | |
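// (Assuming upstream Bitcoin's MAX_DISCONNECTED_TX_POOL_SIZE of 20,000 KB, | |
// the cap above allows roughly 20 MB of in-memory transaction data to be | |
// carried across the reorg for re-injection into the mempool.) | |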
} | |
chainActive.SetTip(pindexDelete->pprev); | |
UpdateTip(pindexDelete->pprev, chainparams); | |
// Let wallets know transactions went from 1-confirmed to | |
// 0-confirmed or conflicted: | |
GetMainSignals().BlockDisconnected(pblock); | |
return true; | |
} | |
static int64_t nTimeReadFromDisk = 0; | |
static int64_t nTimeConnectTotal = 0; | |
static int64_t nTimeFlush = 0; | |
static int64_t nTimeChainState = 0; | |
static int64_t nTimePostConnect = 0; | |
struct PerBlockConnectTrace { | |
CBlockIndex* pindex = nullptr; | |
std::shared_ptr<const CBlock> pblock; | |
std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs; | |
PerBlockConnectTrace() : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {} | |
}; | |
/** | |
* Used to track blocks whose transactions were applied to the UTXO state as a | |
* part of a single ActivateBestChainStep call. | |
* | |
* This class also tracks transactions that are removed from the mempool as | |
* conflicts (per block) and can be used to pass all those transactions | |
* through SyncTransaction. | |
* | |
* This class assumes (and asserts) that the conflicted transactions for a given | |
* block are added via mempool callbacks prior to the BlockConnected() associated | |
* with those transactions. If any transactions are marked conflicted, it is | |
* assumed that an associated block will always be added. | |
* | |
* This class is single-use: once you call GetBlocksConnected() you have to throw | |
* it away and make a new one. | |
*/ | |
class ConnectTrace { | |
private: | |
std::vector<PerBlockConnectTrace> blocksConnected; | |
CTxMemPool &pool; | |
public: | |
explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) { | |
pool.NotifyEntryRemoved.connect(boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); | |
} | |
~ConnectTrace() { | |
pool.NotifyEntryRemoved.disconnect(boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); | |
} | |
void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) { | |
assert(!blocksConnected.back().pindex); | |
assert(pindex); | |
assert(pblock); | |
blocksConnected.back().pindex = pindex; | |
blocksConnected.back().pblock = std::move(pblock); | |
blocksConnected.emplace_back(); | |
} | |
std::vector<PerBlockConnectTrace>& GetBlocksConnected() { | |
// We always keep one extra block at the end of our list because | |
// blocks are added after all the conflicted transactions have | |
// been filled in. Thus, the last entry should always be an empty | |
// one waiting for the transactions from the next block. We pop | |
// the last entry here to make sure the list we return is sane. | |
assert(!blocksConnected.back().pindex); | |
assert(blocksConnected.back().conflictedTxs->empty()); | |
blocksConnected.pop_back(); | |
return blocksConnected; | |
} | |
void NotifyEntryRemoved(CTransactionRef txRemoved, MemPoolRemovalReason reason) { | |
assert(!blocksConnected.back().pindex); | |
if (reason == MemPoolRemovalReason::CONFLICT) { | |
blocksConnected.back().conflictedTxs->emplace_back(std::move(txRemoved)); | |
} | |
} | |
}; | |
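// Typical use (a sketch of the pattern in ActivateBestChainStep below; the | |
// notification call follows upstream Bitcoin's BlockConnected signal): | |
// | |
//     ConnectTrace connectTrace(mempool); | |
//     if (ConnectTip(state, chainparams, pindexNew, pblock, connectTrace, disconnectpool)) { | |
//         for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) | |
//             GetMainSignals().BlockConnected(trace.pblock, trace.pindex, trace.conflictedTxs); | |
//     } | |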
/** | |
* Connect a new block to chainActive. pblock is either nullptr or a pointer to a CBlock | |
* corresponding to pindexNew, to bypass loading it again from disk. | |
* | |
* The block is added to connectTrace if connection succeeds. | |
*/ | |
bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool) | |
{ | |
assert(pindexNew->pprev == chainActive.Tip()); | |
// Read block from disk. | |
int64_t nTime1 = GetTimeMicros(); | |
std::shared_ptr<const CBlock> pthisBlock; | |
if (!pblock) { | |
std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>(); | |
if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus())) | |
return AbortNode(state, "Failed to read block"); | |
pthisBlock = pblockNew; | |
} else { | |
pthisBlock = pblock; | |
} | |
const CBlock& blockConnecting = *pthisBlock; | |
// Apply the block atomically to the chain state. | |
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; | |
int64_t nTime3; | |
LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO); | |
{ | |
CCoinsViewCache view(pcoinsTip.get()); | |
CClaimTrieCache trieCache(pclaimTrie); | |
bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, trieCache, chainparams); | |
GetMainSignals().BlockChecked(blockConnecting, state); | |
if (!rv) { | |
if (state.IsInvalid()) | |
InvalidBlockFound(pindexNew, state); | |
return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString()); | |
} | |
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; | |
LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal); | |
bool flushed = view.Flush(); | |
assert(flushed); | |
flushed = trieCache.flush(); | |
assert(flushed); | |
} | |
int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; | |
LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal); | |
// Write the chain state to disk, if necessary. | |
if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED)) | |
return false; | |
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; | |
LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal); | |
// Remove conflicting transactions from the mempool. | |
mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight); | |
disconnectpool.removeForBlock(blockConnecting.vtx); | |
// Update chainActive & related variables. | |
chainActive.SetTip(pindexNew); | |
UpdateTip(pindexNew, chainparams); | |
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; | |
LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal); | |
LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal); | |
connectTrace.BlockConnected(pindexNew, std::move(pthisBlock)); | |
return true; | |
} | |
/** | |
* Return the tip of the chain with the most work in it, that isn't | |
* known to be invalid (it's however far from certain to be valid). | |
*/ | |
CBlockIndex* CChainState::FindMostWorkChain() { | |
do { | |
CBlockIndex *pindexNew = nullptr; | |
// Find the best candidate header. | |
{ | |
std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin(); | |
if (it == setBlockIndexCandidates.rend()) | |
return nullptr; | |
pindexNew = *it; | |
} | |
// Check whether all blocks on the path between the currently active chain and the candidate are valid. | |
// Just going until the active chain is an optimization, as we know all blocks in it are valid already. | |
CBlockIndex *pindexTest = pindexNew; | |
bool fInvalidAncestor = false; | |
while (pindexTest && !chainActive.Contains(pindexTest)) { | |
assert(pindexTest->nChainTx || pindexTest->nHeight == 0); | |
// Pruned nodes may have entries in setBlockIndexCandidates for | |
// which block files have been deleted. Remove those as candidates | |
// for the most work chain if we come across them; we can't switch | |
// to a chain unless we have all the non-active-chain parent blocks. | |
bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK; | |
bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); | |
if (fFailedChain || fMissingData) { | |
// Candidate chain is not usable (either invalid or missing data) | |
if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork)) | |
pindexBestInvalid = pindexNew; | |
CBlockIndex *pindexFailed = pindexNew; | |
// Remove the entire chain from the set. | |
while (pindexTest != pindexFailed) { | |
if (fFailedChain) { | |
pindexFailed->nStatus |= BLOCK_FAILED_CHILD; | |
} else if (fMissingData) { | |
// If we're missing data, then add back to mapBlocksUnlinked, | |
// so that if the block arrives in the future we can try adding | |
// to setBlockIndexCandidates again. | |
mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed)); | |
} | |
setBlockIndexCandidates.erase(pindexFailed); | |
pindexFailed = pindexFailed->pprev; | |
} | |
setBlockIndexCandidates.erase(pindexTest); | |
fInvalidAncestor = true; | |
break; | |
} | |
pindexTest = pindexTest->pprev; | |
} | |
if (!fInvalidAncestor) | |
return pindexNew; | |
} while(true); | |
} | |
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */ | |
void CChainState::PruneBlockIndexCandidates() { | |
// Note that we can't delete the current block itself, as we may need to return to it later in case a | |
// reorganization to a better block fails. | |
std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin(); | |
while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) { | |
setBlockIndexCandidates.erase(it++); | |
} | |
// Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates. | |
assert(!setBlockIndexCandidates.empty()); | |
} | |
/** | |
* Try to make some progress towards making pindexMostWork the active block. | |
* pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork. | |
*/ | |
bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) | |
{ | |
AssertLockHeld(cs_main); | |
const CBlockIndex *pindexOldTip = chainActive.Tip(); | |
const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork); | |
// Disconnect active blocks which are no longer in the best chain. | |
bool fBlocksDisconnected = false; | |
DisconnectedBlockTransactions disconnectpool; | |
while (chainActive.Tip() && chainActive.Tip() != pindexFork) { | |
if (!DisconnectTip(state, chainparams, &disconnectpool)) { | |
// This is likely a fatal error, but keep the mempool consistent, | |
// just in case. Only remove from the mempool in this case. | |
UpdateMempoolForReorg(disconnectpool, false); | |
return false; | |
} | |
fBlocksDisconnected = true; | |
} | |
// Build list of new blocks to connect. | |
std::vector<CBlockIndex*> vpindexToConnect; | |
bool fContinue = true; | |
int nHeight = pindexFork ? pindexFork->nHeight : -1; | |
while (fContinue && nHeight != pindexMostWork->nHeight) { | |
// Don't iterate the entire list of potential improvements toward the best tip, as we likely only need | |
// a few blocks along the way. | |
int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight); | |
vpindexToConnect.clear(); | |
vpindexToConnect.reserve(nTargetHeight - nHeight); | |
CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight); | |
while (pindexIter && pindexIter->nHeight != nHeight) { | |
vpindexToConnect.push_back(pindexIter); | |
pindexIter = pindexIter->pprev; | |
} | |
nHeight = nTargetHeight; | |
// Connect new blocks. | |
for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) { | |
if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) { | |
if (state.IsInvalid()) { | |
// The block violates a consensus rule. | |
if (!state.CorruptionPossible()) { | |
InvalidChainFound(vpindexToConnect.front()); | |
} | |
state = CValidationState(); | |
fInvalidFound = true; | |
fContinue = false; | |
break; | |
} else { | |
// A system error occurred (disk space, database error, ...). | |
// Make the mempool consistent with the current tip, just in case | |
// any observers try to use it before shutdown. | |
UpdateMempoolForReorg(disconnectpool, false); | |
return false; | |
} | |
} else { | |
PruneBlockIndexCandidates(); | |
if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) { | |
// We're in a better position than we were. Return temporarily to release the lock. | |
fContinue = false; | |
break; | |
} | |
} | |
} | |
} | |
if (fBlocksDisconnected) { | |
// If any blocks were disconnected, disconnectpool may be non-empty. Add | |
// any disconnected transactions back to the mempool. | |
UpdateMempoolForReorg(disconnectpool, true); | |
} | |
mempool.check(pcoinsTip.get()); | |
// Callbacks/notifications for a new best chain. | |
if (fInvalidFound) | |
CheckForkWarningConditionsOnNewFork(vpindexToConnect.back()); | |
else | |
CheckForkWarningConditions(); | |
return true; | |
} | |
static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) { | |
bool fNotify = false; | |
bool fInitialBlockDownload = false; | |
static CBlockIndex* pindexHeaderOld = nullptr; | |
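// pindexHeaderOld is function-static: it persists across calls and | |
// records the last header tip we issued a notification for, so we only | |
// notify when the best header actually changes. | |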
CBlockIndex* pindexHeader = nullptr; | |
{ | |
LOCK(cs_main); | |
pindexHeader = pindexBestHeader; | |
if (pindexHeader != pindexHeaderOld) { | |
fNotify = true; | |
fInitialBlockDownload = IsInitialBlockDownload(); | |
pindexHeaderOld = pindexHeader; | |
} | |
} | |
// Send header tip changed notifications without holding cs_main | |
if (fNotify) { | |
uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader); | |
} | |
} | |
/** | |
* Make the best chain active, in multiple steps. The result is either failure | |
* or an activated best chain. pblock is either nullptr or a pointer to a block | |
* that is already loaded (to avoid loading it again from disk). | |
* | |
* ActivateBestChain is split into steps (see ActivateBestChainStep) so that | |
* we avoid holding cs_main for an extended period of time; the length of this | |
* call may be quite long during reindexing or a substantial reorg. | |
*/ | |
bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) { | |
// Note that while we're often called here from ProcessNewBlock, this is | |
// far from a guarantee. P2P and RPC code paths will often end up calling | |
// us in the middle of ProcessNewBlock - do not assume pblock is set | |
// sanely for performance or correctness! | |
AssertLockNotHeld(cs_main); | |
// ABC maintains a fair degree of expensive-to-calculate internal state. | |
// Because this function periodically releases cs_main (so that it does | |
// not lock up other threads for too long during large connects, and to | |
// allow e.g. the callback queue to drain), we use m_cs_chainstate to | |
// enforce mutual exclusion so that only one caller may execute this | |
// function at a time. | |
LOCK(m_cs_chainstate); | |
CBlockIndex *pindexMostWork = nullptr; | |
CBlockIndex *pindexNewTip = nullptr; | |
int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT); | |
do { | |
boost::this_thread::interruption_point(); | |
if (GetMainSignals().CallbacksPending() > 10) { | |
// Block until the validation queue drains. This should largely | |
// never happen in normal operation, but may happen during | |
// reindex, causing memory blowup if we run too far ahead. | |
// Note that if a validationinterface callback ends up calling | |
// ActivateBestChain this may lead to a deadlock! We should | |
// probably have a DEBUG_LOCKORDER test for this in the future. | |
SyncWithValidationInterfaceQueue(); | |
} | |
{ | |
LOCK(cs_main); | |
CBlockIndex* starting_tip = chainActive.Tip(); | |
bool blocks_connected = false; | |
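// The inner loop below connects batches of blocks without releasing | |
// cs_main; it repeats until the active tip sorts at least as good as | |
// starting_tip under CBlockIndexWorkComparator (or there is nothing | |
// left to do), guaranteeing forward progress before the lock is freed. | |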
do { | |
// We absolutely may not unlock cs_main until we've made forward progress | |
// (with the exception of shutdown due to hardware issues, low disk space, etc). | |
ConnectTrace connectTrace(mempool); // Destructed before cs_main is unlocked | |
if (pindexMostWork == nullptr) { | |
pindexMostWork = FindMostWorkChain(); | |
} | |
// Whether we have anything to do at all. | |
if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip()) { | |
break; | |
} | |
bool fInvalidFound = false; | |
std::shared_ptr<const CBlock> nullBlockPtr; | |
if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) | |
return false; | |
blocks_connected = true; | |
if (fInvalidFound) { | |
// Wipe cache, we may need another branch now. | |
pindexMostWork = nullptr; | |
} | |
pindexNewTip = chainActive.Tip(); | |
for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) { | |
assert(trace.pblock && trace.pindex); | |
GetMainSignals().BlockConnected(trace.pblock, trace.pindex, trace.conflictedTxs); | |
} | |
} while (!chainActive.Tip() || (starting_tip && CBlockIndexWorkComparator()(chainActive.Tip(), starting_tip))); | |
if (!blocks_connected) return true; | |
const CBlockIndex* pindexFork = chainActive.FindFork(starting_tip); | |
bool fInitialDownload = IsInitialBlockDownload(); | |
// Notify external listeners about the new tip. | |
// Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected | |
if (pindexFork != pindexNewTip) { | |
// Notify ValidationInterface subscribers | |
GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); | |
// Always notify the UI if a new block tip was connected | |
uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip); | |
} | |
} | |
// When we reach this point, we switched to a new tip (stored in pindexNewTip). | |
if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown(); | |
// We check shutdown only after giving ActivateBestChainStep a chance to run once so that we | |
// never shutdown before connecting the genesis block during LoadChainTip(). Previously this | |
// caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks | |
// that the best block hash is non-null. | |
if (ShutdownRequested()) | |
break; | |
} while (pindexNewTip != pindexMostWork); | |
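// The outer loop above exits once the active tip is the most-work | |
// candidate we were aiming for, or earlier if shutdown was requested. | |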
auto& consensus = chainparams.GetConsensus(); | |
CheckBlockIndex(consensus); | |
auto flushMode = FlushStateMode::PERIODIC; | |
if (pindexNewTip && chainparams.NetworkIDString() != CBaseChainParams::REGTEST | |
&& pindexNewTip->nTime + consensus.nPowTargetSpacing > GetAdjustedTime()) { | |
// Trying to ensure that we flush to disk after new blocks when we're caught up to the chain: | |
// block timestamps are technically allowed to be about two hours off, but in practice a fresh | |
// tip is within roughly one target spacing of the current adjusted time. | |
flushMode = FlushStateMode::ALWAYS; | |
} | |
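// Illustrative example (assuming LBRYcrd mainnet's 2.5-minute target | |
// spacing): a tip block timestamped 12:00:00 forces an immediate flush | |
// whenever our adjusted time is still before 12:02:30. | |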
return FlushStateToDisk(chainparams, state, flushMode); | |
} | |
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) { | |
return g_chainstate.ActivateBestChain(state, chainparams, std::move(pblock)); | |
} | |
bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex) | |
{ | |
{ | |
LOCK(cs_main); | |
if (pindex->nChainWork < chainActive.Tip()->nChainWork) { | |
// Nothing to do, this block is not at the tip. | |
return true; | |
} | |
if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) { | |
// The chain has been extended since the last call, reset the counter. | |
nBlockReverseSequenceId = -1; | |
} | |
nLastPreciousChainwork = chainActive.Tip()->nChainWork; | |
setBlockIndexCandidates.erase(pindex); | |
pindex->nSequenceId = nBlockReverseSequenceId; | |
if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) { | |
// We can't keep reducing the counter if somebody really wants to | |
// call preciousblock 2**31-1 times on the same set of tips... | |
nBlockReverseSequenceId--; | |
} | |
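// Illustrative example: among tips with equal chain work, | |
// CBlockIndexWorkComparator prefers the lower nSequenceId, so the first | |
// preciousblock call after a reset tags its block with -1, the next | |
// with -2, and so on; each later call therefore outranks earlier ones. | |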
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->nChainTx) { | |
setBlockIndexCandidates.insert(pindex); | |
PruneBlockIndexCandidates(); | |
} | |
} | |
return ActivateBestChain(state, params, std::shared_ptr<const CBlock>()); | |
} | |
bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex) { | |
return g_chainstate.PreciousBlock(state, params, pindex); | |
} | |
bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) | |
{ | |
AssertLockHeld(cs_main); | |
// We first disconnect backwards and then mark the blocks as invalid. | |
// This prevents a case where pruned nodes may fail to invalidateblock | |
// and be left unable to start as they have no tip candidates (as there | |
// are no blocks that meet the "have data and are not invalid per | |
// nStatus" criteria for inclusion in setBlockIndexCandidates). | |
bool pindex_was_in_chain = false; | |
CBlockIndex *invalid_walk_tip = chainActive.Tip(); | |
DisconnectedBlockTransactions disconnectpool; | |
while (chainActive.Contains(pindex)) { | |
pindex_was_in_chain = true; | |
// ActivateBestChain considers blocks already in chainActive | |
// unconditionally valid, so force disconnect away from it. | |
if (!DisconnectTip(state, chainparams, &disconnectpool)) { | |
// It's probably hopeless to try to make the mempool consistent | |
// here if DisconnectTip failed, but we can try. | |
UpdateMempoolForReorg(disconnectpool, false); | |
return false; | |
} | |
} | |
// Now mark the blocks we just disconnected (descendants of pindex) | |
// as invalid (note this may not cover all of pindex's descendants). | |
while (pindex_was_in_chain && invalid_walk_tip != pindex) { | |
invalid_walk_tip->nStatus |= BLOCK_FAILED_CHILD; | |
setDirtyBlockIndex.insert(invalid_walk_tip); | |
setBlockIndexCandidates.erase(invalid_walk_tip); | |
invalid_walk_tip = invalid_walk_tip->pprev; | |
} | |
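// Note the distinction: the disconnected descendants above were marked | |
// BLOCK_FAILED_CHILD (invalid because of an invalid ancestor), while | |
// pindex itself gets BLOCK_FAILED_VALID below. | |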
// Mark the block itself as invalid. | |
pindex->nStatus |= BLOCK_FAILED_VALID; | |
setDirtyBlockIndex.insert(pindex); | |
setBlockIndexCandidates.erase(pindex); | |