// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Copyright (c) 2011-2012 Tenebrix, Litecoin developers
// Copyright (c) 2012-2013 Freicoin developers
// Copyright (c) 2013-2079 Dr. Kimoto Chan
// Copyright (c) 2013-2079 The Megacoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "alert.h"
#include "checkpoints.h"
#include "db.h"
#include "txdb.h"
#include "net.h"
#include "init.h"
#include "util.h" // using DoubleToNumeratorDenominator
#include "ui_interface.h"
#include "checkqueue.h"
#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
using namespace std;
using namespace boost;
//
// Global state
//
CCriticalSection cs_setpwalletRegistered;
set<CWallet*> setpwalletRegistered;
CCriticalSection cs_main;
CTxMemPool mempool;
unsigned int nTransactionsUpdated = 0;
map<uint256, CBlockIndex*> mapBlockIndex;
std::vector<CBlockIndex*> vBlockIndexByHeight;
uint256 hashGenesisBlock("0x7520788e2d99eec7cf6cf7315577e1268e177fff94cb0a7caf6a458ceeea9ac2");
static CBigNum bnProofOfWorkLimit(~uint256(0) >> 20);
CBlockIndex* pindexGenesisBlock = NULL;
int nBestHeight = -1;
uint256 nBestChainWork = 0;
uint256 nBestInvalidWork = 0;
uint256 hashBestChain = 0;
CBlockIndex* pindexBest = NULL;
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexValid; // may contain all CBlockIndex*'s that have validness >=BLOCK_VALID_TRANSACTIONS, and must contain those that are not failed
int64 nTimeBestReceived = 0;
int nScriptCheckThreads = 0;
bool fImporting = false;
bool fReindex = false;
bool fBenchmark = false;
bool fTxIndex = false;
unsigned int nCoinCacheSize = 5000;
/** Fees smaller than this (in satoshi) are considered zero fee (for transaction creation) */
int64 CTransaction::nMinTxFee = 10000; // Override with -mintxfee
/** Fees smaller than this (in satoshi) are considered zero fee (for relaying) */
int64 CTransaction::nMinRelayTxFee = 10000;
CMedianFilter<int> cPeerBlockCounts(8, 0); // Number of blocks that other nodes claim to have
map<uint256, CBlock*> mapOrphanBlocks;
multimap<uint256, CBlock*> mapOrphanBlocksByPrev;
map<uint256, CDataStream*> mapOrphanTransactions;
map<uint256, map<uint256, CDataStream*> > mapOrphanTransactionsByPrev;
// Constant stuff for coinbase transactions we create:
CScript COINBASE_FLAGS;
const string strMessageMagic = "Megacoin Signed Message:\n";
double dHashesPerSec = 0.0;
int64 nHPSTimerStart = 0;
// Settings
int64 nTransactionFee = 0;
//////////////////////////////////////////////////////////////////////////////
//
// dispatching functions
//
// These functions dispatch to one or all registered wallets
void RegisterWallet(CWallet* pwalletIn)
{
{
LOCK(cs_setpwalletRegistered);
setpwalletRegistered.insert(pwalletIn);
}
}
void UnregisterWallet(CWallet* pwalletIn)
{
{
LOCK(cs_setpwalletRegistered);
setpwalletRegistered.erase(pwalletIn);
}
}
// get the wallet transaction with the given hash (if it exists)
bool static GetTransaction(const uint256& hashTx, CWalletTx& wtx)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
if (pwallet->GetTransaction(hashTx,wtx))
return true;
return false;
}
// erases transaction with the given hash from all wallets
void static EraseFromWallets(uint256 hash)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->EraseFromWallet(hash);
}
// make sure all wallets know about the given transaction, in the given block
void SyncWithWallets(const uint256 &hash, const CTransaction& tx, const CBlock* pblock, bool fUpdate)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->AddToWalletIfInvolvingMe(hash, tx, pblock, fUpdate);
}
// notify wallets about a new best chain
void static SetBestChain(const CBlockLocator& loc)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->SetBestChain(loc);
}
// notify wallets about an updated transaction
void static UpdatedTransaction(const uint256& hashTx)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->UpdatedTransaction(hashTx);
}
// dump all wallets
void static PrintWallets(const CBlock& block)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->PrintWallet(block);
}
// notify wallets about an incoming inventory (for request counts)
void static Inventory(const uint256& hash)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->Inventory(hash);
}
// ask wallets to resend their transactions
void static ResendWalletTransactions()
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->ResendWalletTransactions();
}
//////////////////////////////////////////////////////////////////////////////
//
// CCoinsView implementations
//
bool CCoinsView::GetCoins(const uint256 &txid, CCoins &coins) { return false; }
bool CCoinsView::SetCoins(const uint256 &txid, const CCoins &coins) { return false; }
bool CCoinsView::HaveCoins(const uint256 &txid) { return false; }
CBlockIndex *CCoinsView::GetBestBlock() { return NULL; }
bool CCoinsView::SetBestBlock(CBlockIndex *pindex) { return false; }
bool CCoinsView::BatchWrite(const std::map<uint256, CCoins> &mapCoins, CBlockIndex *pindex) { return false; }
bool CCoinsView::GetStats(CCoinsStats &stats) { return false; }
CCoinsViewBacked::CCoinsViewBacked(CCoinsView &viewIn) : base(&viewIn) { }
bool CCoinsViewBacked::GetCoins(const uint256 &txid, CCoins &coins) { return base->GetCoins(txid, coins); }
bool CCoinsViewBacked::SetCoins(const uint256 &txid, const CCoins &coins) { return base->SetCoins(txid, coins); }
bool CCoinsViewBacked::HaveCoins(const uint256 &txid) { return base->HaveCoins(txid); }
CBlockIndex *CCoinsViewBacked::GetBestBlock() { return base->GetBestBlock(); }
bool CCoinsViewBacked::SetBestBlock(CBlockIndex *pindex) { return base->SetBestBlock(pindex); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
bool CCoinsViewBacked::BatchWrite(const std::map<uint256, CCoins> &mapCoins, CBlockIndex *pindex) { return base->BatchWrite(mapCoins, pindex); }
bool CCoinsViewBacked::GetStats(CCoinsStats &stats) { return base->GetStats(stats); }
CCoinsViewCache::CCoinsViewCache(CCoinsView &baseIn, bool fDummy) : CCoinsViewBacked(baseIn), pindexTip(NULL) { }
bool CCoinsViewCache::GetCoins(const uint256 &txid, CCoins &coins) {
if (cacheCoins.count(txid)) {
coins = cacheCoins[txid];
return true;
}
if (base->GetCoins(txid, coins)) {
cacheCoins[txid] = coins;
return true;
}
return false;
}
std::map<uint256,CCoins>::iterator CCoinsViewCache::FetchCoins(const uint256 &txid) {
std::map<uint256,CCoins>::iterator it = cacheCoins.lower_bound(txid);
if (it != cacheCoins.end() && it->first == txid)
return it;
CCoins tmp;
if (!base->GetCoins(txid,tmp))
return cacheCoins.end();
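// Not yet cached: insert an empty CCoins at the hinted position, then swap the
// value fetched from the backing view into it to avoid an extra copy.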
std::map<uint256,CCoins>::iterator ret = cacheCoins.insert(it, std::make_pair(txid, CCoins()));
tmp.swap(ret->second);
return ret;
}
CCoins &CCoinsViewCache::GetCoins(const uint256 &txid) {
std::map<uint256,CCoins>::iterator it = FetchCoins(txid);
assert(it != cacheCoins.end());
return it->second;
}
bool CCoinsViewCache::SetCoins(const uint256 &txid, const CCoins &coins) {
cacheCoins[txid] = coins;
return true;
}
bool CCoinsViewCache::HaveCoins(const uint256 &txid) {
return FetchCoins(txid) != cacheCoins.end();
}
CBlockIndex *CCoinsViewCache::GetBestBlock() {
if (pindexTip == NULL)
pindexTip = base->GetBestBlock();
return pindexTip;
}
bool CCoinsViewCache::SetBestBlock(CBlockIndex *pindex) {
pindexTip = pindex;
return true;
}
bool CCoinsViewCache::BatchWrite(const std::map<uint256, CCoins> &mapCoins, CBlockIndex *pindex) {
for (std::map<uint256, CCoins>::const_iterator it = mapCoins.begin(); it != mapCoins.end(); it++)
cacheCoins[it->first] = it->second;
pindexTip = pindex;
return true;
}
bool CCoinsViewCache::Flush() {
bool fOk = base->BatchWrite(cacheCoins, pindexTip);
if (fOk)
cacheCoins.clear();
return fOk;
}
unsigned int CCoinsViewCache::GetCacheSize() {
return cacheCoins.size();
}
/** CCoinsView that brings transactions from a memorypool into view.
It does not check for spends by memory pool transactions. */
CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView &baseIn, CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) { }
bool CCoinsViewMemPool::GetCoins(const uint256 &txid, CCoins &coins) {
if (base->GetCoins(txid, coins))
return true;
if (mempool.exists(txid)) {
const CTransaction &tx = mempool.lookup(txid);
coins = CCoins(tx, MEMPOOL_HEIGHT);
return true;
}
return false;
}
bool CCoinsViewMemPool::HaveCoins(const uint256 &txid) {
return mempool.exists(txid) || base->HaveCoins(txid);
}
CCoinsViewCache *pcoinsTip = NULL;
CBlockTreeDB *pblocktree = NULL;
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
bool AddOrphanTx(const CDataStream& vMsg)
{
CTransaction tx;
CDataStream(vMsg) >> tx;
uint256 hash = tx.GetHash();
if (mapOrphanTransactions.count(hash))
return false;
CDataStream* pvMsg = new CDataStream(vMsg);
// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 10,000 orphans, each of which is at most 5,000 bytes big, is
// at most 50 megabytes of orphans:
if (pvMsg->size() > 5000)
{
printf("ignoring large orphan tx (size: %"PRIszu", hash: %s)\n", pvMsg->size(), hash.ToString().c_str());
delete pvMsg;
return false;
}
mapOrphanTransactions[hash] = pvMsg;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapOrphanTransactionsByPrev[txin.prevout.hash].insert(make_pair(hash, pvMsg));
printf("stored orphan tx %s (mapsz %"PRIszu")\n", hash.ToString().c_str(),
mapOrphanTransactions.size());
return true;
}
void static EraseOrphanTx(uint256 hash)
{
if (!mapOrphanTransactions.count(hash))
return;
const CDataStream* pvMsg = mapOrphanTransactions[hash];
CTransaction tx;
CDataStream(*pvMsg) >> tx;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
mapOrphanTransactionsByPrev[txin.prevout.hash].erase(hash);
if (mapOrphanTransactionsByPrev[txin.prevout.hash].empty())
mapOrphanTransactionsByPrev.erase(txin.prevout.hash);
}
delete pvMsg;
mapOrphanTransactions.erase(hash);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
unsigned int nEvicted = 0;
while (mapOrphanTransactions.size() > nMaxOrphans)
{
// Evict a random orphan:
uint256 randomhash = GetRandHash();
map<uint256, CDataStream*>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
if (it == mapOrphanTransactions.end())
it = mapOrphanTransactions.begin();
EraseOrphanTx(it->first);
++nEvicted;
}
return nEvicted;
}
//////////////////////////////////////////////////////////////////////////////
//
// CTransaction / CTxOut
//
bool CTxOut::IsDust() const
{
// "Dust" is defined in terms of CTransaction::nMinRelayTxFee,
// which has units satoshis-per-kilobyte.
// If you'd pay more than 1/3 in fees
// to spend something, then we consider it dust.
// A typical txout is 33 bytes big, and will
// need a CTxIn of at least 148 bytes to spend,
// so dust is a txout less than 54 uMEC
// (5430 satoshis) with default nMinRelayTxFee
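// e.g. for a typical 33-byte txout: nValue*1000/(3*(33+148)) < 10000 => nValue < 5430 satoshis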
return ((nValue*1000)/(3*((int)GetSerializeSize(SER_DISK,0)+148)) < CTransaction::nMinRelayTxFee);
}
bool CTransaction::IsStandard() const
{
if (nVersion > CTransaction::CURRENT_VERSION)
return false;
if (!IsFinal())
return false;
// Extremely large transactions with lots of inputs can cost the network
// almost as much to process as they cost the sender in fees, because
// computing signature hashes is O(ninputs*txsize). Limiting transactions
// to MAX_STANDARD_TX_SIZE mitigates CPU exhaustion attacks.
unsigned int sz = this->GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
if (sz >= MAX_STANDARD_TX_SIZE)
return false;
BOOST_FOREACH(const CTxIn& txin, vin)
{
// Biggest 'standard' txin is a 3-signature 3-of-3 CHECKMULTISIG
// pay-to-script-hash, which is 3 ~80-byte signatures, 3
// ~65-byte public keys, plus a few script ops.
if (txin.scriptSig.size() > 500)
return false;
if (!txin.scriptSig.IsPushOnly())
return false;
}
BOOST_FOREACH(const CTxOut& txout, vout) {
if (!::IsStandard(txout.scriptPubKey))
return false;
if (txout.IsDust())
return false;
}
return true;
}
//
// Check transaction inputs, and make sure any
// pay-to-script-hash transactions are evaluating IsStandard scripts
//
// Why bother? To avoid denial-of-service attacks; an attacker
// can submit a standard HASH... OP_EQUAL transaction,
// which will get accepted into blocks. The redemption
// script can be anything; an attacker could use a very
// expensive-to-check-upon-redemption script like:
// DUP CHECKSIG DROP ... repeated 100 times... OP_1
//
bool CTransaction::AreInputsStandard(CCoinsViewCache& mapInputs) const
{
if (IsCoinBase())
return true; // Coinbases don't use vin normally
for (unsigned int i = 0; i < vin.size(); i++)
{
const CTxOut& prev = GetOutputFor(vin[i], mapInputs);
vector<vector<unsigned char> > vSolutions;
txnouttype whichType;
// get the scriptPubKey corresponding to this input:
const CScript& prevScript = prev.scriptPubKey;
if (!Solver(prevScript, whichType, vSolutions))
return false;
int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
if (nArgsExpected < 0)
return false;
// Transactions with extra stuff in their scriptSigs are
// non-standard. Note that this EvalScript() call will
// be quick, because if there are any operations
// beside "push data" in the scriptSig the
// IsStandard() call returns false
vector<vector<unsigned char> > stack;
if (!EvalScript(stack, vin[i].scriptSig, *this, i, false, 0))
return false;
if (whichType == TX_SCRIPTHASH)
{
if (stack.empty())
return false;
CScript subscript(stack.back().begin(), stack.back().end());
vector<vector<unsigned char> > vSolutions2;
txnouttype whichType2;
if (!Solver(subscript, whichType2, vSolutions2))
return false;
if (whichType2 == TX_SCRIPTHASH)
return false;
int tmpExpected;
tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2);
if (tmpExpected < 0)
return false;
nArgsExpected += tmpExpected;
}
if (stack.size() != (unsigned int)nArgsExpected)
return false;
}
return true;
}
unsigned int CTransaction::GetLegacySigOpCount() const
{
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTxIn& txin, vin)
{
nSigOps += txin.scriptSig.GetSigOpCount(false);
}
BOOST_FOREACH(const CTxOut& txout, vout)
{
nSigOps += txout.scriptPubKey.GetSigOpCount(false);
}
return nSigOps;
}
int CMerkleTx::SetMerkleBranch(const CBlock* pblock)
{
CBlock blockTmp;
if (pblock == NULL) {
CCoins coins;
if (pcoinsTip->GetCoins(GetHash(), coins)) {
CBlockIndex *pindex = FindBlockByHeight(coins.nHeight);
if (pindex) {
if (!blockTmp.ReadFromDisk(pindex))
return 0;
pblock = &blockTmp;
}
}
}
if (pblock) {
// Update the tx's hashBlock
hashBlock = pblock->GetHash();
// Locate the transaction
for (nIndex = 0; nIndex < (int)pblock->vtx.size(); nIndex++)
if (pblock->vtx[nIndex] == *(CTransaction*)this)
break;
if (nIndex == (int)pblock->vtx.size())
{
vMerkleBranch.clear();
nIndex = -1;
printf("ERROR: SetMerkleBranch() : couldn't find tx in block\n");
return 0;
}
// Fill in merkle branch
vMerkleBranch = pblock->GetMerkleBranch(nIndex);
}
// Is the tx in a block that's in the main chain
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
if (!pindex || !pindex->IsInMainChain())
return 0;
return pindexBest->nHeight - pindex->nHeight + 1;
}
bool CTransaction::CheckTransaction(CValidationState &state) const
{
// Basic checks that don't depend on any context
if (vin.empty())
return state.DoS(10, error("CTransaction::CheckTransaction() : vin empty"));
if (vout.empty())
return state.DoS(10, error("CTransaction::CheckTransaction() : vout empty"));
// Size limits
if (::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return state.DoS(100, error("CTransaction::CheckTransaction() : size limits failed"));
// Check for negative or overflow output values
int64 nValueOut = 0;
BOOST_FOREACH(const CTxOut& txout, vout)
{
if (txout.nValue < 0)
return state.DoS(100, error("CTransaction::CheckTransaction() : txout.nValue negative"));
if (txout.nValue > MAX_MONEY)
return state.DoS(100, error("CTransaction::CheckTransaction() : txout.nValue too high"));
nValueOut += txout.nValue;
if (!MoneyRange(nValueOut))
return state.DoS(100, error("CTransaction::CheckTransaction() : txout total out of range"));
}
// Check for duplicate inputs
set<COutPoint> vInOutPoints;
BOOST_FOREACH(const CTxIn& txin, vin)
{
if (vInOutPoints.count(txin.prevout))
return state.DoS(100, error("CTransaction::CheckTransaction() : duplicate inputs"));
vInOutPoints.insert(txin.prevout);
}
if (IsCoinBase())
{
if (vin[0].scriptSig.size() < 2 || vin[0].scriptSig.size() > 100)
return state.DoS(100, error("CTransaction::CheckTransaction() : coinbase script size"));
}
else
{
BOOST_FOREACH(const CTxIn& txin, vin)
if (txin.prevout.IsNull())
return state.DoS(10, error("CTransaction::CheckTransaction() : prevout is null"));
}
return true;
}
int64 CTransaction::GetMinFee(unsigned int nBlockSize, bool fAllowFree,
enum GetMinFee_mode mode) const
{
// Base fee is either nMinTxFee or nMinRelayTxFee
int64 nBaseFee = (mode == GMF_RELAY) ? nMinRelayTxFee : nMinTxFee;
unsigned int nBytes = ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION);
unsigned int nNewBlockSize = nBlockSize + nBytes;
int64 nMinFee = (1 + (int64)nBytes / 1000) * nBaseFee;
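// e.g. with the default 10000-satoshi base fee, a 600-byte tx pays (1+0)*10000 = 10000
// satoshis here, while a 2500-byte tx pays (1+2)*10000 = 30000 (nBytes/1000 truncates).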
if (fAllowFree)
{
if (nBlockSize == 1)
{
// Transactions under 10K are free
// (about 4500 MEC if made of 50 MEC inputs)
if (nBytes < 10000)
nMinFee = 0;
}
else
{
// Free transaction area
if (nNewBlockSize < 27000)
nMinFee = 0;
}
}
// To limit dust spam, require base fee if any output is less than 0.01
if (nMinFee < nBaseFee)
{
BOOST_FOREACH(const CTxOut& txout, vout)
if (txout.nValue < CENT)
nMinFee = nBaseFee;
}
// Raise the price as the block approaches full
if (nBlockSize != 1 && nNewBlockSize >= MAX_BLOCK_SIZE_GEN/2)
{
if (nNewBlockSize >= MAX_BLOCK_SIZE_GEN)
return MAX_MONEY;
nMinFee *= MAX_BLOCK_SIZE_GEN / (MAX_BLOCK_SIZE_GEN - nNewBlockSize);
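// e.g. at exactly 75% of MAX_BLOCK_SIZE_GEN the multiplier is 4 and at 90% it is 10
// (integer division); a completely full block already returned MAX_MONEY above.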
}
if (!MoneyRange(nMinFee))
nMinFee = MAX_MONEY;
return nMinFee;
}
void CTxMemPool::pruneSpent(const uint256 &hashTx, CCoins &coins)
{
LOCK(cs);
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.lower_bound(COutPoint(hashTx, 0));
// iterate over all COutPoints in mapNextTx whose hash equals the provided hashTx
while (it != mapNextTx.end() && it->first.hash == hashTx) {
coins.Spend(it->first.n); // and remove those outputs from coins
it++;
}
}
bool CTxMemPool::accept(CValidationState &state, CTransaction &tx, bool fCheckInputs, bool fLimitFree,
bool* pfMissingInputs)
{
if (pfMissingInputs)
*pfMissingInputs = false;
if (!tx.CheckTransaction(state))
return error("CTxMemPool::accept() : CheckTransaction failed");
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
return state.DoS(100, error("CTxMemPool::accept() : coinbase as individual tx"));
// To help v0.1.5 clients who would see it as a negative number
if ((int64)tx.nLockTime > std::numeric_limits<int>::max())
return error("CTxMemPool::accept() : not accepting nLockTime beyond 2038 yet");
// Rather not work on nonstandard transactions (unless -testnet)
if (!fTestNet && !tx.IsStandard())
return error("CTxMemPool::accept() : nonstandard transaction type");
// is it already in the memory pool?
uint256 hash = tx.GetHash();
{
LOCK(cs);
if (mapTx.count(hash))
return false;
}
// Check for conflicts with in-memory transactions
CTransaction* ptxOld = NULL;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
COutPoint outpoint = tx.vin[i].prevout;
if (mapNextTx.count(outpoint))
{
// Disable replacement feature for now
return false;
// Allow replacing with a newer version of the same transaction
if (i != 0)
return false;
ptxOld = mapNextTx[outpoint].ptx;
if (ptxOld->IsFinal())
return false;
if (!tx.IsNewerThan(*ptxOld))
return false;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
COutPoint outpoint = tx.vin[i].prevout;
if (!mapNextTx.count(outpoint) || mapNextTx[outpoint].ptx != ptxOld)
return false;
}
break;
}
}
if (fCheckInputs)
{
CCoinsView dummy;
CCoinsViewCache view(dummy);
{
LOCK(cs);
CCoinsViewMemPool viewMemPool(*pcoinsTip, *this);
view.SetBackend(viewMemPool);
// do we already have it?
if (view.HaveCoins(hash))
return false;
// do all inputs exist?
// Note that this does not check for the presence of actual outputs (see the next check for that),
// it only helps fill in pfMissingInputs (to determine missing vs spent).
BOOST_FOREACH(const CTxIn txin, tx.vin) {
if (!view.HaveCoins(txin.prevout.hash)) {
if (pfMissingInputs)
*pfMissingInputs = true;
return false;
}
}
// are the actual inputs available?
if (!tx.HaveInputs(view))
return state.Invalid(error("CTxMemPool::accept() : inputs already spent"));
// Bring the best block into scope
view.GetBestBlock();
// we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
view.SetBackend(dummy);
}
// Check for non-standard pay-to-script-hash in inputs
if (!tx.AreInputsStandard(view) && !fTestNet)
return error("CTxMemPool::accept() : nonstandard transaction input");
// Note: if you modify this code to accept non-standard transactions, then
// you should add code here to check that the transaction does a
// reasonable number of ECDSA signature verifications.
int64 nFees = tx.GetValueIn(view)-tx.GetValueOut();
unsigned int nSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
// Don't accept it if it can't get into a block
int64 txMinFee = tx.GetMinFee(1000, true, GMF_RELAY);
if (fLimitFree && nFees < txMinFee)
return error("CTxMemPool::accept() : not enough fees %s, %"PRI64d" < %"PRI64d,
hash.ToString().c_str(),
nFees, txMinFee);
// Continuously rate-limit free transactions
// This mitigates 'penny-flooding' -- sending thousands of free transactions just to
// be annoying or make others' transactions take longer to confirm.
if (fLimitFree && nFees < CTransaction::nMinRelayTxFee)
{
static double dFreeCount;
static int64 nLastTime;
int64 nNow = GetTime();
LOCK(cs);
// Use an exponentially decaying ~10-minute window:
dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
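// pow(1 - 1/600, dt) ~= exp(-dt/600): the free-byte counter decays with a ~10-minute
// time constant, so roughly 37% of it remains after 10 idle minutes.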
nLastTime = nNow;
// -limitfreerelay unit is thousand-bytes-per-minute
// At default rate it would take over a month to fill 1GB
if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000)
return error("CTxMemPool::accept() : free transaction rejected by rate limiter");
if (fDebug)
printf("Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
dFreeCount += nSize;
}
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
if (!tx.CheckInputs(state, view, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC))
{
return error("CTxMemPool::accept() : ConnectInputs failed %s", hash.ToString().c_str());
}
}
// Store transaction in memory
{
LOCK(cs);
if (ptxOld)
{
printf("CTxMemPool::accept() : replacing tx %s with new version\n", ptxOld->GetHash().ToString().c_str());
remove(*ptxOld);
}
addUnchecked(hash, tx);
}
///// are we sure this is ok when loading transactions or restoring block txes
// If updated, erase old tx from wallet
if (ptxOld)
EraseFromWallets(ptxOld->GetHash());
SyncWithWallets(hash, tx, NULL, true);
printf("CTxMemPool::accept() : accepted %s (poolsz %"PRIszu")\n",
hash.ToString().c_str(),
mapTx.size());
return true;
}
bool CTransaction::AcceptToMemoryPool(CValidationState &state, bool fCheckInputs, bool fLimitFree, bool* pfMissingInputs)
{
try {
return mempool.accept(state, *this, fCheckInputs, fLimitFree, pfMissingInputs);
} catch(std::runtime_error &e) {
return state.Abort(_("System error: ") + e.what());
}
}
bool CTxMemPool::addUnchecked(const uint256& hash, CTransaction &tx)
{
// Add to memory pool without checking anything. Don't call this directly,
// call CTxMemPool::accept to properly check the transaction first.
{
mapTx[hash] = tx;
for (unsigned int i = 0; i < tx.vin.size(); i++)
mapNextTx[tx.vin[i].prevout] = CInPoint(&mapTx[hash], i);
nTransactionsUpdated++;
}
return true;
}
bool CTxMemPool::remove(const CTransaction &tx, bool fRecursive)
{
// Remove transaction from memory pool
{
LOCK(cs);
uint256 hash = tx.GetHash();
if (mapTx.count(hash))
{
if (fRecursive) {
for (unsigned int i = 0; i < tx.vout.size(); i++) {
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(COutPoint(hash, i));
if (it != mapNextTx.end())
remove(*it->second.ptx, true);
}
}
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapNextTx.erase(txin.prevout);
mapTx.erase(hash);
nTransactionsUpdated++;
}
}
return true;
}
bool CTxMemPool::removeConflicts(const CTransaction &tx)
{
// Remove transactions which depend on inputs of tx, recursively
LOCK(cs);
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(txin.prevout);
if (it != mapNextTx.end()) {
const CTransaction &txConflict = *it->second.ptx;
if (txConflict != tx)
remove(txConflict, true);
}
}
return true;
}
void CTxMemPool::clear()
{
LOCK(cs);
mapTx.clear();
mapNextTx.clear();
++nTransactionsUpdated;
}
void CTxMemPool::queryHashes(std::vector<uint256>& vtxid)
{
vtxid.clear();
LOCK(cs);
vtxid.reserve(mapTx.size());
for (map<uint256, CTransaction>::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi)
vtxid.push_back((*mi).first);
}
int CMerkleTx::GetDepthInMainChain(CBlockIndex* &pindexRet) const
{
if (hashBlock == 0 || nIndex == -1)
return 0;
// Find the block it claims to be in
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
if (!pindex || !pindex->IsInMainChain())
return 0;
// Make sure the merkle branch connects to this block
if (!fMerkleVerified)
{
if (CBlock::CheckMerkleBranch(GetHash(), vMerkleBranch, nIndex) != pindex->hashMerkleRoot)
return 0;
fMerkleVerified = true;
}
pindexRet = pindex;
return pindexBest->nHeight - pindex->nHeight + 1;
}
int CMerkleTx::GetBlocksToMaturity() const
{
if (!IsCoinBase())
return 0;
return max(0, (COINBASE_MATURITY+20) - GetDepthInMainChain());
}
bool CMerkleTx::AcceptToMemoryPool(bool fCheckInputs, bool fLimitFree)
{
CValidationState state;
return CTransaction::AcceptToMemoryPool(state, fCheckInputs, fLimitFree);
}
bool CWalletTx::AcceptWalletTransaction(bool fCheckInputs)
{
{
LOCK(mempool.cs);
// Add previous supporting transactions first
BOOST_FOREACH(CMerkleTx& tx, vtxPrev)
{
if (!tx.IsCoinBase())
{
uint256 hash = tx.GetHash();
if (!mempool.exists(hash) && pcoinsTip->HaveCoins(hash))
tx.AcceptToMemoryPool(fCheckInputs, false);
}
}
return AcceptToMemoryPool(fCheckInputs, false);
}
return false;
}
// Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock
bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock, bool fAllowSlow)
{
CBlockIndex *pindexSlow = NULL;
{
LOCK(cs_main);
{
LOCK(mempool.cs);
if (mempool.exists(hash))
{
txOut = mempool.lookup(hash);
return true;
}
}
if (fTxIndex) {
CDiskTxPos postx;
if (pblocktree->ReadTxIndex(hash, postx)) {
CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
CBlockHeader header;
try {
file >> header;
fseek(file, postx.nTxOffset, SEEK_CUR);
file >> txOut;
} catch (std::exception &e) {
return error("%s() : deserialize or I/O error", __PRETTY_FUNCTION__);
}
hashBlock = header.GetHash();
if (txOut.GetHash() != hash)
return error("%s() : txid mismatch", __PRETTY_FUNCTION__);
return true;
}
}
if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
int nHeight = -1;
{
CCoinsViewCache &view = *pcoinsTip;
CCoins coins;
if (view.GetCoins(hash, coins))
nHeight = coins.nHeight;
}
if (nHeight > 0)
pindexSlow = FindBlockByHeight(nHeight);
}
}
if (pindexSlow) {
CBlock block;
if (block.ReadFromDisk(pindexSlow)) {
BOOST_FOREACH(const CTransaction &tx, block.vtx) {
if (tx.GetHash() == hash) {
txOut = tx;
hashBlock = pindexSlow->GetBlockHash();
return true;
}
}
}
}
return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
static CBlockIndex* pblockindexFBBHLast;
CBlockIndex* FindBlockByHeight(int nHeight)
{
if (nHeight >= (int)vBlockIndexByHeight.size())
return NULL;
return vBlockIndexByHeight[nHeight];
}
bool CBlock::ReadFromDisk(const CBlockIndex* pindex)
{
if (!ReadFromDisk(pindex->GetBlockPos()))
return false;
if (GetHash() != pindex->GetBlockHash())
return error("CBlock::ReadFromDisk() : GetHash() doesn't match index");
return true;
}
uint256 static GetOrphanRoot(const CBlockHeader* pblock)
{
// Work back to the first block in the orphan chain
while (mapOrphanBlocks.count(pblock->hashPrevBlock))
pblock = mapOrphanBlocks[pblock->hashPrevBlock];
return pblock->GetHash();
}
int64 static GetBlockValue(int nHeight, int64 nFees)
{
int64 nSubsidy = 500 * COIN;
/*
Total coins: 42 million.
* First 5 months: 21 million coins are generated.
  Every 21,000 blocks (1 month) the reward steps down through 500, 250, 125, 75, 50.
* Over the following decades the remaining 21 million are generated.
  Every 420,000 blocks (2 years) the reward is halved, starting at 25;
  the first 420,000-block period yields 10.5 million coins.
*/
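// For example: height 30000 pays 250 MEC, height 100000 pays 50 MEC, height 105000
// pays 25 MEC, and height 525000 pays 12.5 MEC (first halving of the 25 MEC subsidy).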
int BlockCountA = 21000;
int BlockCountB = 420000;
if (nHeight >= BlockCountA * 5)
{
nSubsidy = 25 * COIN;
nSubsidy >>= ((nHeight - (BlockCountA * 5)) / (BlockCountB)); // Subsidy is cut in half every 420000 blocks
}
else if (nHeight >= BlockCountA * 4) { nSubsidy = 50 * COIN; }
else if (nHeight >= BlockCountA * 3) { nSubsidy = 75 * COIN; }
else if (nHeight >= BlockCountA * 2) { nSubsidy = 125 * COIN; }
else if (nHeight >= BlockCountA) { nSubsidy = 250 * COIN; }
else { nSubsidy = 500 * COIN; }
return nSubsidy + nFees;
}
static const int64 nTargetSpacing = 2.5 * 60; // 2.5 minutes
static const int64 nOriginalInterval = 2016;
static const int64 nFilteredInterval = 9;
static const int64 nOriginalTargetTimespan = nOriginalInterval * nTargetSpacing; // 3.5 days
static const int64 nFilteredTargetTimespan = nFilteredInterval * nTargetSpacing; // 22.5 minutes
//
// minimum amount of work that could possibly be required nTime after
// minimum work required was nBase
//
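// For example, 28 days (two full nOriginalTargetTimespan*4 periods) after nBase was
// required, the minimum target may have grown by at most a factor of 16, and it is
// always capped at bnProofOfWorkLimit.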
unsigned int ComputeMinWork(unsigned int nBase, int64 nTime)
{
// Testnet has min-difficulty blocks
// after nTargetSpacing*2 time between blocks:
if (fTestNet && nTime > nTargetSpacing*2)
return bnProofOfWorkLimit.GetCompact();
CBigNum bnResult;
bnResult.SetCompact(nBase);
while (nTime > 0 && bnResult < bnProofOfWorkLimit)
{
// Maximum 400% adjustment...
bnResult *= 4;
// ... in best-case exactly 4-times-normal target time
nTime -= nOriginalTargetTimespan*4;
}
if (bnResult > bnProofOfWorkLimit)
bnResult = bnProofOfWorkLimit;
return bnResult.GetCompact();
}
unsigned int static GetNextWorkRequired_V1(const CBlockIndex* pindexLast, const CBlockHeader *pblock)
{
/* previous difficulty formula, ported from freicoin */
static unsigned int nProofOfWorkLimit = bnProofOfWorkLimit.GetCompact();
#define WINDOW 144
static double kOne = 1.0;
static double kTwoToTheThirtyOne = double(2147483648);
static double kGain = 0.025;
static double kLimiterUp = 1.055;
static double kLimiterDown = double(200) / double(211);
static double kTargetInterval = double(nTargetSpacing);
static int32_t kFilterCoeff[WINDOW] = {
-845859, -459003, -573589, -703227, -848199, -1008841,
-1183669, -1372046, -1573247, -1787578, -2011503, -2243311,
-2482346, -2723079, -2964681, -3202200, -3432186, -3650186,
-3851924, -4032122, -4185340, -4306430, -4389146, -4427786,
-4416716, -4349289, -4220031, -4022692, -3751740, -3401468,
-2966915, -2443070, -1825548, -1110759, -295281, 623307,
1646668, 2775970, 4011152, 5351560, 6795424, 8340274,
9982332, 11717130, 13539111, 15441640, 17417389, 19457954,
21554056, 23695744, 25872220, 28072119, 30283431, 32493814,
34690317, 36859911, 38989360, 41065293, 43074548, 45004087,
46841170, 48573558, 50189545, 51678076, 53028839, 54232505,
55280554, 56165609, 56881415, 57422788, 57785876, 57968085,
57968084, 57785876, 57422788, 56881415, 56165609, 55280554,
54232505, 53028839, 51678076, 50189545, 48573558, 46841170,
45004087, 43074548, 41065293, 38989360, 36859911, 34690317,
32493814, 30283431, 28072119, 25872220, 23695744, 21554057,
19457953, 17417389, 15441640, 13539111, 11717130, 9982332,
8340274, 6795424, 5351560, 4011152, 2775970, 1646668,
623307, -295281, -1110759, -1825548, -2443070, -2966915,
-3401468, -3751740, -4022692, -4220031, -4349289, -4416715,
-4427787, -4389146, -4306430, -4185340, -4032122, -3851924,
-3650186, -3432186, -3202200, -2964681, -2723079, -2482346,
-2243311, -2011503, -1787578, -1573247, -1372046, -1183669,
-1008841, -848199, -703227, -573589, -459003, -845858
};
// Genesis block
if (pindexLast == NULL)
return nProofOfWorkLimit;
bool fUseFilter =
(fTestNet && pindexLast->nHeight>=(DIFF_FILTER_THRESHOLD_TESTNET-1)) ||
(!fTestNet && pindexLast->nHeight>=(DIFF_FILTER_THRESHOLD-1));
int64 nInterval = nFilteredInterval;
int64 nTargetTimespan = nFilteredTargetTimespan;
if ( !fUseFilter ) {
nInterval = nOriginalInterval;
nTargetTimespan = nOriginalTargetTimespan;
}
// Only change once per interval
if ( (fUseFilter && (pindexLast->nHeight+1) % nInterval != 0) ||
(!fUseFilter && (pindexLast->nHeight+1) % 2016 != 0))
{
// Special difficulty rule for testnet:
if (fTestNet)
{
// If the new block's timestamp is more than nTargetSpacing*2
// then allow mining of a min-difficulty block.
if (pblock->nTime > pindexLast->nTime + nTargetSpacing*2)
return nProofOfWorkLimit;
else
{
// Return the last non-special-min-difficulty-rules-block
const CBlockIndex* pindex = pindexLast;
while (pindex->pprev && pindex->nHeight % nInterval != 0 && pindex->nBits == nProofOfWorkLimit)
pindex = pindex->pprev;
return pindex->nBits;
}
}
return pindexLast->nBits;
}
double dAdjustmentFactor;
if ( fUseFilter ) {
int32_t vTimeDelta[WINDOW];
size_t idx = 0;
const CBlockIndex *pitr = pindexLast;
for ( ; idx!=WINDOW && pitr && pitr->pprev; ++idx, pitr=pitr->pprev )
vTimeDelta[idx] = (int32_t)(pitr->GetBlockTime() - pitr->pprev->GetBlockTime());
for ( ; idx!=WINDOW; ++idx )
vTimeDelta[idx] = (int32_t)nTargetSpacing;
int64_t vFilteredTime = 0;
for ( idx=0; idx<WINDOW; ++idx )
vFilteredTime += (int64_t)kFilterCoeff[idx] * (int64_t)vTimeDelta[idx];
double dFilteredInterval = double(vFilteredTime) / kTwoToTheThirtyOne;
dAdjustmentFactor = kOne - kGain * (dFilteredInterval - kTargetInterval) / kTargetInterval;
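// kGain = 0.025, so each 9-block retarget corrects 2.5% of the relative error between
// the filtered block interval and the 150-second target; an interval above target
// yields a factor below 1.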
if ( dAdjustmentFactor > kLimiterUp ) {
dAdjustmentFactor = kLimiterUp;
}
else if ( dAdjustmentFactor < kLimiterDown ) {
dAdjustmentFactor = kLimiterDown;
}
} else {
// This fixes an issue where a 51% attack can change difficulty at will.
// Go back the full period unless it's the first retarget after genesis.
// Code courtesy of Art Forz
int blockstogoback = nInterval-1;
if ((pindexLast->nHeight+1) != nInterval)
blockstogoback = nInterval;
// Go back by what we want to be nTargetTimespan worth of blocks
const CBlockIndex* pindexFirst = pindexLast;
for (int i = 0; pindexFirst && i < blockstogoback; i++)
pindexFirst = pindexFirst->pprev;
assert(pindexFirst);
// Limit adjustment step
int64 nActualTimespan = pindexLast->GetBlockTime() - pindexFirst->GetBlockTime();
printf(" nActualTimespan = %"PRI64d" before bounds\n", nActualTimespan);
if (nActualTimespan < nTargetTimespan/4)
nActualTimespan = nTargetTimespan/4;
if (nActualTimespan > nTargetTimespan*4)
nActualTimespan = nTargetTimespan*4;
dAdjustmentFactor = double(nTargetTimespan) /
double(nActualTimespan);
}
// Retarget
CBigNum bnNew;
bnNew.SetCompact(pindexLast->nBits);
int64 dAdjustmentFactorNumerator;
int64 dAdjustmentFactorDenominator;
DoubleToNumeratorDenominator(dAdjustmentFactor, &dAdjustmentFactorNumerator, &dAdjustmentFactorDenominator);
bnNew *= dAdjustmentFactorDenominator;
bnNew /= dAdjustmentFactorNumerator;
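// bnNew = old target / dAdjustmentFactor: a factor below 1 (blocks slower than target)
// raises the target and so lowers difficulty, and a factor above 1 does the opposite.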
if (bnNew > bnProofOfWorkLimit)
bnNew = bnProofOfWorkLimit;
/// debug print
printf("GetNextWorkRequired RETARGET\n");
printf("dAdjustmentFactor = %g\n", dAdjustmentFactor);
printf("Before: %08x %s\n", pindexLast->nBits, CBigNum().SetCompact(pindexLast->nBits).getuint256().ToString().c_str());
printf("After: %08x %s\n", bnNew.GetCompact(), bnNew.getuint256().ToString().c_str());
return bnNew.GetCompact();
}
unsigned int static KimotoGravityWell(const CBlockIndex* pindexLast, const CBlockHeader *pblock, uint64 TargetBlocksSpacingSeconds, uint64 PastBlocksMin, uint64 PastBlocksMax) {
/* current difficulty formula, megacoin - kimoto gravity well */
const CBlockIndex *BlockLastSolved = pindexLast;
const CBlockIndex *BlockReading = pindexLast;
const CBlockHeader *BlockCreating = pblock;
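// BlockCreating is not otherwise used; the self-assignment below presumably just
// silences an unused-variable warning.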
BlockCreating = BlockCreating;
uint64 PastBlocksMass = 0;
int64 PastRateActualSeconds = 0;
int64 PastRateTargetSeconds = 0;
double PastRateAdjustmentRatio = double(1);
CBigNum PastDifficultyAverage;
CBigNum PastDifficultyAveragePrev;
double EventHorizonDeviation;
double EventHorizonDeviationFast;
double EventHorizonDeviationSlow;
if (BlockLastSolved == NULL || BlockLastSolved->nHeight == 0 || (uint64)BlockLastSolved->nHeight < PastBlocksMin) { return bnProofOfWorkLimit.GetCompact(); }
for (unsigned int i = 1; BlockReading && BlockReading->nHeight > 0; i++) {
if (PastBlocksMax > 0 && i > PastBlocksMax) { break; }
PastBlocksMass++;
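// Running average of the compact targets over the window:
// avg_i = avg_{i-1} + (target_i - avg_{i-1}) / i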
if (i == 1) { PastDifficultyAverage.SetCompact(BlockReading->nBits); }
else { PastDifficultyAverage = ((CBigNum().SetCompact(BlockReading->nBits) - PastDifficultyAveragePrev) / i) + PastDifficultyAveragePrev; }
PastDifficultyAveragePrev = PastDifficultyAverage;
PastRateActualSeconds = BlockLastSolved->GetBlockTime() - BlockReading->GetBlockTime();
PastRateTargetSeconds = TargetBlocksSpacingSeconds * PastBlocksMass;
PastRateAdjustmentRatio = double(1);
if (PastRateActualSeconds < 0) { PastRateActualSeconds = 0; }
if (PastRateActualSeconds != 0 && PastRateTargetSeconds != 0) {
PastRateAdjustmentRatio = double(PastRateTargetSeconds) / double(PastRateActualSeconds);
}
EventHorizonDeviation = 1 + (0.7084 * pow((double(PastBlocksMass)/double(144)), -1.228));
EventHorizonDeviationFast = EventHorizonDeviation;
EventHorizonDeviationSlow = 1 / EventHorizonDeviation;
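// At PastBlocksMass == 144 the deviation is 1 + 0.7084 = 1.7084, so the adjustment
// ratio must leave the band [1/1.7084, 1.7084] (roughly [0.585, 1.708]) for the scan
// to stop early; the band tightens as more blocks are included.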
if (PastBlocksMass >= PastBlocksMin) {
if ((PastRateAdjustmentRatio <= EventHorizonDeviationSlow) || (PastRateAdjustmentRatio >= EventHorizonDeviationFast)) { assert(BlockReading); break; }
}
if (BlockReading->pprev == NULL) { assert(BlockReading); break; }
BlockReading = BlockReading->pprev;
}
CBigNum bnNew(PastDifficultyAverage);
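// Scale the averaged target by actual/target time over the window: a window that took
// longer than targeted yields a larger target (lower difficulty), and vice versa.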
if (PastRateActualSeconds != 0 && PastRateTargetSeconds != 0) {
bnNew *= PastRateActualSeconds;
bnNew /= PastRateTargetSeconds;
}
if (bnNew > bnProofOfWorkLimit) { bnNew = bnProofOfWorkLimit; }
/// debug print
printf("Difficulty Retarget - Kimoto Gravity Well\n");
printf("PastRateAdjustmentRatio = %g\n", PastRateAdjustmentRatio);
printf("Before: %08x %s\n", BlockLastSolved->nBits, CBigNum().SetCompact(BlockLastSolved->nBits).getuint256().ToString().c_str());
printf("After: %08x %s\n", bnNew.GetCompact(), bnNew.getuint256().ToString().c_str());
return bnNew.GetCompact();
}
unsigned int static GetNextWorkRequired_V2(const CBlockIndex* pindexLast, const CBlockHeader *pblock)
{
static const int64 BlocksTargetSpacing = 2.5 * 60; // 2.5 minutes
unsigned int TimeDaySeconds = 60 * 60 * 24;
int64 PastSecondsMin = TimeDaySeconds * 0.25;
int64 PastSecondsMax = TimeDaySeconds * 7;
uint64 PastBlocksMin = PastSecondsMin / BlocksTargetSpacing;
uint64 PastBlocksMax = PastSecondsMax / BlocksTargetSpacing;
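// With 2.5-minute spacing: PastBlocksMin = 0.25 day / 150 s = 144 blocks,
// PastBlocksMax = 7 days / 150 s = 4032 blocks.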
return KimotoGravityWell(pindexLast, pblock, BlocksTargetSpacing, PastBlocksMin, PastBlocksMax);
}
unsigned int static GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock)
{
int DiffMode = 1;
if (fTestNet) {
if (pindexLast->nHeight+1 >= 2237) { DiffMode = 2; }
}
else {
if (pindexLast->nHeight+1 >= 62773) { DiffMode = 2; }
}
if (DiffMode == 1) { return GetNextWorkRequired_V1(pindexLast, pblock); }
else if (DiffMode == 2) { return GetNextWorkRequired_V2(pindexLast, pblock); }
return GetNextWorkRequired_V2(pindexLast, pblock);
}
bool CheckProofOfWork(uint256 hash, unsigned int nBits)
{
CBigNum bnTarget;
bnTarget.SetCompact(nBits);
// Check range
if (bnTarget <= 0 || bnTarget > bnProofOfWorkLimit)
return error("CheckProofOfWork() : nBits below minimum work");
// Check proof of work matches claimed amount
if (hash > bnTarget.getuint256())
return error("CheckProofOfWork() : hash doesn't match nBits");
return true;
}
// Return the maximum number of blocks that other nodes claim to have
int GetNumBlocksOfPeers()
{
return std::max(cPeerBlockCounts.median(), Checkpoints::GetTotalBlocksEstimate());
}
bool IsInitialBlockDownload()
{
if (pindexBest == NULL || fImporting || fReindex || nBestHeight < Checkpoints::GetTotalBlocksEstimate())
return true;
static int64 nLastUpdate;
static CBlockIndex* pindexLastBest;
if (pindexBest != pindexLastBest)
{
pindexLastBest = pindexBest;
nLastUpdate = GetTime();
}
return (GetTime() - nLastUpdate < 10 &&
pindexBest->GetBlockTime() < GetTime() - 24 * 60 * 60);
}
void static InvalidChainFound(CBlockIndex* pindexNew)
{
if (pindexNew->nChainWork > nBestInvalidWork)
{
nBestInvalidWork = pindexNew->nChainWork;
pblocktree->WriteBestInvalidWork(CBigNum(nBestInvalidWork));
uiInterface.NotifyBlocksChanged();
}
printf("InvalidChainFound: invalid block=%s height=%d log2_work=%.8g date=%s\n",
pindexNew->GetBlockHash().ToString().c_str(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
pindexNew->GetBlockTime()).c_str());
printf("InvalidChainFound: current best=%s height=%d log2_work=%.8g date=%s\n",
hashBestChain.ToString().c_str(), nBestHeight, log(nBestChainWork.getdouble())/log(2.0),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str());
if (pindexBest && nBestInvalidWork > nBestChainWork + (pindexBest->GetBlockWork() * 6).getuint256())
printf("InvalidChainFound: Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.\n");
}
void static InvalidBlockFound(CBlockIndex *pindex) {
pindex->nStatus |= BLOCK_FAILED_VALID;
pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex));
setBlockIndexValid.erase(pindex);
InvalidChainFound(pindex);
if (pindex->GetNextInMainChain()) {
CValidationState stateDummy;
ConnectBestBlock(stateDummy); // reorganise away from the failed block
}
}
bool ConnectBestBlock(CValidationState &state) {
do {
CBlockIndex *pindexNewBest;
{
std::set<CBlockIndex*,CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexValid.rbegin();
if (it == setBlockIndexValid.rend())
return true;
pindexNewBest = *it;
}
if (pindexNewBest == pindexBest || (pindexBest && pindexNewBest->nChainWork == pindexBest->nChainWork))
return true; // nothing to do
// check ancestry
CBlockIndex *pindexTest = pindexNewBest;
std::vector<CBlockIndex*> vAttach;
do {
if (pindexTest->nStatus & BLOCK_FAILED_MASK) {
// mark descendants failed
CBlockIndex *pindexFailed = pindexNewBest;
while (pindexTest != pindexFailed) {
pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
setBlockIndexValid.erase(pindexFailed);
pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexFailed));
pindexFailed = pindexFailed->pprev;
}
InvalidChainFound(pindexNewBest);
break;
}
if (pindexBest == NULL || pindexTest->nChainWork > pindexBest->nChainWork)
vAttach.push_back(pindexTest);
if (pindexTest->pprev == NULL || pindexTest->GetNextInMainChain()) {
reverse(vAttach.begin(), vAttach.end());
BOOST_FOREACH(CBlockIndex *pindexSwitch, vAttach) {
boost::this_thread::interruption_point();
try {
if (!SetBestChain(state, pindexSwitch))
return false;
} catch(std::runtime_error &e) {
return state.Abort(_("System error: ") + e.what());
}
}
return true;
}
pindexTest = pindexTest->pprev;
} while(true);
} while(true);
}
void CBlockHeader::UpdateTime(const CBlockIndex* pindexPrev)
{
nTime = max(pindexPrev->GetMedianTimePast()+1, GetAdjustedTime());
// Updating time can change work required on testnet:
if (fTestNet)
nBits = GetNextWorkRequired(pindexPrev, this);
}
const CTxOut &CTransaction::GetOutputFor(const CTxIn& input, CCoinsViewCache& view)
{
const CCoins &coins = view.GetCoins(input.prevout.hash);
assert(coins.IsAvailable(input.prevout.n));
return coins.vout[input.prevout.n];
}
int64 CTransaction::GetValueIn(CCoinsViewCache& inputs) const
{
if (IsCoinBase())
return 0;
int64 nResult = 0;
for (unsigned int i = 0; i < vin.size(); i++)
nResult += GetOutputFor(vin[i], inputs).nValue;
return nResult;
}
unsigned int CTransaction::GetP2SHSigOpCount(CCoinsViewCache& inputs) const
{
if (IsCoinBase())
return 0;
unsigned int nSigOps = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
const CTxOut &prevout = GetOutputFor(vin[i], inputs);
if (prevout.scriptPubKey.IsPayToScriptHash())
nSigOps += prevout.scriptPubKey.GetSigOpCount(vin[i].scriptSig);
}
return nSigOps;
}
void CTransaction::UpdateCoins(CValidationState &state, CCoinsViewCache &inputs, CTxUndo &txundo, int nHeight, const uint256 &txhash) const
{
// mark inputs spent
if (!IsCoinBase()) {
BOOST_FOREACH(const CTxIn &txin, vin) {
CCoins &coins = inputs.GetCoins(txin.prevout.hash);
CTxInUndo undo;
assert(coins.Spend(txin.prevout, undo));
txundo.vprevout.push_back(undo);
}
}
// add outputs
assert(inputs.SetCoins(txhash, CCoins(*this, nHeight)));
}
bool CTransaction::HaveInputs(CCoinsViewCache &inputs) const
{
if (!IsCoinBase()) {
// first check whether information about the prevout hash is available
for (unsigned int i = 0; i < vin.size(); i++) {
const COutPoint &prevout = vin[i].prevout;
if (!inputs.HaveCoins(prevout.hash))
return false;
}
// then check whether the actual outputs are available
for (unsigned int i = 0; i < vin.size(); i++) {
const COutPoint &prevout = vin[i].prevout;
const CCoins &coins = inputs.GetCoins(prevout.hash);
if (!coins.IsAvailable(prevout.n))
return false;
}
}
return true;
}
bool CScriptCheck::operator()() const {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
if (!VerifyScript(scriptSig, scriptPubKey, *ptxTo, nIn, nFlags, nHashType))
return error("CScriptCheck() : %s VerifySignature failed", ptxTo->GetHash().ToString().c_str());
return true;
}
bool VerifySignature(const CCoins& txFrom, const CTransaction& txTo, unsigned int nIn, unsigned int flags, int nHashType)
{
return CScriptCheck(txFrom, txTo, nIn, flags, nHashType)();
}
bool CTransaction::CheckInputs(CValidationState &state, CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, std::vector<CScriptCheck> *pvChecks) const
{
if (!IsCoinBase())
{
if (pvChecks)
pvChecks->reserve(vin.size());
// This doesn't trigger the DoS code on purpose; if it did, it would make it easier
// for an attacker to attempt to split the network.
if (!HaveInputs(inputs))
return state.Invalid(error("CheckInputs() : %s inputs unavailable", GetHash().ToString().c_str()));
// While checking, GetBestBlock() refers to the parent block.
// This is also true for mempool checks.
int nSpendHeight = inputs.GetBestBlock()->nHeight + 1;
int64 nValueIn = 0;
int64 nFees = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
const COutPoint &prevout = vin[i].prevout;
const CCoins &coins = inputs.GetCoins(prevout.hash);
// If prev is coinbase, check that it's matured
if (coins.IsCoinBase()) {
if (nSpendHeight - coins.nHeight < COINBASE_MATURITY)
return state.Invalid(error("CheckInputs() : tried to spend coinbase at depth %d", nSpendHeight - coins.nHeight));
}
// Check for negative or overflow input values
nValueIn += coins.vout[prevout.n].nValue;
if (!MoneyRange(coins.vout[prevout.n].nValue) || !MoneyRange(nValueIn))
return state.DoS(100, error("CheckInputs() : txin values out of range"));
}
if (nValueIn < GetValueOut())
return state.DoS(100, error("CheckInputs() : %s value in < value out", GetHash().ToString().c_str()));
// Tally transaction fees
int64 nTxFee = nValueIn - GetValueOut();
if (nTxFee < 0)
return state.DoS(100, error("CheckInputs() : %s nTxFee < 0", GetHash().ToString().c_str()));
nFees += nTxFee;
if (!MoneyRange(nFees))
return state.DoS(100, error("CheckInputs() : nFees out of range"));
// The first loop above does all the inexpensive checks.
// Only if ALL inputs pass do we perform expensive ECDSA signature checks.
// Helps prevent CPU exhaustion attacks.
// Skip ECDSA signature verification when connecting blocks
// before the last block chain checkpoint. This is safe because block merkle hashes are
// still computed and checked, and any change will be caught at the next checkpoint.
if (fScriptChecks) {
for (unsigned int i = 0; i < vin.size(); i++) {
const COutPoint &prevout = vin[i].prevout;
const CCoins &coins = inputs.GetCoins(prevout.hash);
// Verify signature
CScriptCheck check(coins, *this, i, flags, 0);
if (pvChecks) {
pvChecks->push_back(CScriptCheck());
check.swap(pvChecks->back());
} else if (!check()) {
if (flags & SCRIPT_VERIFY_STRICTENC) {
// For now, check whether the failure was caused by non-canonical
// encodings or not; if so, don't trigger DoS protection.
CScriptCheck check(coins, *this, i, flags & (~SCRIPT_VERIFY_STRICTENC), 0);
if (check())
return state.Invalid();
}
return state.DoS(100,false);
}
}
}
}
return true;
}
bool CBlock::DisconnectBlock(CValidationState &state, CBlockIndex *pindex, CCoinsViewCache &view, bool *pfClean)
{
assert(pindex == view.GetBestBlock());
if (pfClean)
*pfClean = false;
bool fClean = true;
CBlockUndo blockUndo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (pos.IsNull())
return error("DisconnectBlock() : no undo data available");
if (!blockUndo.ReadFromDisk(pos, pindex->pprev->GetBlockHash()))
return error("DisconnectBlock() : failure reading undo data");
if (blockUndo.vtxundo.size() + 1 != vtx.size())
return error("DisconnectBlock() : block and undo data inconsistent");
// undo transactions in reverse order
for (int i = vtx.size() - 1; i >= 0; i--) {
const CTransaction &tx = vtx[i];
uint256 hash = tx.GetHash();
// check that all outputs are available
if (!view.HaveCoins(hash)) {
fClean = fClean && error("DisconnectBlock() : outputs still spent? database corrupted");
view.SetCoins(hash, CCoins());
}
CCoins &outs = view.GetCoins(hash);
CCoins outsBlock = CCoins(tx, pindex->nHeight);
if (outs != outsBlock)
fClean = fClean && error("DisconnectBlock() : added transaction mismatch? database corrupted");
// remove outputs
outs = CCoins();
// restore inputs
if (i > 0) { // not coinbases
const CTxUndo &txundo = blockUndo.vtxundo[i-1];
if (txundo.vprevout.size() != tx.vin.size())
return error("DisconnectBlock() : transaction and undo data inconsistent");
for (unsigned int j = tx.vin.size(); j-- > 0;) {
const COutPoint &out = tx.vin[j].prevout;
const CTxInUndo &undo = txundo.vprevout[j];
CCoins coins;
view.GetCoins(out.hash, coins); // this can fail if the prevout was already entirely spent
if (undo.nHeight != 0) {
// undo data contains height: this is the last output of the prevout tx being spent
if (!coins.IsPruned())
fClean = fClean && error("DisconnectBlock() : undo data overwriting existing transaction");
coins = CCoins();
coins.fCoinBase = undo.fCoinBase;
coins.nHeight = undo.nHeight;
coins.nVersion = undo.nVersion;
} else {
if (coins.IsPruned())
fClean = fClean && error("DisconnectBlock() : undo data adding output to missing transaction");
}
if (coins.IsAvailable(out.n))
fClean = fClean && error("DisconnectBlock() : undo data overwriting existing output");
if (coins.vout.size() < out.n+1)
coins.vout.resize(out.n+1);
coins.vout[out.n] = undo.txout;
if (!view.SetCoins(out.hash, coins))
return error("DisconnectBlock() : cannot restore coin inputs");
}
}
}
// move best block pointer to prevout block
view.SetBestBlock(pindex->pprev);
if (pfClean) {
*pfClean = fClean;
return true;
} else {
return fClean;
}
}
void static FlushBlockFile(bool fFinalize = false)
{
LOCK(cs_LastBlockFile);
CDiskBlockPos posOld(nLastBlockFile, 0);
FILE *fileOld = OpenBlockFile(posOld);
if (fileOld) {
if (fFinalize)
TruncateFile(fileOld, infoLastBlockFile.nSize);
FileCommit(fileOld);
fclose(fileOld);
}
fileOld = OpenUndoFile(posOld);
if (fileOld) {
if (fFinalize)
TruncateFile(fileOld, infoLastBlockFile.nUndoSize);
FileCommit(fileOld);
fclose(fileOld);
}
}
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
void ThreadScriptCheck() {
RenameThread("megacoin-scriptch");
scriptcheckqueue.Thread();
}
bool CBlock::ConnectBlock(CValidationState &state, CBlockIndex* pindex, CCoinsViewCache &view, bool fJustCheck)
{
// Check it again in case a previous version let a bad block in
if (!CheckBlock(state, !fJustCheck, !fJustCheck))
return false;
// verify that the view's current state corresponds to the previous block
assert(pindex->pprev == view.GetBestBlock());
// Special case for the genesis block, skipping connection of its transactions
// (its coinbase is unspendable)
if (GetHash() == hashGenesisBlock) {
view.SetBestBlock(pindex);
pindexGenesisBlock = pindex;
return true;
}
bool fScriptChecks = pindex->nHeight >= Checkpoints::GetTotalBlocksEstimate();
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
// If such overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance -- even after
// being sent to another address.
// See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction ids entirely.
// This rule was originally applied to all blocks whose timestamp was after March 15, 2012, 0:00 UTC.
// Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
// two in the chain that violate it. This prevents exploiting the issue against nodes in their
// initial block download.
bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
!((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
(pindex->nHeight==91880 && pindex->GetBlockHash() == uint256("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
if (fEnforceBIP30) {
for (unsigned int i=0; i<vtx.size(); i++) {
uint256 hash = GetTxHash(i);
if (view.HaveCoins(hash) && !view.GetCoins(hash).IsPruned())
return state.DoS(100, error("ConnectBlock() : tried to overwrite transaction"));
}
}
// BIP16 didn't become active until Apr 1 2012
int64 nBIP16SwitchTime = 1333238400;
bool fStrictPayToScriptHash = (pindex->nTime >= nBIP16SwitchTime);
unsigned int flags = SCRIPT_VERIFY_NOCACHE |
(fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE);
CBlockUndo blockundo;
CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
int64 nStart = GetTimeMicros();
int64 nFees = 0;
int nInputs = 0;
unsigned int nSigOps = 0;
CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(vtx.size()));
std::vector<std::pair<uint256, CDiskTxPos> > vPos;
vPos.reserve(vtx.size());
for (unsigned int i=0; i<vtx.size(); i++)
{
const CTransaction &tx = vtx[i];
nInputs += tx.vin.size();
nSigOps += tx.GetLegacySigOpCount();
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("ConnectBlock() : too many sigops"));
if (!tx.IsCoinBase())
{
if (!tx.HaveInputs(view))
return state.DoS(100, error("ConnectBlock() : inputs missing/spent"));
if (fStrictPayToScriptHash)
{
// Add in sigops done by pay-to-script-hash inputs;
// this is to prevent a "rogue miner" from creating
// an incredibly-expensive-to-validate block.
nSigOps += tx.GetP2SHSigOpCount(view);
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("ConnectBlock() : too many sigops"));
}
nFees += tx.GetValueIn(view)-tx.GetValueOut();
std::vector<CScriptCheck> vChecks;
if (!tx.CheckInputs(state, view, fScriptChecks, flags, nScriptCheckThreads ? &vChecks : NULL))
return false;
control.Add(vChecks);
}
CTxUndo txundo;
tx.UpdateCoins(state, view, txundo, pindex->nHeight, GetTxHash(i));
if (!tx.IsCoinBase())
blockundo.vtxundo.push_back(txundo);
vPos.push_back(std::make_pair(GetTxHash(i), pos));
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
}
int64 nTime = GetTimeMicros() - nStart;
if (fBenchmark)
printf("- Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin)\n", (unsigned)vtx.size(), 0.001 * nTime, 0.001 * nTime / vtx.size(), nInputs <= 1 ? 0 : 0.001 * nTime / (nInputs-1));
if (vtx[0].GetValueOut() > GetBlockValue(pindex->nHeight, nFees))
return state.DoS(100, error("ConnectBlock() : coinbase pays too much (actual=%"PRI64d" vs limit=%"PRI64d")", vtx[0].GetValueOut(), GetBlockValue(pindex->nHeight, nFees)));
if (!control.Wait())
return state.DoS(100, false);
int64 nTime2 = GetTimeMicros() - nStart;
if (fBenchmark)
printf("- Verify %u txins: %.2fms (%.3fms/txin)\n", nInputs - 1, 0.001 * nTime2, nInputs <= 1 ? 0 : 0.001 * nTime2 / (nInputs-1));
if (fJustCheck)
return true;
// Write undo information to disk
if (pindex->GetUndoPos().IsNull() || (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS)
{
if (pindex->GetUndoPos().IsNull()) {
CDiskBlockPos pos;
if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
return error("ConnectBlock() : FindUndoPos failed");
if (!blockundo.WriteToDisk(pos, pindex->pprev->GetBlockHash()))
return state.Abort(_("Failed to write undo data"));
// update nUndoPos in block index
pindex->nUndoPos = pos.nPos;
pindex->nStatus |= BLOCK_HAVE_UNDO;
}
pindex->nStatus = (pindex->nStatus & ~BLOCK_VALID_MASK) | BLOCK_VALID_SCRIPTS;
CDiskBlockIndex blockindex(pindex);
if (!pblocktree->WriteBlockIndex(blockindex))
return state.Abort(_("Failed to write block index"));
}
if (fTxIndex)
if (!pblocktree->WriteTxIndex(vPos))
return state.Abort(_("Failed to write transaction index"));
// add this block to the view's block chain
assert(view.SetBestBlock(pindex));
// Watch for transactions paying to me
for (unsigned int i=0; i<vtx.size(); i++)
SyncWithWallets(GetTxHash(i), vtx[i], this, true);
return true;
}
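// SetBestChain: make pindexNew the tip of the active chain. The code below
// finds the fork point against the current best block, disconnects the blocks
// on the old branch (queueing their non-coinbase transactions for resurrection
// into the mempool), connects the blocks on the new branch, flushes the coin
// view to disk, and finally updates the global best-chain state, the wallet
// best-chain locator, and the optional -blocknotify command.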
bool SetBestChain(CValidationState &state, CBlockIndex* pindexNew)
{
// All modifications to the coin state will be done in this cache.
// Only when all have succeeded, we push it to pcoinsTip.
CCoinsViewCache view(*pcoinsTip, true);
// Find the fork (typically, there is none)
CBlockIndex* pfork = view.GetBestBlock();
CBlockIndex* plonger = pindexNew;
while (pfork && pfork != plonger)
{
while (plonger->nHeight > pfork->nHeight) {
plonger = plonger->pprev;
assert(plonger != NULL);
}
if (pfork == plonger)
break;
pfork = pfork->pprev;
assert(pfork != NULL);
}
// List of what to disconnect (typically nothing)
vector<CBlockIndex*> vDisconnect;
for (CBlockIndex* pindex = view.GetBestBlock(); pindex != pfork; pindex = pindex->pprev)
vDisconnect.push_back(pindex);
// List of what to connect (typically only pindexNew)
vector<CBlockIndex*> vConnect;
for (CBlockIndex* pindex = pindexNew; pindex != pfork; pindex = pindex->pprev)
vConnect.push_back(pindex);
reverse(vConnect.begin(), vConnect.end());
if (vDisconnect.size() > 0) {
printf("REORGANIZE: Disconnect %"PRIszu" blocks; %s..\n", vDisconnect.size(), pfork->GetBlockHash().ToString().c_str());
printf("REORGANIZE: Connect %"PRIszu" blocks; ..%s\n", vConnect.size(), pindexNew->GetBlockHash().ToString().c_str());
}
// Disconnect shorter branch
vector<CTransaction> vResurrect;
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect) {
CBlock block;
if (!block.ReadFromDisk(pindex))
return state.Abort(_("Failed to read block"));
int64 nStart = GetTimeMicros();
if (!block.DisconnectBlock(state, pindex, view))
return error("SetBestChain() : DisconnectBlock %s failed", pindex->GetBlockHash().ToString().c_str());
if (fBenchmark)
printf("- Disconnect: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
// Queue memory transactions to resurrect.
// We only do this for blocks after the last checkpoint (reorganisation before that
// point should only happen with -reindex/-loadblock, or a misbehaving peer).
BOOST_FOREACH(const CTransaction& tx, block.vtx)
if (!tx.IsCoinBase() && pindex->nHeight > Checkpoints::GetTotalBlocksEstimate())
vResurrect.push_back(tx);
}
// Connect longer branch
vector<CTransaction> vDelete;
BOOST_FOREACH(CBlockIndex *pindex, vConnect) {
CBlock block;
if (!block.ReadFromDisk(pindex))
return state.Abort(_("Failed to read block"));
int64 nStart = GetTimeMicros();
if (!block.ConnectBlock(state, pindex, view)) {
if (state.IsInvalid()) {
InvalidChainFound(pindexNew);
InvalidBlockFound(pindex);
}
return error("SetBestChain() : ConnectBlock %s failed", pindex->GetBlockHash().ToString().c_str());
}
if (fBenchmark)
printf("- Connect: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
// Queue memory transactions to delete
BOOST_FOREACH(const CTransaction& tx, block.vtx)
vDelete.push_back(tx);
}
// Flush changes to global coin state
int64 nStart = GetTimeMicros();
int nModified = view.GetCacheSize();
assert(view.Flush());
int64 nTime = GetTimeMicros() - nStart;
if (fBenchmark)
printf("- Flush %i transactions: %.2fms (%.4fms/tx)\n", nModified, 0.001 * nTime, 0.001 * nTime / nModified);
// Make sure it's successfully written to disk before changing memory structure
bool fIsInitialDownload = IsInitialBlockDownload();
if (!fIsInitialDownload || pcoinsTip->GetCacheSize() > nCoinCacheSize) {
// Typical CCoins structures on disk are around 100 bytes in size.
// Pushing a new one to the database can cause it to be written
// twice (once in the log, and once in the tables). This is already
// an overestimation, as most will delete an existing entry or
// overwrite one. Still, use a conservative safety factor of 2.
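// Worked example (illustrative numbers only): with ~50,000 cached entries the
// check below requires 100 * 2 * 2 * 50,000 = ~20 MB of free disk space
// before flushing.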
if (!CheckDiskSpace(100 * 2 * 2 * pcoinsTip->GetCacheSize()))
return state.Error();
FlushBlockFile();
pblocktree->Sync();
if (!pcoinsTip->Flush())
return state.Abort(_("Failed to write to coin database"));
}
// At this point, all changes have been done to the database.
// Proceed by updating the memory structures.
// Register new best chain
vBlockIndexByHeight.resize(pindexNew->nHeight + 1);
BOOST_FOREACH(CBlockIndex* pindex, vConnect)
vBlockIndexByHeight[pindex->nHeight] = pindex;
// Resurrect memory transactions that were in the disconnected branch
BOOST_FOREACH(CTransaction& tx, vResurrect) {
// ignore validation errors in resurrected transactions
CValidationState stateDummy;
tx.AcceptToMemoryPool(stateDummy, true, false);
}
// Delete redundant memory transactions that are in the connected branch
BOOST_FOREACH(CTransaction& tx, vDelete) {
mempool.remove(tx);
mempool.removeConflicts(tx);
}
// Update best block in wallet (so we can detect restored wallets)
if ((pindexNew->nHeight % 20160) == 0 || (!fIsInitialDownload && (pindexNew->nHeight % 144) == 0))
{
const CBlockLocator locator(pindexNew);
::SetBestChain(locator);
}
// New best block
hashBestChain = pindexNew->GetBlockHash();
pindexBest = pindexNew;
pblockindexFBBHLast = NULL;
nBestHeight = pindexBest->nHeight;
nBestChainWork = pindexNew->nChainWork;
nTimeBestReceived = GetTime();
nTransactionsUpdated++;
printf("SetBestChain: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f\n",
hashBestChain.ToString().c_str(), nBestHeight, log(nBestChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str(),
Checkpoints::GuessVerificationProgress(pindexBest));
// Check the version of the last 100 blocks to see if we need to upgrade:
if (!fIsInitialDownload)
{
int nUpgraded = 0;
const CBlockIndex* pindex = pindexBest;
for (int i = 0; i < 100 && pindex != NULL; i++)
{
if (pindex->nVersion > CBlock::CURRENT_VERSION)
++nUpgraded;
pindex = pindex->pprev;
}
if (nUpgraded > 0)
printf("SetBestChain: %d of last 100 blocks above version %d\n", nUpgraded, CBlock::CURRENT_VERSION);
if (nUpgraded > 100/2)
// strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
strMiscWarning = _("Warning: This version is obsolete, upgrade required!");
}
std::string strCmd = GetArg("-blocknotify", "");
if (!fIsInitialDownload && !strCmd.empty())
{
boost::replace_all(strCmd, "%s", hashBestChain.GetHex());
boost::thread t(runCommand, strCmd); // thread runs free
}
return true;
}
bool CBlock::AddToBlockIndex(CValidationState &state, const CDiskBlockPos &pos)
{
// Check for duplicate
uint256 hash = GetHash();
if (mapBlockIndex.count(hash))
return state.Invalid(error("AddToBlockIndex() : %s already exists", hash.ToString().c_str()));
// Construct new block index object
CBlockIndex* pindexNew = new CBlockIndex(*this);
assert(pindexNew);
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
map<uint256, CBlockIndex*>::iterator miPrev = mapBlockIndex.find(hashPrevBlock);
if (miPrev != mapBlockIndex.end())
{
pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
}
pindexNew->nTx = vtx.size();
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + pindexNew->GetBlockWork().getuint256();
pindexNew->nChainTx = (pindexNew->pprev ? pindexNew->pprev->nChainTx : 0) + pindexNew->nTx;
pindexNew->nFile = pos.nFile;
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
pindexNew->nStatus = BLOCK_VALID_TRANSACTIONS | BLOCK_HAVE_DATA;
setBlockIndexValid.insert(pindexNew);
if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexNew)))
return state.Abort(_("Failed to write block index"));
// New best?
if (!ConnectBestBlock(state))
return false;
if (pindexNew == pindexBest)
{
// Notify UI to display prev block's coinbase if it was ours
static uint256 hashPrevBestCoinBase;
UpdatedTransaction(hashPrevBestCoinBase);
hashPrevBestCoinBase = GetTxHash(0);
}
if (!pblocktree->Flush())
return state.Abort(_("Failed to sync block index"));
uiInterface.NotifyBlocksChanged();
return true;
}
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64 nTime, bool fKnown = false)
{
bool fUpdatedLast = false;
LOCK(cs_LastBlockFile);
if (fKnown) {
if (nLastBlockFile != pos.nFile) {
nLastBlockFile = pos.nFile;
infoLastBlockFile.SetNull();
pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile);
fUpdatedLast = true;
}
} else {
while (infoLastBlockFile.nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
printf("Leaving block file %i: %s\n", nLastBlockFile, infoLastBlockFile.ToString().c_str());
FlushBlockFile(true);
nLastBlockFile++;
infoLastBlockFile.SetNull();
pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile); // check whether data for the new file somehow already exist; can fail just fine
fUpdatedLast = true;
}
pos.nFile = nLastBlockFile;
pos.nPos = infoLastBlockFile.nSize;
}
infoLastBlockFile.nSize += nAddSize;
infoLastBlockFile.AddBlock(nHeight, nTime);
if (!fKnown) {
unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
unsigned int nNewChunks = (infoLastBlockFile.nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
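// Example (assuming BLOCKFILE_CHUNK_SIZE is 16 MiB, as in upstream Bitcoin):
// a file currently at pos.nPos = 15 MiB spans ceil(15/16) = 1 chunk; growing
// it to 17 MiB gives nNewChunks = 2, so one extra chunk is pre-allocated
// below after the disk-space check.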
if (nNewChunks > nOldChunks) {
if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenBlockFile(pos);
if (file) {
printf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
fclose(file);
}
}
else
return state.Error();
}
}
if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, infoLastBlockFile))
return state.Abort(_("Failed to write file info"));
if (fUpdatedLast)
pblocktree->WriteLastBlockFile(nLastBlockFile);
return true;
}
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
pos.nFile = nFile;
LOCK(cs_LastBlockFile);
unsigned int nNewSize;
if (nFile == nLastBlockFile) {
pos.nPos = infoLastBlockFile.nUndoSize;
nNewSize = (infoLastBlockFile.nUndoSize += nAddSize);
if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, infoLastBlockFile))
return state.Abort(_("Failed to write block info"));
} else {
CBlockFileInfo info;
if (!pblocktree->ReadBlockFileInfo(nFile, info))
return state.Abort(_("Failed to read block info"));
pos.nPos = info.nUndoSize;
nNewSize = (info.nUndoSize += nAddSize);
if (!pblocktree->WriteBlockFileInfo(nFile, info))
return state.Abort(_("Failed to write block info"));
}
unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenUndoFile(pos);
if (file) {
printf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
fclose(file);
}
}
else
return state.Error();
}
return true;
}
bool CBlock::CheckBlock(CValidationState &state, bool fCheckPOW, bool fCheckMerkleRoot) const
{
// These are checks that are independent of context
// that can be verified before saving an orphan block.
// Size limits
if (vtx.empty() || vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return state.DoS(100, error("CheckBlock() : size limits failed"));
// Check proof of work matches claimed amount
//if (fCheckPOW && !CheckProofOfWork(GetHash(), nBits)) // -Scrypt
if (fCheckPOW && !CheckProofOfWork(GetPoWHash(), nBits)) // +Scrypt
return state.DoS(50, error("CheckBlock() : proof of work failed"));
// Check timestamp
if (GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
return state.Invalid(error("CheckBlock() : block timestamp too far in the future"));
// First transaction must be coinbase, the rest must not be
if (vtx.empty() || !vtx[0].IsCoinBase())
return state.DoS(100, error("CheckBlock() : first tx is not coinbase"));
for (unsigned int i = 1; i < vtx.size(); i++)
if (vtx[i].IsCoinBase())
return state.DoS(100, error("CheckBlock() : more than one coinbase"));
// Check transactions
BOOST_FOREACH(const CTransaction& tx, vtx)
if (!tx.CheckTransaction(state))
return error("CheckBlock() : CheckTransaction failed");
// Build the merkle tree already. We need it anyway later, and it makes the
// block cache the transaction hashes, which means they don't need to be
// recalculated many times during this block's validation.
BuildMerkleTree();
// Check for duplicate txids. This is caught by ConnectInputs(),
// but catching it earlier avoids a potential DoS attack:
set<uint256> uniqueTx;
for (unsigned int i=0; i<vtx.size(); i++) {
uniqueTx.insert(GetTxHash(i));
}
if (uniqueTx.size() != vtx.size())
return state.DoS(100, error("CheckBlock() : duplicate transaction"));
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTransaction& tx, vtx)
{
nSigOps += tx.GetLegacySigOpCount();
}
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"));
// Check merkle root
if (fCheckMerkleRoot && hashMerkleRoot != BuildMerkleTree())
return state.DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"));
return true;
}
bool CBlock::AcceptBlock(CValidationState &state, CDiskBlockPos *dbp)
{
// Check for duplicate
uint256 hash = GetHash();
if (mapBlockIndex.count(hash))
return state.Invalid(error("AcceptBlock() : block already in mapBlockIndex"));
// Get prev block index
CBlockIndex* pindexPrev = NULL;
int nHeight = 0;
if (hash != hashGenesisBlock) {
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashPrevBlock);
if (mi == mapBlockIndex.end())
return state.DoS(10, error("AcceptBlock() : prev block not found"));
pindexPrev = (*mi).second;
nHeight = pindexPrev->nHeight+1;
// Check proof of work
if (nBits != GetNextWorkRequired(pindexPrev, this))
return state.DoS(100, error("AcceptBlock() : incorrect proof of work"));
// Check timestamp against prev
if (GetBlockTime() <= pindexPrev->GetMedianTimePast())
return state.Invalid(error("AcceptBlock() : block's timestamp is too early"));
// Check that all transactions are finalized
BOOST_FOREACH(const CTransaction& tx, vtx)
if (!tx.IsFinal(nHeight, GetBlockTime()))
return state.DoS(10, error("AcceptBlock() : contains a non-final transaction"));
// Check that the block chain matches the known block chain up to a checkpoint
if (!Checkpoints::CheckBlock(nHeight, hash))
return state.DoS(100, error("AcceptBlock() : rejected by checkpoint lock-in at %d", nHeight));
// Reject block.nVersion=1 blocks when 95% (75% on testnet) of the network has upgraded:
if (nVersion < 2)
{
if ((!fTestNet && CBlockIndex::IsSuperMajority(2, pindexPrev, 950, 1000)) ||
(fTestNet && CBlockIndex::IsSuperMajority(2, pindexPrev, 75, 100)))
{
return state.Invalid(error("AcceptBlock() : rejected nVersion=1 block"));
}
}
// Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
if (nVersion >= 2)
{
// if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
if ((!fTestNet && CBlockIndex::IsSuperMajority(2, pindexPrev, 750, 1000)) ||
(fTestNet && CBlockIndex::IsSuperMajority(2, pindexPrev, 51, 100)))
{
CScript expect = CScript() << nHeight;
if (!std::equal(expect.begin(), expect.end(), vtx[0].vin[0].scriptSig.begin()))
return state.DoS(100, error("AcceptBlock() : block height mismatch in coinbase"));
}
}
}
// Write block to history file
try {
unsigned int nBlockSize = ::GetSerializeSize(*this, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
if (dbp != NULL)
blockPos = *dbp;
if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, nTime, dbp != NULL))
return error("AcceptBlock() : FindBlockPos failed");
if (dbp == NULL)
if (!WriteToDisk(blockPos))
return state.Abort(_("Failed to write block"));
if (!AddToBlockIndex(state, blockPos))
return error("AcceptBlock() : AddToBlockIndex failed");
} catch(std::runtime_error &e) {
return state.Abort(_("System error: ") + e.what());
}
// Relay inventory, but don't relay old inventory during initial block download
int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate();
if (hashBestChain == hash)
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
if (nBestHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
pnode->PushInventory(CInv(MSG_BLOCK, hash));
}
return true;
}
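// Returns true when at least nRequired of the up-to-nToCheck ancestors
// starting at pstart (walking backwards via pprev) report nVersion >= minVersion.
// AcceptBlock() above uses this to gate the nVersion=2 rules: 950/1000 on
// mainnet (75/100 on testnet) to reject nVersion=1 blocks, and 750/1000
// (51/100 on testnet) to enforce the coinbase block-height rule.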
bool CBlockIndex::IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned int nRequired, unsigned int nToCheck)
{
unsigned int nFound = 0;
for (unsigned int i = 0; i < nToCheck && nFound < nRequired && pstart != NULL; i++)
{
if (pstart->nVersion >= minVersion)
++nFound;
pstart = pstart->pprev;
}
return (nFound >= nRequired);
}
bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBlockPos *dbp)
{
// Check for duplicate
uint256 hash = pblock->GetHash();
if (mapBlockIndex.count(hash))
return state.Invalid(error("ProcessBlock() : already have block %d %s", mapBlockIndex[hash]->nHeight, hash.ToString().c_str()));
if (mapOrphanBlocks.count(hash))
return state.Invalid(error("ProcessBlock() : already have block (orphan) %s", hash.ToString().c_str()));
// Preliminary checks
if (!pblock->CheckBlock(state))
return error("ProcessBlock() : CheckBlock FAILED");
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(mapBlockIndex);
if (pcheckpoint && pblock->hashPrevBlock != hashBestChain)
{
// Extra checks to prevent "fill up memory by spamming with bogus blocks"
int64 deltaTime = pblock->GetBlockTime() - pcheckpoint->nTime;
if (deltaTime < 0)
{
return state.DoS(100, error("ProcessBlock() : block with timestamp before last checkpoint"));
}
#if 0
// Now that we are using a FIR filter (see above) this is no longer
// a straightforward calculation.
CBigNum bnNewBlock;
bnNewBlock.SetCompact(pblock->nBits);
CBigNum bnRequired;
bnRequired.SetCompact(ComputeMinWork(pcheckpoint->nBits, deltaTime));
if (bnNewBlock > bnRequired)
{
return state.DoS(100, error("ProcessBlock() : block with too little proof-of-work"));
}
#endif
}
// If we don't already have its previous block, shunt it off to holding area until we get it
if (pblock->hashPrevBlock != 0 && !mapBlockIndex.count(pblock->hashPrevBlock))
{
printf("ProcessBlock: ORPHAN BLOCK, prev=%s\n", pblock->hashPrevBlock.ToString().c_str());
// Accept orphans as long as there is a node to request its parents from
if (pfrom) {
CBlock* pblock2 = new CBlock(*pblock);
mapOrphanBlocks.insert(make_pair(hash, pblock2));
mapOrphanBlocksByPrev.insert(make_pair(pblock2->hashPrevBlock, pblock2));
// Ask this guy to fill in what we're missing
pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(pblock2));
}
return true;
}
// Store to disk
if (!pblock->AcceptBlock(state, dbp))
return error("ProcessBlock() : AcceptBlock FAILED");
// Recursively process any orphan blocks that depended on this one
vector<uint256> vWorkQueue;
vWorkQueue.push_back(hash);
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
{
uint256 hashPrev = vWorkQueue[i];
for (multimap<uint256, CBlock*>::iterator mi = mapOrphanBlocksByPrev.lower_bound(hashPrev);
mi != mapOrphanBlocksByPrev.upper_bound(hashPrev);
++mi)
{
CBlock* pblockOrphan = (*mi).second;
// Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan resolution (that is, feeding people an invalid block based on LegitBlockX in order to get anyone relaying LegitBlockX banned)
CValidationState stateDummy;
if (pblockOrphan->AcceptBlock(stateDummy))
vWorkQueue.push_back(pblockOrphan->GetHash());
mapOrphanBlocks.erase(pblockOrphan->GetHash());
delete pblockOrphan;
}
mapOrphanBlocksByPrev.erase(hashPrev);
}
printf("ProcessBlock: ACCEPTED\n");
return true;
}
CMerkleBlock::CMerkleBlock(const CBlock& block, CBloomFilter& filter)
{
header = block.GetBlockHeader();
vector<bool> vMatch;
vector<uint256> vHashes;
vMatch.reserve(block.vtx.size());
vHashes.reserve(block.vtx.size());
for (unsigned int i = 0; i < block.vtx.size(); i++)
{
uint256 hash = block.vtx[i].GetHash();
if (filter.IsRelevantAndUpdate(block.vtx[i], hash))
{
vMatch.push_back(true);
vMatchedTxn.push_back(make_pair(i, hash));
}
else
vMatch.push_back(false);
vHashes.push_back(hash);
}
txn = CPartialMerkleTree(vHashes, vMatch);
}
uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos, const std::vector<uint256> &vTxid) {
if (height == 0) {
// hashes at height 0 are the txids themselves
return vTxid[pos];
} else {
// calculate left hash
uint256 left = CalcHash(height-1, pos*2, vTxid), right;
// calculate right hash if not beyond the end of the array - copy left hash otherwise
if (pos*2+1 < CalcTreeWidth(height-1))
right = CalcHash(height-1, pos*2+1, vTxid);
else
right = left;
// combine subhashes
return Hash(BEGIN(left), END(left), BEGIN(right), END(right));
}
}
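// The partial merkle tree is built depth-first from the root: for every node
// visited, one flag bit is pushed to vBits, and a hash is pushed to vHash only
// at height 0 or when nothing in the node's subtree matches. CalcTreeWidth(height)
// is assumed (as declared in the header) to give the number of nodes at that
// height, i.e. ceil(nTransactions / 2^height), which is why a missing right
// child is handled by duplicating the left hash, as in CalcHash() above.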
void CPartialMerkleTree::TraverseAndBuild(int height, unsigned int pos, const std::vector<uint256> &vTxid, const std::vector<bool> &vMatch) {
// determine whether this node is the parent of at least one matched txid
bool fParentOfMatch = false;
for (unsigned int p = pos << height; p < (pos+1) << height && p < nTransactions; p++)
fParentOfMatch |= vMatch[p];
// store as flag bit
vBits.push_back(fParentOfMatch);
if (height==0 || !fParentOfMatch) {
// if at height 0, or nothing interesting below, store hash and stop
vHash.push_back(CalcHash(height, pos, vTxid));
} else {
// otherwise, don't store any hash, but descend into the subtrees
TraverseAndBuild(height-1, pos*2, vTxid, vMatch);
if (pos*2+1 < CalcTreeWidth(height-1))
TraverseAndBuild(height-1, pos*2+1, vTxid, vMatch);
}
}
uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, unsigned int &nBitsUsed, unsigned int &nHashUsed, std::vector<uint256> &vMatch) {
if (nBitsUsed >= vBits.size()) {
// overflowed the bits array - failure
fBad = true;
return 0;
}
bool fParentOfMatch = vBits[nBitsUsed++];
if (height==0 || !fParentOfMatch) {
// if at height 0, or nothing interesting below, use stored hash and do not descend
if (nHashUsed >= vHash.size()) {
// overflowed the hash array - failure
fBad = true;
return 0;
}
const uint256 &hash = vHash[nHashUsed++];
if (height==0 && fParentOfMatch) // in case of height 0, we have a matched txid
vMatch.push_back(hash);
return hash;
} else {
// otherwise, descend into the subtrees to extract matched txids and hashes
uint256 left = TraverseAndExtract(height-1, pos*2, nBitsUsed, nHashUsed, vMatch), right;
if (pos*2+1 < CalcTreeWidth(height-1))
right = TraverseAndExtract(height-1, pos*2+1, nBitsUsed, nHashUsed, vMatch);
else
right = left;
// and combine them before returning
return Hash(BEGIN(left), END(left), BEGIN(right), END(right));
}
}
CPartialMerkleTree::CPartialMerkleTree(const std::vector<uint256> &vTxid, const std::vector<bool> &vMatch) : nTransactions(vTxid.size()), fBad(false) {
// reset state
vBits.clear();
vHash.clear();
// calculate height of tree
int nHeight = 0;
while (CalcTreeWidth(nHeight) > 1)
nHeight++;
// traverse the partial tree
TraverseAndBuild(nHeight, 0, vTxid, vMatch);
}
CPartialMerkleTree::CPartialMerkleTree() : nTransactions(0), fBad(true) {}
uint256 CPartialMerkleTree::ExtractMatches(std::vector<uint256> &vMatch) {
vMatch.clear();
// An empty set will not work
if (nTransactions == 0)
return 0;
// check for excessively high numbers of transactions
if (nTransactions > MAX_BLOCK_SIZE / 60) // 60 is the lower bound for the size of a serialized CTransaction
return 0;
// there can never be more hashes provided than one for every txid
if (vHash.size() > nTransactions)
return 0;
// there must be at least one bit per node in the partial tree, and at least one node per hash
if (vBits.size() < vHash.size())
return 0;
// calculate height of tree
int nHeight = 0;
while (CalcTreeWidth(nHeight) > 1)
nHeight++;
// traverse the partial tree
unsigned int nBitsUsed = 0, nHashUsed = 0;
uint256 hashMerkleRoot = TraverseAndExtract(nHeight, 0, nBitsUsed, nHashUsed, vMatch);
// verify that no problems occurred during the tree traversal
if (fBad)
return 0;
// verify that all bits were consumed (except for the padding caused by serializing it as a byte sequence)
if ((nBitsUsed+7)/8 != (vBits.size()+7)/8)
return 0;
// verify that all hashes were consumed
if (nHashUsed != vHash.size())
return 0;
return hashMerkleRoot;
}
bool AbortNode(const std::string &strMessage) {
strMiscWarning = strMessage;
printf("*** %s\n", strMessage.c_str());
uiInterface.ThreadSafeMessageBox(strMessage, "", CClientUIInterface::MSG_ERROR);
StartShutdown();
return false;
}
bool CheckDiskSpace(uint64 nAdditionalBytes)
{
uint64 nFreeBytesAvailable = filesystem::space(GetDataDir()).available;
// Check for nMinDiskSpace bytes (currently 50MB)
if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
return AbortNode(_("Error: Disk space is low!"));
return true;
}
CCriticalSection cs_LastBlockFile;
CBlockFileInfo infoLastBlockFile;
int nLastBlockFile = 0;
FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
if (pos.IsNull())
return NULL;
boost::filesystem::path path = GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
boost::filesystem::create_directories(path.parent_path());
FILE* file = fopen(path.string().c_str(), "rb+");
if (!file && !fReadOnly)
file = fopen(path.string().c_str(), "wb+");
if (!file) {
printf("Unable to open file %s\n", path.string().c_str());
return NULL;
}
if (pos.nPos) {
if (fseek(file, pos.nPos, SEEK_SET)) {
printf("Unable to seek to position %u of %s\n", pos.nPos, path.string().c_str());
fclose(file);
return NULL;
}
}
return file;
}
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
return OpenDiskFile(pos, "blk", fReadOnly);
}
FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
return OpenDiskFile(pos, "rev", fReadOnly);
}
CBlockIndex * InsertBlockIndex(uint256 hash)
{
if (hash == 0)
return NULL;
// Return existing
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end())
return (*mi).second;
// Create new
CBlockIndex* pindexNew = new CBlockIndex();
if (!pindexNew)
throw runtime_error("LoadBlockIndex() : new CBlockIndex failed");
mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
return pindexNew;
}
bool static LoadBlockIndexDB()
{
if (!pblocktree->LoadBlockIndexGuts())
return false;
boost::this_thread::interruption_point();
// Calculate nChainWork
vector<pair<int, CBlockIndex*> > vSortedByHeight;
vSortedByHeight.reserve(mapBlockIndex.size());
BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
}
sort(vSortedByHeight.begin(), vSortedByHeight.end());
BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
{
CBlockIndex* pindex = item.second;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + pindex->GetBlockWork().getuint256();
pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS && !(pindex->nStatus & BLOCK_FAILED_MASK))
setBlockIndexValid.insert(pindex);
}
// Load block file info
pblocktree->ReadLastBlockFile(nLastBlockFile);
printf("LoadBlockIndexDB(): last block file = %i\n", nLastBlockFile);
if (pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile))
printf("LoadBlockIndexDB(): last block file info: %s\n", infoLastBlockFile.ToString().c_str());
// Load nBestInvalidWork, OK if it doesn't exist
CBigNum bnBestInvalidWork;
pblocktree->ReadBestInvalidWork(bnBestInvalidWork);
nBestInvalidWork = bnBestInvalidWork.getuint256();
// Check whether we need to continue reindexing
bool fReindexing = false;
pblocktree->ReadReindexing(fReindexing);
fReindex |= fReindexing;
// Check whether we have a transaction index
pblocktree->ReadFlag("txindex", fTxIndex);
printf("LoadBlockIndexDB(): transaction index %s\n", fTxIndex ? "enabled" : "disabled");
// Load hashBestChain pointer to end of best chain
pindexBest = pcoinsTip->GetBestBlock();
if (pindexBest == NULL)
return true;
hashBestChain = pindexBest->GetBlockHash();
nBestHeight = pindexBest->nHeight;
nBestChainWork = pindexBest->nChainWork;
// register best chain
CBlockIndex *pindex = pindexBest;
vBlockIndexByHeight.resize(pindexBest->nHeight + 1);
while(pindex != NULL) {
vBlockIndexByHeight[pindex->nHeight] = pindex;
pindex = pindex->pprev;
}
printf("LoadBlockIndexDB(): hashBestChain=%s height=%d date=%s\n",
hashBestChain.ToString().c_str(), nBestHeight,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str());
return true;
}
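// VerifyDB: sanity-check the last -checkblocks blocks of the best chain at the
// requested -checklevel: level 0 reads blocks from disk, 1 re-runs CheckBlock(),
// 2 verifies the undo data, 3 disconnects tip blocks in memory to check the
// coin database for inconsistencies, and 4 reconnects them again.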
bool VerifyDB() {
if (pindexBest == NULL || pindexBest->pprev == NULL)
return true;
// Verify blocks in the best chain
int nCheckLevel = GetArg("-checklevel", 3);
int nCheckDepth = GetArg( "-checkblocks", 288);
if (nCheckDepth == 0)
nCheckDepth = 1000000000; // suffices until the year 19000
if (nCheckDepth > nBestHeight)
nCheckDepth = nBestHeight;
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
printf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
CCoinsViewCache coins(*pcoinsTip, true);
CBlockIndex* pindexState = pindexBest;
CBlockIndex* pindexFailure = NULL;
int nGoodTransactions = 0;
CValidationState state;
for (CBlockIndex* pindex = pindexBest; pindex && pindex->pprev; pindex = pindex->pprev)
{
boost::this_thread::interruption_point();
if (pindex->nHeight < nBestHeight-nCheckDepth)
break;
CBlock block;
// check level 0: read from disk
if (!block.ReadFromDisk(pindex))
return error("VerifyDB() : *** block.ReadFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
// check level 1: verify block validity
if (nCheckLevel >= 1 && !block.CheckBlock(state))
return error("VerifyDB() : *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (!pos.IsNull()) {
if (!undo.ReadFromDisk(pos, pindex->pprev->GetBlockHash()))
return error("VerifyDB() : *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
}
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
if (nCheckLevel >= 3 && pindex == pindexState && (coins.GetCacheSize() + pcoinsTip->GetCacheSize()) <= 2*nCoinCacheSize + 32000) {
bool fClean = true;
if (!block.DisconnectBlock(state, pindex, coins, &fClean))
return error("VerifyDB() : *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
pindexState = pindex->pprev;
if (!fClean) {
nGoodTransactions = 0;
pindexFailure = pindex;
} else
nGoodTransactions += block.vtx.size();
}
}
if (pindexFailure)
return error("VerifyDB() : *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", pindexBest->nHeight - pindexFailure->nHeight + 1, nGoodTransactions);
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4) {
CBlockIndex *pindex = pindexState;
while (pindex != pindexBest) {
boost::this_thread::interruption_point();
pindex = pindex->GetNextInMainChain();
CBlock block;
if (!block.ReadFromDisk(pindex))
return error("VerifyDB() : *** block.ReadFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
if (!block.ConnectBlock(state, pindex, coins))
return error("VerifyDB() : *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
}
}
printf("No coin database inconsistencies in last %i blocks (%i transactions)\n", pindexBest->nHeight - pindexState->nHeight, nGoodTransactions);
return true;
}
void UnloadBlockIndex()
{
mapBlockIndex.clear();
setBlockIndexValid.clear();
pindexGenesisBlock = NULL;
nBestHeight = 0;
nBestChainWork = 0;
nBestInvalidWork = 0;
hashBestChain = 0;
pindexBest = NULL;
}
bool LoadBlockIndex()
{
if (fTestNet)
{
pchMessageStart[0] = 0xfd;
pchMessageStart[1] = 0xf0;
pchMessageStart[2] = 0xf4;
pchMessageStart[3] = 0xfe;
hashGenesisBlock = uint256("0xf9e3f18eaa2404b23c57ed9961d7e777e95a1f2c88dfd373e5ddadc8ed343cc1");
}
//
// Load block index from databases
//
if (!fReindex && !LoadBlockIndexDB())
return false;
return true;
}
bool InitBlockIndex() {
// Check whether we're already initialized
if (pindexGenesisBlock != NULL)
return true;
// Use the provided setting for -txindex in the new database
fTxIndex = GetBoolArg("-txindex", false);
pblocktree->WriteFlag("txindex", fTxIndex);
printf("Initializing databases...\n");
// Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
if (!fReindex) {
// Genesis Block (Main):
// CBlock(hash=7520788e2d99eec, PoW=00000f308aff6ee4125a, ver=1, hashPrevBlock=00000000000000000000, hashMerkleRoot=6065d08d75, nTime=1369197853, nBits=1e0ffff0, nNonce=2084576387, vtx=1)
// CTransaction(hash=6065d08d75, ver=1, vin.size=1, vout.size=1, nLockTime=0)
// CTxIn(COutPoint(0000000000, -1), coinbase 04ffff001d01044c4c426f73746f6e20486572616c64202d2032312f4d61792f32303133202d20495253204f6666696369616c20746f2054616b6520466966746820746f204)
// CTxOut(nValue=500.00000000, scriptPubKey=0411582e4e718df82c9d75a4886ab7)
// vMerkleTree: 6065d08d75
// Genesis Block (Test):
// CBlock(hash=f9e3f18eaa2404b, PoW=00000b75a3fece2f380a, ver=1, hashPrevBlock=00000000000000000000, hashMerkleRoot=6065d08d75, nTime=1369198853, nBits=1e0ffff0, nNonce=386245382, vtx=1)
// CTransaction(hash=6065d08d75, ver=1, vin.size=1, vout.size=1, nLockTime=0)
// CTxIn(COutPoint(0000000000, -1), coinbase 04ffff001d01044c4c426f73746f6e20486572616c64202d2032312f4d61792f32303133202d20495253204f6666696369616c20746f2054616b6520466966746820746f204)
// CTxOut(nValue=500.00000000, scriptPubKey=0411582e4e718df82c9d75a4886ab7)
// vMerkleTree: 6065d08d75
// Genesis block
const char* pszTimestamp = "Boston Herald - 21/May/2013 - IRS Official to Take Fifth to Avoid Testifying";
CTransaction txNew;
txNew.vin.resize(1);
txNew.vout.resize(1);
txNew.vin[0].scriptSig = CScript() << 486604799 << CBigNum(4) << vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
txNew.vout[0].nValue = 500 * COIN;
txNew.vout[0].scriptPubKey = CScript() << ParseHex("0411582e4e718df82c9d75a4886ab7602f0a1b866d81a4e44acba04e847ccd0514b97bee4a61fa73b1474d12851422b01565f244f722f318f1608258b74a3f3fe6") << OP_CHECKSIG;
CBlock block;
block.vtx.push_back(txNew);
block.hashPrevBlock = 0;
block.hashMerkleRoot = block.BuildMerkleTree();
block.nVersion = 1;
block.nBits = 0x1e0ffff0;
block.nTime = 1369197853;
block.nNonce = 2084576387;
if (fTestNet)
{
block.nTime = 1369198853;
block.nNonce = 386245382;
}
//// debug print
uint256 hash = block.GetHash();
printf("%s\n", hash.ToString().c_str());
printf("%s\n", hashGenesisBlock.ToString().c_str());
printf("%s\n", block.hashMerkleRoot.ToString().c_str());
assert(block.hashMerkleRoot == uint256("0x6065d08d755e00a90449abe8a0923d0622feb6f7ab3f435c337369334119e636"));
// +Scrypt ----------------------------v
// If genesis block hash does not match, then generate new genesis hash.
if (false && block.GetHash() != hashGenesisBlock)
{
printf("Searching for genesis block...\n");
// This will figure out a valid hash and Nonce if you're
// creating a different genesis block:
uint256 hashTarget = CBigNum().SetCompact(block.nBits).getuint256();
uint256 thash;
char scratchpad[SCRYPT_SCRATCHPAD_SIZE];
loop
{
scrypt_1024_1_1_256_sp(BEGIN(block.nVersion), BEGIN(thash), scratchpad);
if (thash <= hashTarget)
break;
if ((block.nNonce & 0xFFF) == 0)
{
printf("nonce %08X: hash = %s (target = %s)\n", block.nNonce, thash.ToString().c_str(), hashTarget.ToString().c_str());
}
++block.nNonce;
if (block.nNonce == 0)
{
printf("NONCE WRAPPED, incrementing time\n");
++block.nTime;
}
}
printf("block.nTime = %u \n", block.nTime);
printf("block.nNonce = %u \n", block.nNonce);
printf("block.GetHash = %s\n", block.GetHash().ToString().c_str());
}
// +Scrypt ----------------------------^
block.print();
assert(hash == hashGenesisBlock);
// Start new block file
try {
unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
CValidationState state;
if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.nTime))
return error("LoadBlockIndex() : FindBlockPos failed");
if (!block.WriteToDisk(blockPos))
return error("LoadBlockIndex() : writing genesis block to disk failed");
if (!block.AddToBlockIndex(state, blockPos))
return error("LoadBlockIndex() : genesis block not accepted");
} catch(std::runtime_error &e) {
return error("LoadBlockIndex() : failed to initialize block database: %s", e.what());
}
}
return true;
}
void PrintBlockTree()
{
// pre-compute tree structure
map<CBlockIndex*, vector<CBlockIndex*> > mapNext;
for (map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.begin(); mi != mapBlockIndex.end(); ++mi)
{
CBlockIndex* pindex = (*mi).second;
mapNext[pindex->pprev].push_back(pindex);
// test
//while (rand() % 3 == 0)
// mapNext[pindex->pprev].push_back(pindex);
}
vector<pair<int, CBlockIndex*> > vStack;
vStack.push_back(make_pair(0, pindexGenesisBlock));
int nPrevCol = 0;
while (!vStack.empty())
{
int nCol = vStack.back().first;
CBlockIndex* pindex = vStack.back().second;
vStack.pop_back();
// print split or gap
if (nCol > nPrevCol)
{
for (int i = 0; i < nCol-1; i++)
printf("| ");
printf("|\\\n");
}
else if (nCol < nPrevCol)
{
for (int i = 0; i < nCol; i++)
printf("| ");
printf("|\n");
}
nPrevCol = nCol;
// print columns
for (int i = 0; i < nCol; i++)
printf("| ");
// print item
CBlock block;
block.ReadFromDisk(pindex);
printf("%d (blk%05u.dat:0x%x) %s tx %"PRIszu"",
pindex->nHeight,
pindex->GetBlockPos().nFile, pindex->GetBlockPos().nPos,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", block.GetBlockTime()).c_str(),
block.vtx.size());
PrintWallets(block);
// put the main time-chain first
vector<CBlockIndex*>& vNext = mapNext[pindex];
for (unsigned int i = 0; i < vNext.size(); i++)
{
if (vNext[i]->GetNextInMainChain())
{
swap(vNext[0], vNext[i]);
break;
}
}
// iterate children
for (unsigned int i = 0; i < vNext.size(); i++)
vStack.push_back(make_pair(nCol+i, vNext[i]));
}
}
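// LoadExternalBlockFile: import blocks from an external file (e.g. a
// -loadblock or -reindex input) by scanning for the network magic bytes,
// deserializing each block found, and handing it to ProcessBlock().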
bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
{
int64 nStart = GetTimeMillis();
int nLoaded = 0;
try {
CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
uint64 nStartByte = 0;
if (dbp) {
// (try to) skip already indexed part
CBlockFileInfo info;
if (pblocktree->ReadBlockFileInfo(dbp->nFile, info)) {
nStartByte = info.nSize;
blkdat.Seek(info.nSize);
}
}
uint64 nRewind = blkdat.GetPos();
while (blkdat.good() && !blkdat.eof()) {
boost::this_thread::interruption_point();
blkdat.SetPos(nRewind);
nRewind++; // start one byte further next time, in case of failure
blkdat.SetLimit(); // remove former limit
unsigned int nSize = 0;
try {
// locate a header
unsigned char buf[4];
blkdat.FindByte(pchMessageStart[0]);
nRewind = blkdat.GetPos()+1;
blkdat >> FLATDATA(buf);
if (memcmp(buf, pchMessageStart, 4))
continue;
// read size
blkdat >> nSize;
if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
continue;
} catch (std::exception &e) {
// no valid block header found; don't complain
break;
}
try {
// read block
uint64 nBlockPos = blkdat.GetPos();
blkdat.SetLimit(nBlockPos + nSize);
CBlock block;
blkdat >> block;
nRewind = blkdat.GetPos();
// process block
if (nBlockPos >= nStartByte) {
LOCK(cs_main);
if (dbp)
dbp->nPos = nBlockPos;
CValidationState state;
if (ProcessBlock(state, NULL, &block, dbp))
nLoaded++;
if (state.IsError())
break;
}
} catch (std::exception &e) {
printf("%s() : Deserialize or I/O error caught during load\n", __PRETTY_FUNCTION__);
}
}
fclose(fileIn);
} catch(std::runtime_error &e) {
AbortNode(_("Error: system error: ") + e.what());
}
if (nLoaded > 0)
printf("Loaded %i blocks from external file in %"PRI64d"ms\n", nLoaded, GetTimeMillis() - nStart);
return nLoaded > 0;
}
//////////////////////////////////////////////////////////////////////////////
//
// CAlert
//
extern map<uint256, CAlert> mapAlerts;
extern CCriticalSection cs_mapAlerts;
string GetWarnings(string strFor)
{
int nPriority = 0;
string strStatusBar;
string strRPC;
if (GetBoolArg("-testsafemode", false))
strRPC = "test";
if (!CLIENT_VERSION_IS_RELEASE)
strStatusBar = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
// Misc warnings like out of disk space and clock is wrong
if (strMiscWarning != "")
{
nPriority = 1000;
strStatusBar = strMiscWarning;
}
// Longer invalid proof-of-work chain
if (pindexBest && nBestInvalidWork > nBestChainWork + (pindexBest->GetBlockWork() * 6).getuint256())
{
nPriority = 2000;
strStatusBar = strRPC = _("Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.");
}
// Alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
{
const CAlert& alert = item.second;
if (alert.AppliesToMe() && alert.nPriority > nPriority)
{
nPriority = alert.nPriority;
strStatusBar = alert.strStatusBar;
}
}
}
if (strFor == "statusbar")
return strStatusBar;
else if (strFor == "rpc")
return strRPC;
assert(!"GetWarnings() : invalid parameter");
return "error";
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
bool static AlreadyHave(const CInv& inv)
{
switch (inv.type)
{
case MSG_TX:
{
bool txInMap = false;
{
LOCK(mempool.cs);
txInMap = mempool.exists(inv.hash);
}
return txInMap || mapOrphanTransactions.count(inv.hash) ||
pcoinsTip->HaveCoins(inv.hash);
}
case MSG_BLOCK:
return mapBlockIndex.count(inv.hash) ||
mapOrphanBlocks.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
}
// The message start string is designed to be unlikely to occur in normal data.
// The characters are rarely used upper ASCII, not valid as UTF-8, and produce
// a large 4-byte int at any alignment.
unsigned char pchMessageStart[4] = { 0xed, 0xe0, 0xe4, 0xee };
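// ProcessGetData: serve the peer's queued "getdata" requests. Blocks are read
// from disk and sent either in full or, for MSG_FILTERED_BLOCK, as a
// CMerkleBlock built against the peer's bloom filter; transactions are served
// from the relay map or the mempool, and anything unavailable is reported
// back in a single "notfound" message.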
void static ProcessGetData(CNode* pfrom)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
vector<CInv> vNotFound;
while (it != pfrom->vRecvGetData.end()) {
// Don't bother if send buffer is too full to respond anyway
if (pfrom->nSendSize >= SendBufferSize())
break;
const CInv &inv = *it;
{
boost::this_thread::interruption_point();
it++;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
{
// Send block from disk
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
CBlock block;
block.ReadFromDisk((*mi).second);
if (inv.type == MSG_BLOCK)
pfrom->PushMessage("block", block);
else // MSG_FILTERED_BLOCK
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
{
CMerkleBlock merkleBlock(block, *pfrom->pfilter);
pfrom->PushMessage("merkleblock", merkleBlock);
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
// Thus, the protocol spec allows us to provide duplicate txn here,
// however we MUST always provide at least what the remote peer needs
typedef std::pair<unsigned int, uint256> PairType;
BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
if (!pfrom->setInventoryKnown.count(CInv(MSG_TX, pair.second)))
pfrom->PushMessage("tx", block.vtx[pair.first]);
}
// else
// no response
}
// Trigger them to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, hashBestChain));
pfrom->PushMessage("inv", vInv);
pfrom->hashContinue = 0;
}
}
}
else if (inv.IsKnownType())
{
// Send stream from relay memory
bool pushed = false;
{
LOCK(cs_mapRelay);
map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
if (mi != mapRelay.end()) {
pfrom->PushMessage(inv.GetCommand(), (*mi).second);
pushed = true;
}
}
if (!pushed && inv.type == MSG_TX) {
LOCK(mempool.cs);
if (mempool.exists(inv.hash)) {
CTransaction tx = mempool.lookup(inv.hash);
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << tx;
pfrom->PushMessage("tx", ss);
pushed = true;
}
}
if (!pushed) {
vNotFound.push_back(inv);
}
}
// Track requests for our stuff.
Inventory(inv.hash);
}
}
pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
if (!vNotFound.empty()) {
// Let the peer know that we didn't find what it asked for, so it doesn't
// have to wait around forever. Currently only SPV clients actually care
// about this message: it's needed when they are recursively walking the
// dependencies of relevant unconfirmed transactions. SPV clients want to
// do that because they want to know about (and store and rebroadcast and
// risk analyze) the dependencies of transactions relevant to them, without
// having to download the entire memory pool.
pfrom->PushMessage("notfound", vNotFound);
}
}
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv)
{
RandAddSeedPerfmon();
if (fDebug)
printf("received: %s (%"PRIszu" bytes)\n", strCommand.c_str(), vRecv.size());
if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
{
printf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
}
if (strCommand == "version")
{
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
pfrom->Misbehaving(1);
return false;
}
int64 nTime;
CAddress addrMe;
CAddress addrFrom;
uint64 nNonce = 1;
vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
if (pfrom->nVersion < MIN_PROTO_VERSION)
{
// Since February 20, 2012, the protocol is initiated at version 209,
// and earlier versions are no longer supported
printf("partner %s using obsolete version %i; disconnecting\n", pfrom->addr.ToString().c_str(), pfrom->nVersion);
pfrom->fDisconnect = true;
return false;
}
if (pfrom->nVersion == 10300)
pfrom->nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty())
vRecv >> pfrom->strSubVer;
if (!vRecv.empty())
vRecv >> pfrom->nStartingHeight;
if (!vRecv.empty())
vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
else
pfrom->fRelayTxes = true;
if (pfrom->fInbound && addrMe.IsRoutable())
{
pfrom->addrLocal = addrMe;
SeenLocal(addrMe);
}
// Disconnect if we connected to ourself
if (nNonce == nLocalHostNonce && nNonce > 1)
{
printf("connected to self at %s, disconnecting\n", pfrom->addr.ToString().c_str());
pfrom->fDisconnect = true;
return true;
}
// Be shy and don't send version until we hear
if (pfrom->fInbound)
pfrom->PushVersion();
pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
AddTimeData(pfrom->addr, nTime);
// Change version
pfrom->PushMessage("verack");
pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
if (!pfrom->fInbound)
{
// Advertise our address
if (!fNoListen && !IsInitialBlockDownload())
{
CAddress addr = GetLocalAddress(&pfrom->addr);
if (addr.IsRoutable())
pfrom->PushAddress(addr);
}
// Get recent addresses
if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000)
{
pfrom->PushMessage("getaddr");
pfrom->fGetAddr = true;
}
addrman.Good(pfrom->addr);
} else {
if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
{
addrman.Add(addrFrom, addrFrom);
addrman.Good(addrFrom);
}
}
// Relay alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
item.second.RelayTo(pfrom);
}
pfrom->fSuccessfullyConnected = true;
printf("receive version message: version %d, blocks=%d, us=%s, them=%s, peer=%s\n", pfrom->nVersion, pfrom->nStartingHeight, addrMe.ToString().c_str(), addrFrom.ToString().c_str(), pfrom->addr.ToString().c_str());
cPeerBlockCounts.input(pfrom->nStartingHeight);
}
else if (pfrom->nVersion == 0)
{
// Must have a version message before anything else
pfrom->Misbehaving(1);
return false;
}
else if (strCommand == "verack")
{
pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
}
else if (strCommand == "addr")
{
vector<CAddress> vAddr;
vRecv >> vAddr;
// Don't want addr from older versions unless seeding
if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000)
return true;
if (vAddr.size() > 1000)
{
pfrom->Misbehaving(20);
return error("message addr size() = %"PRIszu"", vAddr.size());
}
// Store the new addresses
vector<CAddress> vAddrOk;
int64 nNow = GetAdjustedTime();
int64 nSince = nNow - 10 * 60;
BOOST_FOREACH(CAddress& addr, vAddr)
{
boost::this_thread::interruption_point();
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom->AddAddressKnown(addr);
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
// Relay to a limited number of other nodes
{
LOCK(cs_vNodes);
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the setAddrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt;
if (hashSalt == 0)
hashSalt = GetRandHash();
uint64 hashAddr = addr.GetHash();
uint256 hashRand = hashSalt ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
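// hashRand depends only on the address and on the current 24-hour window,
// so for a given address the same pseudo-random subset of peers is selected
// for a whole day, limiting repeated relays.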
multimap<uint256, CNode*> mapMix;
BOOST_FOREACH(CNode* pnode, vNodes)
{
if (pnode->nVersion < CADDR_TIME_VERSION)
continue;
unsigned int nPointer;
memcpy(&nPointer, &pnode, sizeof(nPointer));
uint256 hashKey = hashRand ^ nPointer;
hashKey = Hash(BEGIN(hashKey), END(hashKey));
mapMix.insert(make_pair(hashKey, pnode));
}
int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
((*mi).second)->PushAddress(addr);
}
}
// Do not store addresses outside our network
if (fReachable)
vAddrOk.push_back(addr);
}
addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom->fGetAddr = false;
if (pfrom->fOneShot)
pfrom->fDisconnect = true;
}
else if (strCommand == "inv")
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
pfrom->Misbehaving(20);
return error("message inv size() = %"PRIszu"", vInv.size());
}
// find last block in inv vector
unsigned int nLastBlock = (unsigned int)(-1);
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++) {
if (vInv[vInv.size() - 1 - nInv].type == MSG_BLOCK) {
nLastBlock = vInv.size() - 1 - nInv;
break;
}
}
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
const CInv &inv = vInv[nInv];
boost::this_thread::interruption_point();
pfrom->AddInventoryKnown(inv);
bool fAlreadyHave = AlreadyHave(inv);
if (fDebug)
printf(" got inventory: %s %s\n", inv.ToString().c_str(), fAlreadyHave ? "have" : "new");
if (!fAlreadyHave) {
if (!fImporting && !fReindex)
pfrom->AskFor(inv);
} else if (inv.type == MSG_BLOCK && mapOrphanBlocks.count(inv.hash)) {
pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(mapOrphanBlocks[inv.hash]));
} else if (nInv == nLastBlock) {
// In case we are on a very long side-chain, it is possible that we already have
// the last block in an inv bundle sent in response to getblocks. Try to detect
// this situation and push another getblocks to continue.
pfrom->PushGetBlocks(mapBlockIndex[inv.hash], uint256(0));
if (fDebug)
printf("force request: %s\n", inv.ToString().c_str());
}
// Track requests for our stuff
Inventory(inv.hash);
}
}
else if (strCommand == "getdata")
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
pfrom->Misbehaving(20);
return error("message getdata size() = %"PRIszu"", vInv.size());
}
if (fDebugNet || (vInv.size() != 1))
printf("received getdata (%"PRIszu" invsz)\n", vInv.size());
if ((fDebugNet && vInv.size() > 0) || (vInv.size() == 1))
printf("received getdata for: %s\n", vInv[0].ToString().c_str());
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
ProcessGetData(pfrom);
}
else if (strCommand == "getblocks")
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
// Find the last block the caller has in the main chain
CBlockIndex* pindex = locator.GetBlockIndex();
// Send the rest of the chain
if (pindex)
pindex = pindex->GetNextInMainChain();
int nLimit = 500;
printf("getblocks %d to %s limit %d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().c_str(), nLimit);
for (; pindex; pindex = pindex->GetNextInMainChain())
{
if (pindex->GetBlockHash() == hashStop)
{
printf(" getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
if (--nLimit <= 0)
{
// When this block is requested, we'll send an inv that'll make them
// getblocks the next batch of inventory.
printf(" getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
}
}
else if (strCommand == "getheaders")
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashStop);
if (mi == mapBlockIndex.end())
return true;
pindex = (*mi).second;
}
else
{
// Find the last block the caller has in the main chain
pindex = locator.GetBlockIndex();
if (pindex)
pindex = pindex->GetNextInMainChain();
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
vector<CBlock> vHeaders;
int nLimit = 2000;
printf("getheaders %d to %s\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().c_str());
for (; pindex; pindex = pindex->GetNextInMainChain())
{
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}
pfrom->PushMessage("headers", vHeaders);
}
else if (strCommand == "tx")
{
vector<uint256> vWorkQueue;
vector<uint256> vEraseQueue;
CDataStream vMsg(vRecv);
CTransaction tx;
vRecv >> tx;
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
bool fMissingInputs = false;
CValidationState state;
if (tx.AcceptToMemoryPool(state, true, true, &fMissingInputs))
{
RelayTran