Segregated witness rebased #8149

Merged
merged 27 commits on Jun 24, 2016
+344 −136 (viewing a subset of changes)
BIP141: Other consensus critical limits, and BIP145

Includes changes by Suhas Daftuar, Luke-jr, and mruddy.
sipa committed Jan 3, 2016
commit 2b1f6f9ccf36f1e0a2c9d99154e1642f796d7c2b
@@ -97,7 +97,7 @@ def __init__(self):
     def setup_network(self):
         # Start a node with maxuploadtarget of 200 MB (/24h)
         self.nodes = []
-        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
+        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=800", "-blockmaxsize=999000"]))
     def mine_full_block(self, node, address):
         # Want to create a full block
@@ -175,13 +175,13 @@ def run_test(self):
         getdata_request = msg_getdata()
         getdata_request.inv.append(CInv(2, big_old_block))
-        max_bytes_per_day = 200*1024*1024
-        daily_buffer = 144 * MAX_BLOCK_SIZE
+        max_bytes_per_day = 800*1024*1024
+        daily_buffer = 144 * 4000000
         max_bytes_available = max_bytes_per_day - daily_buffer
         success_count = max_bytes_available // old_block_size
-        # 144MB will be reserved for relaying new blocks, so expect this to
-        # succeed for ~70 tries.
+        # 576MB will be reserved for relaying new blocks, so expect this to
+        # succeed for ~235 tries.
         for i in range(success_count):
             test_nodes[0].send_message(getdata_request)
             test_nodes[0].sync_with_ping()
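For context on the updated numbers in this hunk, here is the arithmetic spelled out (an illustrative calculation only, written in C++ like the rest of this PR; the variable names mirror the test and are not new code in the commit). It assumes, as the test's own arithmetic suggests, that -maxuploadtarget is interpreted in MiB per day, and uses the new 4,000,000-byte maximum serialized block size for the daily relay buffer.

    // Illustrative arithmetic, not part of the test:
    int64_t max_bytes_per_day   = 800LL * 1024 * 1024;              // 838,860,800 bytes (800 MiB target)
    int64_t daily_buffer        = 144LL * 4000000;                  // 576,000,000 bytes reserved for 144 new blocks/day
    int64_t max_bytes_available = max_bytes_per_day - daily_buffer; // 262,860,800 bytes left for serving old blocks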
@@ -198,9 +198,9 @@ def run_test(self):
         # Requesting the current block on test_nodes[1] should succeed indefinitely,
         # even when over the max upload target.
-        # We'll try 200 times
+        # We'll try 800 times
         getdata_request.inv = [CInv(2, big_new_block)]
-        for i in range(200):
+        for i in range(800):
             test_nodes[1].send_message(getdata_request)
             test_nodes[1].sync_with_ping()
             assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
@@ -195,7 +195,7 @@ static void MutateTxAddInput(CMutableTransaction& tx, const string& strInput)
     uint256 txid(uint256S(strTxid));
     static const unsigned int minTxOutSz = 9;
-    static const unsigned int maxVout = MAX_BLOCK_SIZE / minTxOutSz;
+    static const unsigned int maxVout = MAX_BLOCK_BASE_SIZE / minTxOutSz;
     // extract and validate vout
     string strVout = vStrInputParts[1];
@@ -15,7 +15,7 @@
 #include <unordered_map>
-#define MIN_TRANSACTION_SIZE (::GetSerializeSize(CTransaction(), SER_NETWORK, PROTOCOL_VERSION))
+#define MIN_TRANSACTION_BASE_SIZE (::GetSerializeSize(CTransaction(), SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS))
 CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) :
         nonce(GetRand(std::numeric_limits<uint64_t>::max())),
@@ -50,7 +50,7 @@ uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const uint256& txhash) const {
 ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock) {
     if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
         return READ_STATUS_INVALID;
-    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_SIZE / MIN_TRANSACTION_SIZE)
+    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_BASE_SIZE / MIN_TRANSACTION_BASE_SIZE)
         return READ_STATUS_INVALID;
     assert(header.IsNull() && txn_available.empty());
@@ -6,10 +6,16 @@
 #ifndef BITCOIN_CONSENSUS_CONSENSUS_H
 #define BITCOIN_CONSENSUS_CONSENSUS_H
-/** The maximum allowed size for a serialized block, in bytes (network rule) */
-static const unsigned int MAX_BLOCK_SIZE = 1000000;
+#include <stdint.h>
+/** The maximum allowed size for a serialized block, in bytes (only for buffer size limits) */
+static const unsigned int MAX_BLOCK_SERIALIZED_SIZE = 4000000;
+/** The maximum allowed cost for a block, see BIP 141 (network rule) */
+static const unsigned int MAX_BLOCK_COST = 4000000;
+/** The maximum allowed size for a block excluding witness data, in bytes (network rule) */
+static const unsigned int MAX_BLOCK_BASE_SIZE = 1000000;
 /** The maximum allowed number of signature check operations in a block (network rule) */
-static const unsigned int MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50;
+static const int64_t MAX_BLOCK_SIGOPS_COST = 80000;
 /** Coinbase transaction outputs can only be spent after this number of new blocks (network rule) */
 static const int COINBASE_MATURITY = 100;
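For reference, the relationship between these constants follows BIP141: block cost weighs the witness-stripped (base) serialization four times as heavily as witness data, so MAX_BLOCK_COST = 4000000 corresponds exactly to the old 1,000,000-byte MAX_BLOCK_SIZE when no witness data is present. A minimal sketch, assuming the WITNESS_SCALE_FACTOR of 4 used elsewhere in this PR; ExampleBlockCost is a hypothetical name, not code from this commit:

    static const int64_t WITNESS_SCALE_FACTOR = 4;

    // BIP141: cost = base_size * (WITNESS_SCALE_FACTOR - 1) + total_size.
    // With no witness data, total_size == base_size and cost == 4 * base_size,
    // so a 1,000,000-byte legacy block has exactly 4,000,000 units of cost.
    int64_t ExampleBlockCost(int64_t base_size, int64_t total_size)
    {
        return base_size * (WITNESS_SCALE_FACTOR - 1) + total_size;
    }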
@@ -452,6 +452,7 @@ std::string HelpMessage(HelpMessageMode mode)
     strUsage += HelpMessageOpt("-mempoolreplacement", strprintf(_("Enable transaction replacement in the memory pool (default: %u)"), DEFAULT_ENABLE_REPLACEMENT));
     strUsage += HelpMessageGroup(_("Block creation options:"));
+    strUsage += HelpMessageOpt("-blockmaxcost=<n>", strprintf(_("Set maximum block cost (default: %d)"), DEFAULT_BLOCK_MAX_COST));
     strUsage += HelpMessageOpt("-blockminsize=<n>", strprintf(_("Set minimum block size in bytes (default: %u)"), DEFAULT_BLOCK_MIN_SIZE));
     strUsage += HelpMessageOpt("-blockmaxsize=<n>", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_BLOCK_MAX_SIZE));
     strUsage += HelpMessageOpt("-blockprioritysize=<n>", strprintf(_("Set maximum size of high-priority/low-fee transactions in bytes (default: %d)"), DEFAULT_BLOCK_PRIORITY_SIZE));
@@ -684,8 +684,8 @@ bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(c
     // have been mined or received.
     // 100 orphans, each of which is at most 99,999 bytes big is
     // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
-    unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
-    if (sz >= MAX_STANDARD_TX_SIZE)
+    unsigned int sz = GetTransactionCost(tx);
+    if (sz >= MAX_STANDARD_TX_COST)
     {
         LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
         return false;
@@ -1018,8 +1018,24 @@ unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& in
     return nSigOps;
 }
+int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags)
+{
+    int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;
+    if (tx.IsCoinBase())
+        return nSigOps;
+    if (flags & SCRIPT_VERIFY_P2SH) {
+        nSigOps += GetP2SHSigOpCount(tx, inputs) * WITNESS_SCALE_FACTOR;
+    }
+    for (unsigned int i = 0; i < tx.vin.size(); i++)
+    {
+        const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
+        nSigOps += CountWitnessSigOps(tx.vin[i].scriptSig, prevout.scriptPubKey, i < tx.wit.vtxinwit.size() ? &tx.wit.vtxinwit[i].scriptWitness : NULL, flags);
+    }
+    return nSigOps;
+}
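A worked example of the accounting in the new function (the counts are hypothetical, not taken from the diff): legacy and P2SH sigops are scaled up by WITNESS_SCALE_FACTOR, while witness sigops are counted at face value.

    // Illustration only: a transaction with 2 legacy sigops, 1 P2SH sigop and
    // 3 witness sigops is charged (2 + 1) * WITNESS_SCALE_FACTOR + 3 units of
    // sigop cost against MAX_BLOCK_SIGOPS_COST.
    int64_t example_cost = (2 + 1) * 4 + 3;  // == 15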
@@ -1033,7 +1049,7 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state)
     if (tx.vout.empty())
         return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
     // Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
-    if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_SIZE)
+    if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
         return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");
     // Check for negative or overflow output values
@@ -1239,8 +1255,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
         if (fRequireStandard && !AreInputsStandard(tx, view))
             return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
-        unsigned int nSigOps = GetLegacySigOpCount(tx);
-        nSigOps += GetP2SHSigOpCount(tx, view);
+        int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
         CAmount nValueOut = tx.GetValueOut();
         CAmount nFees = nValueIn-nValueOut;
@@ -1263,17 +1278,17 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
             }
         }
-        CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOps, lp);
+        CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp);
         unsigned int nSize = entry.GetTxSize();
         // Check that the transaction doesn't have an excessive number of
         // sigops, making it impossible to mine. Since the coinbase transaction
         // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
         // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
         // merely non-standard transaction.
-        if ((nSigOps > MAX_STANDARD_TX_SIGOPS) || (nBytesPerSigOp && nSigOps > nSize / nBytesPerSigOp))
+        if ((nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST) || (nBytesPerSigOp && nSigOpsCost > nSize * WITNESS_SCALE_FACTOR / nBytesPerSigOp))
             return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
-                strprintf("%d", nSigOps));
+                strprintf("%d", nSigOpsCost));
         CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
         if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
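The scaling by WITNESS_SCALE_FACTOR in the bytes-per-sigop comparison above keeps the pre-segwit ratio unchanged for transactions without witness data, since both sides of the inequality are now expressed in cost units. A small sanity sketch with illustrative values (nBytesPerSigOp = 20 is an assumption here, not taken from the diff):

    // For a non-witness transaction, nSigOpsCost == 4 * nSigOps, so
    //   nSigOpsCost > nSize * WITNESS_SCALE_FACTOR / nBytesPerSigOp
    // reduces to the old test nSigOps > nSize / nBytesPerSigOp.
    int64_t nBytesPerSigOp = 20, nSize = 400;           // hypothetical values
    int64_t allowed_cost = nSize * 4 / nBytesPerSigOp;  // == 80, i.e. 20 legacy sigops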
@@ -2439,7 +2454,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
     std::vector<int> prevheights;
     CAmount nFees = 0;
     int nInputs = 0;
-    unsigned int nSigOps = 0;
+    int64_t nSigOpsCost = 0;
     CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
     std::vector<std::pair<uint256, CDiskTxPos> > vPos;
     vPos.reserve(block.vtx.size());
@@ -2449,10 +2464,6 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
         const CTransaction &tx = block.vtx[i];
         nInputs += tx.vin.size();
-        nSigOps += GetLegacySigOpCount(tx);
-        if (nSigOps > MAX_BLOCK_SIGOPS)
-            return state.DoS(100, error("ConnectBlock(): too many sigops"),
-                             REJECT_INVALID, "bad-blk-sigops");
         if (!tx.IsCoinBase())
         {
@@ -2483,18 +2494,19 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
                 return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
                                  REJECT_INVALID, "bad-txns-nonfinal");
             }
         }
-        if (fStrictPayToScriptHash)
-        {
-            // Add in sigops done by pay-to-script-hash inputs;
-            // this is to prevent a "rogue miner" from creating
-            // an incredibly-expensive-to-validate block.
-            nSigOps += GetP2SHSigOpCount(tx, view);
-            if (nSigOps > MAX_BLOCK_SIGOPS)
-                return state.DoS(100, error("ConnectBlock(): too many sigops"),
-                                 REJECT_INVALID, "bad-blk-sigops");
-        }
+        // GetTransactionSigOpCost counts 3 types of sigops:
+        // * legacy (always)
+        // * p2sh (when P2SH enabled in flags and excludes coinbase)
+        // * witness (when witness enabled in flags and excludes coinbase)
+        nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
+        if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
+            return state.DoS(100, error("ConnectBlock(): too many sigops"),
+                             REJECT_INVALID, "bad-blk-sigops");
         if (!tx.IsCoinBase())
         {
             nFees += view.GetValueIn(tx)-tx.GetValueOut();
             std::vector<CScriptCheck> vChecks;
@@ -3417,9 +3429,11 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
     // All potential-corruption validation must be done before we do any
     // transaction validation, as otherwise we may mark the header as invalid
     // because we receive the wrong transactions for it.
+    // Note that witness malleability is checked in ContextualCheckBlock, so no
+    // checks that use witness data may be performed here.
     // Size limits
-    if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_SIZE)
+    if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_BASE_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
         return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");
     // First transaction must be coinbase, the rest must not be
@@ -3440,7 +3454,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
     {
         nSigOps += GetLegacySigOpCount(tx);
     }
-    if (nSigOps > MAX_BLOCK_SIGOPS)
+    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");
     if (fCheckPOW && fCheckMerkleRoot)
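The rewritten check above is arithmetically equivalent to the old one for the legacy sigops counted here, since the budget was scaled by the same factor as the sigops. A one-line sanity check (illustrative, not part of the diff):

    // 20000 legacy sigops (the old MAX_BLOCK_SIGOPS, i.e. 1000000/50), each
    // weighted by WITNESS_SCALE_FACTOR == 4, exactly fill the new budget.
    static_assert(20000 * 4 == 80000, "old legacy sigop limit maps onto MAX_BLOCK_SIGOPS_COST");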
@@ -3621,6 +3635,16 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIn
         }
     }
+    // After the coinbase witness nonce and commitment are verified,
+    // we can check if the block cost passes (before we've checked the
+    // coinbase witness, it would be possible for the cost to be too
+    // large by filling up the coinbase witness, which doesn't change
+    // the block hash, so we couldn't mark the block as permanently
+    // failed).
+    if (GetBlockCost(block) > MAX_BLOCK_COST) {
+        return state.DoS(100, error("ContextualCheckBlock(): cost limit failed"), REJECT_INVALID, "bad-blk-cost");
+    }
     return true;
 }
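GetBlockCost itself is introduced elsewhere in this PR and is not shown in this hunk. A minimal sketch of what it is assumed to compute, reusing the serialization flags already visible above (SketchBlockCost is a hypothetical name, not the PR's code):

    // Assumed definition: weigh the witness-stripped serialization by
    // (WITNESS_SCALE_FACTOR - 1) and add the full serialization once, so a
    // block with no witness data costs exactly 4x its byte size.
    int64_t SketchBlockCost(const CBlock& block)
    {
        return ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * (WITNESS_SCALE_FACTOR - 1) +
               ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
    }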
@@ -4284,7 +4308,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
     int nLoaded = 0;
     try {
         // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
-        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
+        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
         uint64_t nRewind = blkdat.GetPos();
         while (!blkdat.eof()) {
             boost::this_thread::interruption_point();
@@ -4303,7 +4327,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
                     continue;
                 // read size
                 blkdat >> nSize;
-                if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
+                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                     continue;
             } catch (const std::exception&) {
                 // no valid block header found; don't complain
@@ -152,6 +152,7 @@ typedef boost::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
 extern BlockMap mapBlockIndex;
 extern uint64_t nLastBlockTx;
 extern uint64_t nLastBlockSize;
+extern uint64_t nLastBlockCost;
 extern const std::string strMessageMagic;
 extern CWaitableCriticalSection csBestBlock;
 extern CConditionVariable cvBlockChange;
@@ -155,7 +155,7 @@ uint256 CPartialMerkleTree::ExtractMatches(std::vector<uint256> &vMatch, std::ve
     if (nTransactions == 0)
         return uint256();
     // check for excessively high numbers of transactions
-    if (nTransactions > MAX_BLOCK_SIZE / 60) // 60 is the lower bound for the size of a serialized CTransaction
+    if (nTransactions > MAX_BLOCK_BASE_SIZE / 60) // 60 is the lower bound for the size of a serialized CTransaction
         return uint256();
     // there can never be more hashes provided than one for every txid
     if (vHash.size() > nTransactions)