Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

BIP100: Dynamic max block size by miner vote #398

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
3 changes: 2 additions & 1 deletion .gitignore
Expand Up @@ -105,7 +105,8 @@ linux-build
win32-build
qa/pull-tester/run-bitcoind-for-test.sh
qa/pull-tester/tests_config.py
qa/pull-tester/cache/*
qa/pull-tester/cache
qa/pull-tester/cache_bigblock
qa/pull-tester/test.*/*
qa/tmp
cache/
Expand Down
2 changes: 2 additions & 0 deletions qa/pull-tester/rpc-tests.py
Expand Up @@ -213,6 +213,7 @@ def option_passed(option_without_dashes):
'bip65-cltv',
'bip65-cltv-p2p',
'bip68-sequence',
'bip100-sizelimit',
'bipdersig-p2p',
'bipdersig',
'getblocktemplate_longpoll',
Expand Down Expand Up @@ -250,6 +251,7 @@ def show_wrapper_options():
" attempt to run disabled/skipped tests")
print(" -h / -help / --help print this help")


def runtests():
global passOn
coverage = None
Expand Down
1 change: 1 addition & 0 deletions qa/rpc-tests/.gitignore
@@ -1,2 +1,3 @@
*.pyc
cache
cache_bigblock
210 changes: 210 additions & 0 deletions qa/rpc-tests/bip100-sizelimit.py
@@ -0,0 +1,210 @@
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# Test mining and broadcast of larger-than-1MB-blocks
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

from decimal import Decimal

CACHE_DIR = "cache_bigblock"

class BigBlockTest(BitcoinTestFramework):

    def setup_chain(self):
        """Create (or reuse) a cached 4-node, 2012-block chain with BIP100
        size votes, plus a file of pre-signed transactions ready to mine.

        The expensive chain build is done once into CACHE_DIR; on every run
        the cached datadirs are copied into self.options.tmpdir.
        """
        print("Initializing test directory "+self.options.tmpdir)

        # Only build the chain when no cached copy exists yet.
        if not os.path.isdir(os.path.join(CACHE_DIR, "node0")):
            print("Creating initial chain. This will be cached for future runs.")

            for i in range(4):
                initialize_datadir(CACHE_DIR, i) # Overwrite port/rpcport in bitcoin.conf

            # Node 0 creates 8MB blocks that vote for increase to 8MB
            # Node 1 creates empty blocks that vote for 8MB
            # Node 2 creates empty blocks that vote for 1MB (no increase)
            # Node 3 creates empty blocks that vote for 2MB
            # (votes follow the -maxblocksizevote arguments below)
            self.nodes = []
            # Use node0 to mine blocks for input splitting
            self.nodes.append(start_node(0, CACHE_DIR, ["-blockmaxsize=8000000", "-bip100=1", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"]))
            self.nodes.append(start_node(1, CACHE_DIR, ["-blockmaxsize=1000", "-bip100=1", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"]))
            self.nodes.append(start_node(2, CACHE_DIR, ["-blockmaxsize=1000", "-bip100=1", "-maxblocksizevote=1", "-limitancestorsize=2000", "-limitdescendantsize=2000"]))
            self.nodes.append(start_node(3, CACHE_DIR, ["-blockmaxsize=1000", "-bip100=1", "-maxblocksizevote=2", "-limitancestorsize=2000", "-limitdescendantsize=2000"]))

            # Ring topology: 0-1-2-3-0.
            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 1, 2)
            connect_nodes_bi(self.nodes, 2, 3)
            connect_nodes_bi(self.nodes, 3, 0)

            self.is_network_split = False

            # Create a 2012-block chain in a 75% ratio for increase (genesis block votes for 1MB)
            # Make sure they are not already sorted correctly
            # (503 + 502 + 503 + 503 = 2011 mined blocks on top of genesis).
            blocks = []
            blocks.append(self.nodes[1].generate(503))
            assert(self.sync_blocks(self.nodes[1:3]))
            blocks.append(self.nodes[2].generate(502)) # <--- genesis is 503rd vote for 1MB
            assert(self.sync_blocks(self.nodes[2:4]))
            blocks.append(self.nodes[3].generate(503))
            assert(self.sync_blocks(self.nodes[1:4]))
            blocks.append(self.nodes[1].generate(503))
            assert(self.sync_blocks(self.nodes))

            # Raw transactions are cached to disk so setup_network() can
            # reload them into a restarted node's mempool.
            tx_file = open(os.path.join(CACHE_DIR, "txdata"), "w")

            # Create a lot of transaction data ready to be mined
            fee = Decimal('.00005')
            used = set()  # "txid"+str(vout) strings already spent as inputs
            print("Creating transaction data")
            for i in range(0,25):
                inputs = []
                outputs = {}
                limit = 0
                utxos = self.nodes[3].listunspent(0)
                for utxo in utxos:
                    if not utxo["txid"]+str(utxo["vout"]) in used:
                        raw_input = {}
                        raw_input["txid"] = utxo["txid"]
                        raw_input["vout"] = utxo["vout"]
                        inputs.append(raw_input)
                        outputs[self.nodes[3].getnewaddress()] = utxo["amount"] - fee
                        used.add(utxo["txid"]+str(utxo["vout"]))
                        limit = limit + 1
                        # Cap each transaction at 250 inputs to keep it large
                        # but under relay/policy limits.
                        if (limit >= 250):
                            break
                rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
                txdata = self.nodes[3].signrawtransaction(rawtx)["hex"]
                self.nodes[3].sendrawtransaction(txdata)
                tx_file.write(txdata+"\n")
            tx_file.close()

            stop_nodes(self.nodes)
            wait_bitcoinds()
            self.nodes = []
            # Strip per-run state so the cache is reusable.
            for i in range(4):
                os.remove(log_filename(CACHE_DIR, i, "db.log"))
                os.remove(log_filename(CACHE_DIR, i, "peers.dat"))
                os.remove(log_filename(CACHE_DIR, i, "fee_estimates.dat"))

        # Copy the (possibly pre-existing) cache into the test's tmpdir.
        for i in range(4):
            from_dir = os.path.join(CACHE_DIR, "node"+str(i))
            to_dir = os.path.join(self.options.tmpdir, "node"+str(i))
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf

def sync_blocks(self, rpc_connections, wait=1, max_wait=60):
"""
Wait until everybody has the same block count
"""
for i in range(0,max_wait):
if i > 0: time.sleep(wait)
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
return True
return False
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why not use util.sync_blocks()?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The version in util.py waits forever. We rely on sync failure to detect test failures.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it seems like adding an optional timeout parameter to the version in util would be valuable

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If it's ok, I will do that in a separate PR that follows.


def setup_network(self):
self.nodes = []

self.nodes.append(start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-bip100=1", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60))
self.nodes.append(start_node(1, self.options.tmpdir, ["-blockmaxsize=1000", "-bip100=1", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60))
self.nodes.append(start_node(2, self.options.tmpdir, ["-blockmaxsize=1000", "-bip100=1", "-maxblocksizevote=1", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60))
# (We don't restart the node with the huge wallet
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 2, 0)

self.load_mempool(self.nodes[0])

def load_mempool(self, node):
with open(os.path.join(CACHE_DIR, "txdata"), "r") as f:
for line in f:
node.sendrawtransaction(line.rstrip())

    def TestMineBig(self, expect_big):
        """Mine one block on node0 and check its size against the legacy 1MB cap.

        Args:
            expect_big: True if node0 should produce a >1MB block (i.e. the
                consensus size limit has been raised); False otherwise.

        If a big block is expected, node1 must also extend the chain on top
        of it. In both cases the chain is rewound past the test block so the
        caller's chain state is left unchanged.
        """
        # Test if node0 will mine a block bigger than legacy MAX_BLOCK_SIZE:
        # let it mine up to whatever the current consensus limit allows.
        self.nodes[0].setminingmaxblock(self.nodes[0].getexcessiveblock()["excessiveBlockSize"])
        b1hash = self.nodes[0].generate(1)[0]
        b1 = self.nodes[0].getblock(b1hash, True)
        assert(self.sync_blocks(self.nodes[0:3]))

        if expect_big:
            assert(b1['size'] > 1000*1000)

            # Have node1 mine on top of the block,
            # to make sure it goes along with the fork
            b2hash = self.nodes[1].generate(1)[0]
            b2 = self.nodes[1].getblock(b2hash, True)
            assert(b2['previousblockhash'] == b1hash)
            assert(self.sync_blocks(self.nodes[0:3]))

        else:
            assert(b1['size'] < 1000*1000)

        # Reset chain to before b1hash:
        for node in self.nodes[0:3]:
            node.invalidateblock(b1hash)
        assert(self.sync_blocks(self.nodes[0:3]))


    def run_test(self):
        """Exercise BIP100 max-block-size voting: activation at the vote
        threshold, persistence across restarts, and re-orgs past the block
        where the size changed."""
        # nodes 0 and 1 have mature 50-BTC coinbase transactions.

        print("Testing consensus blocksize increase conditions")

        assert_equal(self.nodes[0].getblockcount(), 2011) # This is a 0-based height

        # Current nMaxBlockSize is still 1MB
        assert_equal(self.nodes[0].getexcessiveblock()["excessiveBlockSize"], 1000000)
        self.TestMineBig(False)

        # Create a situation where the 1512th-highest vote is for 2MB
        self.nodes[2].generate(1)
        assert(self.sync_blocks(self.nodes[1:3]))
        # ahash marks the last block of the interval that triggers the raise;
        # it is invalidated below to test the re-org path.
        ahash = self.nodes[1].generate(3)[2]
        assert_equal(self.nodes[1].getexcessiveblock()["excessiveBlockSize"], int(1000000 * 1.05))
        assert(self.sync_blocks(self.nodes[0:2]))
        self.TestMineBig(True)

        # Shutdown then restart node[0], it should still produce a big block
        # (the raised limit must survive a restart).
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-bip100=1", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60)
        self.load_mempool(self.nodes[0])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        assert_equal(self.nodes[0].getexcessiveblock()["excessiveBlockSize"], int(1000000 * 1.05))
        self.TestMineBig(True)

        # Test re-orgs past the sizechange block: node2 rolls back the raise
        # and mines a longer alternative chain while node0 is down.
        stop_node(self.nodes[0], 0)
        self.nodes[2].invalidateblock(ahash)
        assert_equal(self.nodes[2].getexcessiveblock()["excessiveBlockSize"], 1000000)
        self.nodes[2].generate(2)
        assert_equal(self.nodes[2].getexcessiveblock()["excessiveBlockSize"], 1000000)
        assert(self.sync_blocks(self.nodes[1:3]))

        # Restart node0, it should re-org onto longer chain,
        # and refuse to mine a big block:
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-blockmaxsize=8000000", "-bip100=1", "-maxblocksizevote=8", "-limitancestorsize=2000", "-limitdescendantsize=2000"], timewait=60)
        self.load_mempool(self.nodes[0])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        assert(self.sync_blocks(self.nodes[0:3]))
        assert_equal(self.nodes[0].getexcessiveblock()["excessiveBlockSize"], 1000000)
        self.TestMineBig(False)

        # Mine 4 blocks voting for 8MB. Bigger block NOT ok, we are in the next voting period
        self.nodes[1].generate(4)
        assert_equal(self.nodes[1].getexcessiveblock()["excessiveBlockSize"], 1000000)
        assert(self.sync_blocks(self.nodes[0:3]))
        self.TestMineBig(False)


        print("Cached test chain and transactions left in %s"%(CACHE_DIR))

if __name__ == '__main__':
    # Script entry point: run the BIP100 dynamic-block-size functional test.
    BigBlockTest().main()
2 changes: 2 additions & 0 deletions src/.formatted-files
Expand Up @@ -4,6 +4,8 @@ tweak.h
thinblock.cpp
thinblock.h
leakybucket.h
maxblocksize.cpp
maxblocksize.h
parallel.cpp
parallel.h
unlimited.cpp
Expand Down
2 changes: 2 additions & 0 deletions src/Makefile.am
Expand Up @@ -102,6 +102,7 @@ BITCOIN_CORE_H = \
dbwrapper.h \
limitedmap.h \
main.h \
maxblocksize.h \
memusage.h \
merkleblock.h \
miner.h \
Expand Down Expand Up @@ -181,6 +182,7 @@ libbitcoin_server_a_SOURCES = \
init.cpp \
dbwrapper.cpp \
main.cpp \
maxblocksize.cpp \
merkleblock.cpp \
miner.cpp \
net.cpp \
Expand Down
1 change: 1 addition & 0 deletions src/Makefile.test.include
Expand Up @@ -59,6 +59,7 @@ BITCOIN_TESTS =\
test/limitedmap_tests.cpp \
test/dbwrapper_tests.cpp \
test/main_tests.cpp \
test/maxblocksize_tests.cpp \
test/mempool_tests.cpp \
test/merkle_tests.cpp \
test/miner_tests.cpp \
Expand Down
27 changes: 24 additions & 3 deletions src/chain.h
Expand Up @@ -16,6 +16,9 @@

#include <vector>

static const int BIP100_DBI_VERSION = 0x08000000;
static const int DISK_BLOCK_INDEX_VERSION = BIP100_DBI_VERSION;

struct CDiskBlockPos
{
int nFile;
Expand Down Expand Up @@ -69,8 +72,8 @@ enum BlockStatus: uint32_t {

/**
* Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid, no duplicate txids,
* sigops, size, merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. When all
* parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx will be set.
* merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. When all
* parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx and maxblocksize will be set.
*/
BLOCK_VALID_TRANSACTIONS = 3,

Expand Down Expand Up @@ -149,6 +152,15 @@ class CBlockIndex
//! (memory only) Sequential id assigned to distinguish order in which blocks are received.
uint32_t nSequenceId;

//! Index entry serial format version
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the comment isn't helping me much. How is this different from nVersion?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We need to track nSerialVersion in the CBlockIndex object. The existing nVersion in the object is the actual block header version. nSerialVersion is read/written to the old nVersion location at the beginning of the serialized CBlockIndex object.

The ubiquitous serialization nType/nVersion parameters have been removed in a later core release.

int nSerialVersion;

//! Maximum serialized block size at nHeight
uint64_t nMaxBlockSize;

//! This block's vote for future maximum serialized block size
uint64_t nMaxBlockSizeVote;

void SetNull()
{
phashBlock = NULL;
Expand All @@ -163,6 +175,9 @@ class CBlockIndex
nChainTx = 0;
nStatus = 0;
nSequenceId = 0;
nSerialVersion = 0;
nMaxBlockSize = 0;
nMaxBlockSizeVote = 0;

nVersion = 0;
hashMerkleRoot = uint256();
Expand Down Expand Up @@ -295,14 +310,15 @@ class CDiskBlockIndex : public CBlockIndex

explicit CDiskBlockIndex(const CBlockIndex* pindex) : CBlockIndex(*pindex) {
hashPrev = (pprev ? pprev->GetBlockHash() : uint256());
nSerialVersion = DISK_BLOCK_INDEX_VERSION;
}

ADD_SERIALIZE_METHODS;

template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
if (!(nType & SER_GETHASH))
READWRITE(VARINT(nVersion));
READWRITE(VARINT(nSerialVersion));

READWRITE(VARINT(nHeight));
READWRITE(VARINT(nStatus));
Expand All @@ -321,6 +337,11 @@ class CDiskBlockIndex : public CBlockIndex
READWRITE(nTime);
READWRITE(nBits);
READWRITE(nNonce);

if (nSerialVersion >= BIP100_DBI_VERSION) {
READWRITE(VARINT(nMaxBlockSize));
READWRITE(VARINT(nMaxBlockSizeVote));
}
}

uint256 GetBlockHash() const
Expand Down
11 changes: 11 additions & 0 deletions src/chainparams.cpp
Expand Up @@ -117,6 +117,10 @@ class CMainParams : public CChainParams {
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nStartTime = 1462060800; // May 1st, 2016
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = 1493596800; // May 1st, 2017

// BIP100 defined start height and max block size change critical vote position
consensus.bip100ActivationHeight = 449568;
consensus.nMaxBlockSizeChangePosition = 1512;

/**
* The message start string is designed to be unlikely to occur in normal data.
* The characters are rarely used upper ASCII, not valid as UTF-8, and produce
Expand Down Expand Up @@ -288,6 +292,10 @@ class CTestNetParams : public CChainParams {
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nStartTime = 1456790400; // March 1st, 2016
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = 1493596800; // May 1st, 2017

// BIP100 defined start height and max block size change critical vote position
consensus.bip100ActivationHeight = 798336;
consensus.nMaxBlockSizeChangePosition = 1512;

pchMessageStart[0] = 0x0b;
pchMessageStart[1] = 0x11;
pchMessageStart[2] = 0x09;
Expand Down Expand Up @@ -361,6 +369,9 @@ class CRegTestParams : public CChainParams {
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nStartTime = 0;
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = 999999999999ULL;

consensus.bip100ActivationHeight = 0;
consensus.nMaxBlockSizeChangePosition = 1512;

pchMessageStart[0] = 0xfa;
pchMessageStart[1] = 0xbf;
pchMessageStart[2] = 0xb5;
Expand Down
7 changes: 7 additions & 0 deletions src/consensus/params.h
Expand Up @@ -54,6 +54,13 @@ struct Params {
uint32_t nRuleChangeActivationThreshold;
uint32_t nMinerConfirmationWindow;
BIP9Deployment vDeployments[MAX_VERSION_BITS_DEPLOYMENTS];
/**
* BIP100: One-based position from beginning (end) of the ascending sorted list of max block size
* votes in a retarget interval, at which the possible new lower (higher) max block size is read.
* 1512 = 75th percentile of 2016
*/
int bip100ActivationHeight;
uint32_t nMaxBlockSizeChangePosition;
/** Proof of work parameters */
uint256 powLimit;
bool fPowAllowMinDifficultyBlocks;
Expand Down