Commit 5f46da2
Allows any block with a timestamp on or after 1 March 2016 00:00:00 UTC to be up to 20,000,000 bytes (serialized). I believe this is the simplest possible set of changes that will work.
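For context: the diff below contains only the new unit test; the consensus-side change it exercises is not shown in this view. A minimal sketch of what that change might look like, assuming the TWENTY_MEG_FORK_TIME constant the test references and the MaxBlockSize() helper name that comes up in the comments below:

static const uint64_t TWENTY_MEG_FORK_TIME = 1456790400; // 1 March 2016 00:00:00 UTC

// Sketch only: the maximum serialized block size as a pure function of the
// block's own timestamp, so validation never depends on local clock time.
inline unsigned int MaxBlockSize(uint64_t nBlockTimestamp) {
    return (nBlockTimestamp < TWENTY_MEG_FORK_TIME ? 1000*1000 : 20*1000*1000);
}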
@@ -0,0 +1,101 @@
// Copyright (c) 2011-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "main.h"
#include "miner.h"
#include "pubkey.h"
#include "uint256.h"
#include "util.h"

#include "test/test_bitcoin.h"

#include <boost/test/unit_test.hpp>

BOOST_FIXTURE_TEST_SUITE(block_size_tests, TestingSetup)

// Fill block with dummy transactions until its serialized size is exactly nSize
static void
FillBlock(CBlock& block, unsigned int nSize)
{
    assert(block.vtx.size() > 0); // Start with at least a coinbase

    unsigned int nBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
    if (nBlockSize > nSize) {
        block.vtx.resize(1); // passed-in block is too big, start with just the coinbase
        nBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
    }

    // Create a dummy transaction we can replicate to pad out the block:
    CMutableTransaction tx;
    tx.vin.resize(1);
    tx.vin[0].scriptSig = CScript() << OP_11;
    tx.vin[0].prevout.hash = block.vtx[0].GetHash(); // passes CheckBlock, would fail if we checked inputs.
    tx.vin[0].prevout.n = 0;
    tx.vout.resize(1);
    tx.vout[0].nValue = 1LL;
    tx.vout[0].scriptPubKey = block.vtx[0].vout[0].scriptPubKey;

    unsigned int nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
    uint256 txhash = tx.GetHash();

    // ... add copies of tx to the block to get close to nSize:
    while (nBlockSize+nTxSize < nSize) {
        block.vtx.push_back(tx);
        nBlockSize += nTxSize;
        tx.vin[0].prevout.hash = txhash; // ... just to make each transaction unique
        txhash = tx.GetHash();
    }
    // Make the last transaction exactly the right size by making the scriptSig bigger.
    block.vtx.pop_back();
    nBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
    unsigned int nFill = nSize - nBlockSize - nTxSize;
    for (unsigned int i = 0; i < nFill; i++)
        tx.vin[0].scriptSig << OP_11;
    block.vtx.push_back(tx);
    nBlockSize = ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
    assert(nBlockSize == nSize);
}

static bool TestCheckBlock(CBlock& block, uint64_t nTime, unsigned int nSize)
{
    SetMockTime(nTime);
    block.nTime = nTime;
    FillBlock(block, nSize);
    CValidationState validationState;
    bool fResult = CheckBlock(block, validationState, false, false) && validationState.IsValid();
    SetMockTime(0);
    return fResult;
}

//
// Unit test CheckBlock() for conditions around the block size hard fork
//
BOOST_AUTO_TEST_CASE(TwentyMegFork)
{
    CScript scriptPubKey = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG;
    CBlockTemplate *pblocktemplate;

    LOCK(cs_main);

    BOOST_CHECK(pblocktemplate = CreateNewBlock(scriptPubKey));
    CBlock *pblock = &pblocktemplate->block;

    // Before fork time...
    BOOST_CHECK(TestCheckBlock(*pblock, TWENTY_MEG_FORK_TIME-1LL, 1000*1000));     // 1MB : valid
    BOOST_CHECK(!TestCheckBlock(*pblock, TWENTY_MEG_FORK_TIME-1LL, 1000*1000+1));  // >1MB : invalid
    BOOST_CHECK(!TestCheckBlock(*pblock, TWENTY_MEG_FORK_TIME-1LL, 20*1000*1000)); // 20MB : invalid

    // Exactly at fork time...
    BOOST_CHECK(TestCheckBlock(*pblock, TWENTY_MEG_FORK_TIME, 1000*1000));       // 1MB : valid
    BOOST_CHECK(TestCheckBlock(*pblock, TWENTY_MEG_FORK_TIME, 20*1000*1000));    // 20MB : valid
    BOOST_CHECK(!TestCheckBlock(*pblock, TWENTY_MEG_FORK_TIME, 20*1000*1000+1)); // >20MB : invalid

    // A year after fork time:
    uint64_t yearAfter = TWENTY_MEG_FORK_TIME+60*60*24*365;
    BOOST_CHECK(TestCheckBlock(*pblock, yearAfter, 1000*1000));       // 1MB : valid
    BOOST_CHECK(TestCheckBlock(*pblock, yearAfter, 20*1000*1000));    // 20MB : valid
    BOOST_CHECK(!TestCheckBlock(*pblock, yearAfter, 20*1000*1000+1)); // >20MB : invalid

    delete pblocktemplate;
}

BOOST_AUTO_TEST_SUITE_END()
10 comments on commit 5f46da2
What about a block size that is linearly dependent on time (or, better IMHO, on block count)? In this snippet it grows every half year, i.e. every 15,768,000 seconds:

static const unsigned int MAX_BLOCK_SIZE = 1000000;

inline unsigned int MaxBlockSize(uint64_t nBlockTimestamp) {
    // 1MB blocks until 1 March 2016, then grow by 1MB every half year (15,768,000 seconds)
    return (nBlockTimestamp < TWENTY_MEG_FORK_TIME ? MAX_BLOCK_SIZE :
            MAX_BLOCK_SIZE + ((nBlockTimestamp - TWENTY_MEG_FORK_TIME) / 15768000) * 1000*1000);
}
@schildbach I believe the problem is that the slower/incrementing formula didn't "get consensus" in private discussions. I don't know exactly what that means either, but presumably something about the exact formula was controversial with someone.
I don't personally think verifying the entire block chain on a watch is a reasonable goal we should care about, but it doesn't matter - your Intel Edison will still be able to process the block chain, so don't worry. You can either set it to prune (delete old blocks), which means it won't serve SPV clients or the block chain to other full nodes any more, but it still verifies everything and so can act as your trusted node. Or you can just assume that miners won't actually build 20MB blocks right away, due to lack of demand and because excessively large blocks would trigger unacceptable orphan rates. Whichever way it turns out, it's not something to worry about.
@gavinandresen Change looks good to me. I will accept it into Bitcoin XT if it does not get accepted into Bitcoin Core. Comments:
DEFAULT_BLOCK_MAX_SIZE is unchanged in this patch. We know from experience that quite a few miners don't modify the default size; presumably they expect that it Just Works(tm) out of the box. If leaving it at 750kb is deliberate, that deserves discussion in the code and release notes, IMHO. If it's not deliberate, then I suggest we either:
- Set it to 20MB as well, i.e. force miners to pick an appropriate value if their blocks are too big and getting orphaned.
- Remove the default entirely and refuse to service work/block-template requests if no soft limit has been set. Again, force miners to pick...
- Same as above, but provide some kind of "reasonable" suggestion in the error text, where by "reasonable" I mean a figure that sounds plausible based on your propagation experiments.
The fork point is defined in terms of timestamp rather than height. Is that OK? My gut sense is that it works fine: we might get a >1MB block with a timestamp after the switchover time, followed by a block whose timestamp is lower than the switchover time, but I don't think that will cause any issues - the second block will simply be checked according to the old rules. Still, it would be nice to see this case discussed in a code comment.
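For reference, the miner soft-limit default being discussed looks like this - a sketch assuming the 0.10-era value, not something shown in this diff:

// Miner policy default (sketch; the value is assumed from the 750kb figure above).
// Miners can override it at runtime with -blockmaxsize.
static const unsigned int DEFAULT_BLOCK_MAX_SIZE = 750000;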
I deleted any comments that are not about this particular code. Please argue about the costs/benefits of 20MB versus 1MB blocks in the usual places: reddit, bitcoin-development mailing list, blog posts (I'll be writing several), etc.
@mikehearn : yes, I deliberately am not changing DEFAULT_BLOCK_MAX_SIZE. Policy on how to handle that is a whole 'nother discussion for a whole 'nother pull request, in my humble opinion.
RE: fork in terms of timestamp: I will write a couple of rpc-tests that use setmocktime to simulate mining through the switchover, and will test to be absolutely certain there will be no issues (but yes, it seems obvious that if the max block size is a pure function of block.nTime there will be no issues).
@kangasbros : I'll write more in a blog post about why this is just a 20MB increase with no increase-over-time. In short, it is impossible to predict the future, and the fear is that growth in home network bandwidth and/or CPU power may plateau sometime in the next couple of years.
But why a timestamp instead of a block number? How secure are timestamps? Is there no risk at all that some clients have a different opinion of the time? What happens if they do?
It's hard to predict when the chain will reach a certain height - you can do it in a fuzzy manner, but it's not very precise, since blocks arrive every ten minutes only on average. If the crossover were set as a block height, it might occur weeks earlier or later than planned. Probably not a big deal, but I guess that's the reason.
Timestamps are allowed to drift by a couple of hours, and they're cross-checked by miners.
@tinspin : code up a switch-by-height implementation, and you'll see why by-timestamp is much better.
There is no danger of confusion: the block timestamp is in the block header and is part of what is hashed. Either it is before the switchover time or after it, and if you're checking the block you have the timestamp to check against right there in the block's data. Your node's notion of local time is irrelevant (if your clock is too far off the network's idea of the current time you'll reject the block, but that is true today and is independent of the max block size check).
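To make that concrete, a sketch of how the size check inside CheckBlock() could be keyed on block.nTime alone - MaxBlockSize() as sketched near the top of this page is an assumption, and the rejection plumbing simply mirrors the existing size check:

// Sketch: reject over-size blocks using only the block's own timestamp.
if (block.vtx.empty() || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MaxBlockSize(block.nTime))
    return state.DoS(100, error("CheckBlock() : size limits failed"),
                     REJECT_INVALID, "bad-blk-length");

Nothing in that check consults the local clock; the rule is evaluated entirely against data committed into the block itself.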
Would there be any problem if MAX_PROTOCOL_MESSAGE_LENGTH is too close to MaxBlockSize? Currently it is 2,097,152 vs 1,000,000, and you propose 20,971,520 vs 20,000,000.
@jl2012 : no, almost 1MB of extra headroom (20,971,520 is 20x1024x1024, i.e. 971,520 bytes above 20,000,000) is actually a lot bigger than necessary - the overhead for a block message is on the order of 10 bytes.
This is not the answer, IMHO, unless 1) a significant majority of miners agree to this change, and 2) a significant majority of full-node operators agree to this change. I suspect the latter will not, and they will favor miners who also do not.
The change doesn't need to force people to choose if it can be merge-mined with the current 1MB-limit bitcoin. That way the mining power will not be split, and wallet providers can simply provide a config option to choose which bitcoin to default to.
The market will decide the price of each bitcoin - and we'll probably find that the combined market cap of the two bitcoins stays about the same, perhaps even increasing with the increased confidence. Forcing a change to 20MB blocks will further CENTRALIZE bitcoin, with only a small number of organisations running full nodes.
With the merge-mining approach that can still happen to the 20MB bitcoin, but at least we can continue to see what the 1MB bitcoin would have evolved into, rather than aborting it while it is still a relative fetus.
As I understand it, this is set to 4,000 based on the previous math of 1,000,000/50/5, leaving the 4,000 sigops/tx limit as-is going forward. It does make me wonder why it was previously defined as a fraction of the block size.
We need to keep in mind that the increase in block size allows for an increase in tx volume but not an increase in sigops/tx, which could come into play as merchants and payment processors sweep more and more payments at the same time. I would expect that the 4,000 limit is already taken into account in such cases and that transactions are batched.
Are we missing anything that might mean an increase in tx volume would require an increase in sigops/tx?
Why have a maximum at all?
Why not a smaller maximum?
Is this an attack vector we're trying to prevent - DoS via large scripts?
Is a 12,000-sigop tx any different from 3 x 4,000-sigop txs? This will now allow 100 x 4,000-sigop txs (disregarding size).
Is it better kept at 20,000/block (or some other fixed amount smaller than 400,000) instead of 4,000/tx?
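For concreteness, the arithmetic behind those numbers, written out as a sketch (the constant names here are assumptions; only the 4,000 figure and the /50 ratio come from the discussion above):

static const unsigned int MAX_BLOCK_SIZE = 20*1000*1000;
static const unsigned int MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50; // 400,000 sigops per block after the fork
static const unsigned int MAX_TX_SIGOPS = 4000;                 // held fixed: the old 1,000,000/50/5
// A full block can now hold up to MAX_BLOCK_SIGOPS/MAX_TX_SIGOPS == 100
// maximum-sigop transactions, versus 20,000/4,000 == 5 under the 1MB rules.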