Skip to content

Commit

Permalink
test: remove confusing MAX_BLOCK_BASE_SIZE
Browse files Browse the repository at this point in the history
The constant `MAX_BLOCK_BASE_SIZE` was removed from the
core implementation years ago because it was confusing and
superfluous, being implied by the block weight limit (see
PRs bitcoin#10618 and bitcoin#10608). Since there is also no point in
still keeping it in the functional test framework, we switch
to weight-based accounting on the relevant test code parts
and use `MAX_BLOCK_WEIGHT` instead for the block limit
checks.
  • Loading branch information
theStack committed Jul 1, 2021
1 parent 4af97c7 commit e26ad3b
Show file tree
Hide file tree
Showing 5 changed files with 35 additions and 47 deletions.
40 changes: 20 additions & 20 deletions test/functional/feature_block.py
Expand Up @@ -22,7 +22,7 @@
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
MAX_BLOCK_WEIGHT,
uint256_from_compact,
uint256_from_str,
)
Expand Down Expand Up @@ -307,33 +307,33 @@ def run_test(self):
b22 = self.next_block(22, spend=out[5])
self.send_blocks([b22], success=False, reject_reason='bad-txns-premature-spend-of-coinbase', reconnect=True)

# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# Create a block on either side of MAX_BLOCK_WEIGHT and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
self.log.info("Accept a block of weight MAX_BLOCK_WEIGHT")
self.move_tip(15)
b23 = self.next_block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_length = (MAX_BLOCK_WEIGHT - b23.get_weight() - 276) // 4
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = self.update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
# Make sure the math above worked out to produce a max-weighted block
assert_equal(b23.get_weight(), MAX_BLOCK_WEIGHT)
self.send_blocks([b23], True)
self.save_spendable_output()

self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1")
self.log.info("Reject a block of weight MAX_BLOCK_WEIGHT + 4")
self.move_tip(15)
b24 = self.next_block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_length = (MAX_BLOCK_WEIGHT - b24.get_weight() - 276) // 4
script_output = CScript([b'\x00' * (script_length + 1)])
tx.vout = [CTxOut(0, script_output)]
b24 = self.update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1)
assert_equal(b24.get_weight(), MAX_BLOCK_WEIGHT + 1 * 4)
self.send_blocks([b24], success=False, reject_reason='bad-blk-length', reconnect=True)

b25 = self.next_block(25, spend=out[7])
Expand Down Expand Up @@ -485,13 +485,13 @@ def run_test(self):
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size = len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
total_weight = b39.get_weight()
while total_weight < MAX_BLOCK_WEIGHT:
tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
total_weight += tx_new.get_weight()
if total_weight >= MAX_BLOCK_WEIGHT:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
Expand All @@ -502,7 +502,7 @@ def run_test(self):
# Make sure we didn't accidentally make too big a block. Note that the
# size of the block has non-determinism due to the ECDSA signature in
# the first transaction.
while (len(b39.serialize()) >= MAX_BLOCK_BASE_SIZE):
while (b39.get_weight() >= MAX_BLOCK_WEIGHT):
del b39.vtx[-1]

b39 = self.update_block(39, [])
Expand Down Expand Up @@ -892,7 +892,7 @@ def run_test(self):
self.send_blocks([b63], success=False, reject_reason='bad-txns-nonfinal', reconnect=True)

# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# the block is > MAX_BLOCK_WEIGHT with the bloated varint, but <= MAX_BLOCK_WEIGHT without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
Expand All @@ -917,12 +917,12 @@ def run_test(self):
tx = CTransaction()

# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_length = (MAX_BLOCK_WEIGHT - 4 * len(b64a.normal_serialize()) - 276) // 4
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = self.update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
assert_equal(b64a.get_weight(), MAX_BLOCK_WEIGHT + 8 * 4)
self.send_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize()')

# bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
Expand All @@ -936,7 +936,7 @@ def run_test(self):
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
assert_equal(b64.get_weight(), MAX_BLOCK_WEIGHT)
self.blocks[64] = b64
b64 = self.update_block(64, [])
self.send_blocks([b64], True)
Expand Down Expand Up @@ -1270,12 +1270,12 @@ def run_test(self):
for i in range(89, LARGE_REORG_SIZE + 89):
b = self.next_block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_length = (MAX_BLOCK_WEIGHT - b.get_weight() - 276) // 4
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = self.update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
assert_equal(b.get_weight(), MAX_BLOCK_WEIGHT)
blocks.append(b)
self.save_spendable_output()
spend = self.get_spendable_output()
Expand Down
4 changes: 2 additions & 2 deletions test/functional/mempool_accept.py
Expand Up @@ -14,7 +14,7 @@
COIN,
COutPoint,
CTxOut,
MAX_BLOCK_BASE_SIZE,
MAX_BLOCK_WEIGHT,
MAX_MONEY,
tx_from_hex,
)
Expand Down Expand Up @@ -207,7 +207,7 @@ def run_test(self):

self.log.info('A really large transaction')
tx = tx_from_hex(raw_tx_reference)
tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize()))
tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_WEIGHT // 4 / len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}],
rawtxs=[tx.serialize().hex()],
Expand Down
8 changes: 4 additions & 4 deletions test/functional/mining_prioritisetransaction.py
Expand Up @@ -6,7 +6,7 @@

import time

from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE
from test_framework.messages import COIN, MAX_BLOCK_WEIGHT
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts

Expand Down Expand Up @@ -61,15 +61,15 @@ def run_test(self):
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)

# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
# MAX_BLOCK_WEIGHT // 4 -- otherwise the test needs to be revised to
# create more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert j in mempool
sizes[i] += mempool[j]['vsize']
assert sizes[i] > MAX_BLOCK_BASE_SIZE # Fail => raise utxo_count
assert sizes[i] > MAX_BLOCK_WEIGHT // 4 # Fail => raise utxo_count

# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined
Expand Down
28 changes: 8 additions & 20 deletions test/functional/p2p_segwit.py
Expand Up @@ -21,7 +21,7 @@
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MAX_BLOCK_WEIGHT,
MSG_BLOCK,
MSG_TX,
MSG_WITNESS_FLAG,
Expand Down Expand Up @@ -110,16 +110,6 @@ def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()

def get_virtual_size(witness_block):
    """Calculate the virtual size of a witness block.
    Virtual size is base + witness/4."""
    stripped_size = len(witness_block.serialize(with_witness=False))
    full_size = len(witness_block.serialize())
    # weight = 3 * base + total; ceil-divide by 4 so any remainder rounds up
    return (3 * stripped_size + full_size + 3) // 4

def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
Expand Down Expand Up @@ -902,7 +892,7 @@ def test_block_malleability(self):
block.solve()

block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
assert block.get_weight() > MAX_BLOCK_WEIGHT

# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
Expand All @@ -911,7 +901,7 @@ def test_block_malleability(self):
assert self.nodes[0].getbestblockhash() != block.hash

block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
assert block.get_weight() < MAX_BLOCK_WEIGHT
assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))

assert self.nodes[0].getbestblockhash() == block.hash
Expand Down Expand Up @@ -974,11 +964,10 @@ def test_witness_block_size(self):
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])

vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
additional_bytes = MAX_BLOCK_WEIGHT - block.get_weight()
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
# Add some more bytes to each input until we hit MAX_BLOCK_WEIGHT+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
Expand All @@ -987,8 +976,7 @@ def test_witness_block_size(self):
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
assert_equal(block.get_weight(), MAX_BLOCK_WEIGHT + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > 2 * 1024 * 1024
Expand All @@ -1001,7 +989,7 @@ def test_witness_block_size(self):
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
assert block.get_weight() == MAX_BLOCK_WEIGHT

test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

Expand Down Expand Up @@ -1727,7 +1715,7 @@ def test_signature_version_1(self):
block.vtx.append(tx)

# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
if (block.get_weight() > MAX_BLOCK_WEIGHT - 4000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
Expand Down
2 changes: 1 addition & 1 deletion test/functional/test_framework/messages.py
Expand Up @@ -32,7 +32,7 @@
from test_framework.util import hex_str_to_bytes, assert_equal

MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
MAX_BLOCK_WEIGHT = 4000000
MAX_BLOOM_FILTER_SIZE = 36000
MAX_BLOOM_HASH_FUNCS = 50

Expand Down

0 comments on commit e26ad3b

Please sign in to comment.