blockstorage: do not flush block to disk if it is already there
pinheadmz committed Feb 27, 2023
1 parent 28f212a commit 5afd6bf
Showing 2 changed files with 53 additions and 11 deletions.
8 changes: 4 additions & 4 deletions src/node/blockstorage.cpp
@@ -566,7 +566,7 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)

static FlatFileSeq BlockFileSeq()
{
-    return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
+    return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16 KiB */ : BLOCKFILE_CHUNK_SIZE);
}

static FlatFileSeq UndoFileSeq()
@@ -601,7 +601,7 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne

bool finalize_undo = false;
if (!fKnown) {
-        while (m_blockfile_info[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
+        while (m_blockfile_info[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64 KiB */ : MAX_BLOCKFILE_SIZE)) {
// when the undo file is keeping up with the block file, we want to flush it explicitly
// when it is lagging behind (more blocks arrive than are being connected), we let the
// undo block write case handle it
@@ -618,8 +618,8 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
if ((int)nFile != m_last_blockfile) {
if (!fKnown) {
LogPrint(BCLog::BLOCKSTORE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString());
+            FlushBlockFile(/*fFinalize=*/true, finalize_undo);
        }
-        FlushBlockFile(!fKnown, finalize_undo);
m_last_blockfile = nFile;
}

@@ -803,7 +803,7 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, CCha
{
unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
FlatFilePos blockPos;
-    const auto position_known {dbp != nullptr};
+    const bool position_known{dbp != nullptr};
if (position_known) {
blockPos = *dbp;
} else {
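
The net effect of the FindBlockPos change above: when a block's position is already known (fKnown is true, i.e. during reindex), switching to a new block file no longer flushes and finalizes the previous file, because nothing was written to it. A minimal Python sketch of that control flow, for illustration only; flush_block_file and the state dict are hypothetical stand-ins for FlushBlockFile and m_last_blockfile:

def flush_block_file(finalize, finalize_undo):
    # Stand-in for BlockManager::FlushBlockFile(), which opens the previous
    # blk/rev files and flushes their contents to disk.
    print(f"flush(finalize={finalize}, finalize_undo={finalize_undo})")

def on_block_file_switch(state, new_file, known, finalize_undo):
    # `known` plays the role of fKnown: True during reindex, when the block
    # already has a position on disk and the old file received no new data.
    if new_file != state["last_blockfile"]:
        if not known:
            # Before this commit the flush ran unconditionally here, as
            # FlushBlockFile(!fKnown, finalize_undo), touching a file that a
            # reindex never wrote to -- and failing if it was read-only.
            flush_block_file(finalize=True, finalize_undo=finalize_undo)
        state["last_blockfile"] = new_file

state = {"last_blockfile": 0}
on_block_file_switch(state, new_file=1, known=True, finalize_undo=False)  # reindex: no flush
on_block_file_switch(state, new_file=2, known=False, finalize_undo=True)  # new block: flushed
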
56 changes: 49 additions & 7 deletions test/functional/feature_reindex.py
@@ -8,6 +8,7 @@
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
- Verify that out-of-order blocks are correctly processed, see LoadExternalBlockFile()
+- Start a second node, generate blocks, then restart with -reindex after setting blk files to read-only
"""

import os
@@ -19,22 +20,25 @@
class ReindexTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
-        self.num_nodes = 1
+        self.num_nodes = 2
+        self.extra_args = [
+            [],
+            ["-fastprune"] # only used in reindex_readonly()
+        ]

def reindex(self, justchainstate=False):
self.generatetoaddress(self.nodes[0], 3, self.nodes[0].get_deterministic_priv_key().address)
blockcount = self.nodes[0].getblockcount()
-        self.stop_nodes()
-        extra_args = [["-reindex-chainstate" if justchainstate else "-reindex"]]
-        self.start_nodes(extra_args)
+        self.restart_node(0, extra_args=["-reindex-chainstate" if justchainstate else "-reindex"])
+        self.connect_nodes(0, 1)
assert_equal(self.nodes[0].getblockcount(), blockcount) # start_node is blocking on reindex
self.log.info("Success")

# Check that blocks can be processed out of order
def out_of_order(self):
# The previous test created 12 blocks
assert_equal(self.nodes[0].getblockcount(), 12)
-        self.stop_nodes()
+        self.stop_node(0)

# In this test environment, blocks will always be in order (since
# we're generating them rather than getting them from peers), so to
@@ -68,12 +72,49 @@ def find_block(b, start):
'LoadExternalBlockFile: Out of order block',
'LoadExternalBlockFile: Processing out of order child',
]):
-            extra_args = [["-reindex"]]
-            self.start_nodes(extra_args)
+            self.start_node(0, extra_args=["-reindex"])

# All blocks should be accepted and processed.
assert_equal(self.nodes[0].getblockcount(), 12)

+    def reindex_readonly(self):
+        self.connect_nodes(0, 1)
+        addr = self.nodes[1].get_deterministic_priv_key().address
+
+        # generate enough blocks to ensure that the -fastprune node fills up the
+        # first blk00000.dat file and starts another block file
+
+        # How big are empty regtest blocks?
+        block_hash = self.generatetoaddress(self.nodes[1], 1, addr)[0]
+        block_size = self.nodes[1].getblock(block_hash)["size"]
+        block_size += 8 # BLOCK_SERIALIZATION_HEADER_SIZE
+
+        # How many blocks do we need to roll over a new .blk file?
+        block_count = self.nodes[1].getblockcount()
+        fastprune_blockfile_size = 0x10000
+        size_needed = fastprune_blockfile_size - (block_size * block_count)
+        blocks_needed = size_needed // block_size
+
+        self.generatetoaddress(self.nodes[1], blocks_needed, addr)
+        self.stop_node(1)
+
+        assert os.path.exists(self.nodes[1].chain_path / 'blocks' / 'blk00000.dat')
+        assert os.path.exists(self.nodes[1].chain_path / 'blocks' / 'blk00001.dat')
+
+        self.log.debug("Make the first block file read-only")
+        filename = self.nodes[1].chain_path / 'blocks' / 'blk00000.dat'
+        os.chmod(filename, 0o444)
+
+        self.log.debug("Restart and reindex the node with the read-only block file")
+        self.start_node(1, extra_args=["-reindex", "-fastprune"])
+        assert_equal(self.nodes[1].getblockcount(), block_count + blocks_needed)
+
+        self.log.debug("Shut down this node, triggering disk flushes, but not to the read-only file")
+        with self.nodes[1].assert_debug_log(expected_msgs=['FlushStateToDisk'], unexpected_msgs=['failed to open file']):
+            self.stop_node(1)
+
+        self.log.info("Success of reindex read-only test")

def run_test(self):
self.reindex(False)
self.reindex(True)
@@ -82,6 +123,7 @@ def run_test(self):

self.out_of_order()

+        self.reindex_readonly()

if __name__ == '__main__':
ReindexTest().main()
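
For context on the arithmetic in reindex_readonly(): -fastprune caps block files at 0x10000 bytes (64 KiB, per the FindBlockPos hunk above), and every block stored in a blk*.dat file is preceded by an 8-byte serialization header, the 4-byte network magic plus the 4-byte block length (BLOCK_SERIALIZATION_HEADER_SIZE). A worked example with hypothetical figures, since real regtest block sizes vary with the coinbase transaction:

FASTPRUNE_BLOCKFILE_SIZE = 0x10000   # 64 KiB cap imposed by -fastprune
BLOCK_SERIALIZATION_HEADER_SIZE = 8  # 4-byte network magic + 4-byte length prefix

block_size = 248 + BLOCK_SERIALIZATION_HEADER_SIZE  # hypothetical on-disk size per block
block_count = 16                                    # hypothetical blocks already stored
size_needed = FASTPRUNE_BLOCKFILE_SIZE - block_size * block_count
blocks_needed = size_needed // block_size           # (65536 - 4096) // 256 = 240
print(blocks_needed)  # 240 more blocks roll blk00000.dat over into blk00001.dat

The final assert_debug_log in the test exercises the fix itself: shutting node 1 down triggers FlushStateToDisk, but because blk00000.dat was already finalized and received no new data, the node never reopens the read-only file, so no 'failed to open file' error appears in the log.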
