Skip to content

Commit

Permalink
Allow maintaining the blockfilterindex when using prune
Browse files Browse the repository at this point in the history
Summary:
PR description:
> Maintaining the blockfilterindexes in prune mode is possible and may lead to efficient p2p based rescans of wallets (restore backups, import/sweep keys) beyond the prune height (rescans not part of that PR).
>
> This PR allows running the blockfilterindex(es) in conjunction with pruning.
>
> - Bitcoind/Qt will shutdown during startup when missing block data has been detected ([re]enable -blockfilterindex when we already have pruned)
> - manual block pruning is disabled during blockfilterindex sync
> - auto-pruning is delayed during blockfilterindex sync

----

> Allow blockfilter in conjunction with prune
bitcoin/bitcoin@6abe9f5

----

> index: Fix backwards search for bestblock
bitcoin/bitcoin@698c524

----

> Avoid accessing nullpointer in BaseIndex::GetSummary()
bitcoin/bitcoin@00d57ff

----

> Avoid pruning below the blockfilterindex sync height
bitcoin/bitcoin@5e11226

----

> Add debug startup parameter -fastprune for more effective pruning tests
bitcoin/bitcoin@00d57ff

----

> Add functional test for blockfilterindex in prune-mode
bitcoin/bitcoin@ab3a0a2

----

> Fix several bugs in feature_blockfilterindex_prune.py
[[bitcoin/bitcoin#21230 | core#21230]]

----

> test: Intermittent issue in feature_blockfilterindex_prune
[[bitcoin/bitcoin#21252 | core#21252]]

----

> test: improve assertions in feature_blockfilterindex_prune.py
> test: remove unneeded node from feature_blockfilterindex_prune.py
[[bitcoin/bitcoin#21297 | core#21297]]

----

> test: Add edge case of pruning up to index height
bitcoin/bitcoin@9600ea0

This is a backport of [[bitcoin/bitcoin#15946 | core#15946]], [[bitcoin/bitcoin#21230 | core#21230]], [[bitcoin/bitcoin#21252 | core#21252]], [[bitcoin/bitcoin#21297 | core#21297]] and [[bitcoin/bitcoin#23365 | core#23365]]

Backport notes:
 - the test code was buggy and ugly after the first commit, so I had to squash all these commits to reach an acceptable quality.
 - I had to use different numbers of generated and pruned blocks, because Bitcoin ABC can fit more blocks into each blk?????.dat file than Core: in this test, the witness data of Core's coinbase transactions makes its blocks larger. Comments were added to explain this where needed.
 - I used the shortcut `node = self.nodes[0]` to make the code a bit more readable (shorter lines)

Test Plan:
`ninja all check-all`

Run IBD with pruning enabled
` src/bitcoind -prune=2000 -blockfilterindex=1`

Reviewers: #bitcoin_abc, Fabien

Reviewed By: #bitcoin_abc, Fabien

Subscribers: Fabien

Differential Revision: https://reviews.bitcoinabc.org/D11299
  • Loading branch information
jonasschnelli authored and PiRK committed Apr 5, 2022
1 parent 8848294 commit eacbda7
Show file tree
Hide file tree
Showing 8 changed files with 191 additions and 13 deletions.
5 changes: 4 additions & 1 deletion src/blockdb.cpp
Expand Up @@ -10,7 +10,10 @@
extern RecursiveMutex cs_main;

FlatFileSeq BlockFileSeq() {
    // With -fastprune, use tiny 16 KiB block files so that functional tests
    // can trigger pruning after generating only a handful of blocks;
    // otherwise use the normal production chunk size.
    // NOTE(review): the previous revision had an unconditional early return
    // here that made the -fastprune branch unreachable dead code.
    return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk",
                       gArgs.GetBoolArg("-fastprune", false)
                           ? 0x4000 /* 16kb */
                           : BLOCKFILE_CHUNK_SIZE);
}

FlatFileSeq UndoFileSeq() {
Expand Down
2 changes: 1 addition & 1 deletion src/chainparams.cpp
Expand Up @@ -456,7 +456,7 @@ class CRegTestParams : public CChainParams {
netMagic[2] = 0xbf;
netMagic[3] = 0xfa;
nDefaultPort = 18444;
nPruneAfterHeight = 1000;
nPruneAfterHeight = gArgs.GetBoolArg("-fastprune", false) ? 100 : 1000;
m_assumed_blockchain_size = 0;
m_assumed_chain_state_size = 0;

Expand Down
49 changes: 48 additions & 1 deletion src/index/base.cpp
Expand Up @@ -67,6 +67,48 @@ bool BaseIndex::Init() {
::ChainActive(), locator);
}
m_synced = m_best_block_index.load() == ::ChainActive().Tip();
if (!m_synced) {
bool prune_violation = false;
if (!m_best_block_index) {
// index is not built yet
// make sure we have all block data back to the genesis
const CBlockIndex *block = ::ChainActive().Tip();
while (block->pprev && block->pprev->nStatus.hasData()) {
block = block->pprev;
}
prune_violation = block != ::ChainActive().Genesis();
}
// in case the index has a best block set and is not fully synced
// check if we have the required blocks to continue building the index
else {
const CBlockIndex *block_to_test = m_best_block_index.load();
if (!ChainActive().Contains(block_to_test)) {
// if the bestblock is not part of the mainchain, find the fork
// and make sure we have all data down to the fork
block_to_test = ::ChainActive().FindFork(block_to_test);
}
const CBlockIndex *block = ::ChainActive().Tip();
prune_violation = true;
// check backwards from the tip if we have all block data until we
// reach the indexes bestblock
while (block_to_test && block && block->nStatus.hasData()) {
if (block_to_test == block) {
prune_violation = false;
break;
}
assert(block->pprev);
block = block->pprev;
}
}
if (prune_violation) {
// throw error and graceful shutdown if we can't build the index
FatalError("%s: %s best block of the index goes beyond pruned "
"data. Please disable the index or reindex (which will "
"download the whole blockchain again)",
__func__, GetName());
return false;
}
}
return true;
}

Expand Down Expand Up @@ -182,6 +224,10 @@ bool BaseIndex::Rewind(const CBlockIndex *current_tip,
assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

// In the case of a reorg, ensure persisted block locator is not stale.
// Pruning has a minimum of 288 blocks-to-keep and getting the index
// out of sync may be possible but a users fault.
// In case we reorg beyond the pruned depth, ReadBlockFromDisk would
// throw and lead to a graceful shutdown
m_best_block_index = new_tip;
if (!Commit()) {
// If commit fails, revert the best block index to avoid corruption.
Expand Down Expand Up @@ -334,6 +380,7 @@ IndexSummary BaseIndex::GetSummary() const {
IndexSummary summary{};
summary.name = GetName();
summary.synced = m_synced;
summary.best_block_height = m_best_block_index.load()->nHeight;
summary.best_block_height =
m_best_block_index ? m_best_block_index.load()->nHeight : 0;
return summary;
}
9 changes: 5 additions & 4 deletions src/init.cpp
Expand Up @@ -443,6 +443,11 @@ void SetupServerArgs(NodeContext &node) {
"Specify directory to hold blocks subdirectory for *.dat "
"files (default: <datadir>)",
ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-fastprune",
"Use smaller block files and lower minimum prune height for "
"testing purposes",
ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
OptionsCategory::DEBUG_TEST);
#if defined(HAVE_SYSTEM)
argsman.AddArg("-blocknotify=<cmd>",
"Execute command when the best block changes (%s in cmd is "
Expand Down Expand Up @@ -1905,10 +1910,6 @@ bool AppInitParameterInteraction(Config &config, const ArgsManager &args) {
if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
return InitError(_("Prune mode is incompatible with -txindex."));
}
if (!g_enabled_filter_types.empty()) {
return InitError(
_("Prune mode is incompatible with -blockfilterindex."));
}
}

// -bind and -whitebind can't be set when not listening
Expand Down
27 changes: 22 additions & 5 deletions src/validation.cpp
Expand Up @@ -21,6 +21,7 @@
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <hash.h>
#include <index/blockfilterindex.h>
#include <index/txindex.h>
#include <logging.h>
#include <logging/timer.h>
Expand Down Expand Up @@ -2156,22 +2157,34 @@ bool CChainState::FlushStateToDisk(const CChainParams &chainparams,
{
bool fFlushForPrune = false;
bool fDoFullFlush = false;

CoinsCacheSizeState cache_state =
GetCoinsCacheSizeState(&m_mempool);
LOCK(cs_LastBlockFile);
if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) &&
!fReindex) {
// Make sure we don't prune above the blockfilterindexes
// bestblocks. Pruning is height-based.
int last_prune = m_chain.Height();
ForEachBlockFilterIndex([&](BlockFilterIndex &index) {
last_prune = std::max(
1, std::min(last_prune,
index.GetSummary().best_block_height));
});

if (nManualPruneHeight > 0) {
LOG_TIME_MILLIS_WITH_CATEGORY(
"find files to prune (manual)", BCLog::BENCH);
m_blockman.FindFilesToPruneManual(
setFilesToPrune, nManualPruneHeight, m_chain.Height());
setFilesToPrune,
std::min(last_prune, nManualPruneHeight),
m_chain.Height());
} else {
LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune",
BCLog::BENCH);
m_blockman.FindFilesToPrune(
setFilesToPrune, chainparams.PruneAfterHeight(),
m_chain.Height(), IsInitialBlockDownload());
m_chain.Height(), last_prune, IsInitialBlockDownload());
fCheckForPruning = false;
}
if (!setFilesToPrune.empty()) {
Expand Down Expand Up @@ -3687,7 +3700,9 @@ static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize,

bool finalize_undo = false;
if (!fKnown) {
while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
while (vinfoBlockFile[nFile].nSize + nAddSize >=
(gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */
: MAX_BLOCKFILE_SIZE)) {
// when the undo file is keeping up with the block file, we want to
// flush it explicitly when it is lagging behind (more blocks arrive
// than are being connected), we let the undo block write case
Expand Down Expand Up @@ -4651,7 +4666,8 @@ void PruneBlockFilesManual(CChainState &active_chainstate,

void BlockManager::FindFilesToPrune(std::set<int> &setFilesToPrune,
uint64_t nPruneAfterHeight,
int chain_tip_height, bool is_ibd) {
int chain_tip_height, int prune_height,
bool is_ibd) {
LOCK2(cs_main, cs_LastBlockFile);
if (chain_tip_height < 0 || nPruneTarget == 0) {
return;
Expand All @@ -4660,7 +4676,8 @@ void BlockManager::FindFilesToPrune(std::set<int> &setFilesToPrune,
return;
}

unsigned int nLastBlockWeCanPrune = chain_tip_height - MIN_BLOCKS_TO_KEEP;
unsigned int nLastBlockWeCanPrune = std::min(
prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP));
uint64_t nCurrentUsage = CalculateCurrentUsage();
// We don't check to prune until after we've allocated new space for files,
// so we should leave a buffer under our target to account for another
Expand Down
2 changes: 1 addition & 1 deletion src/validation.h
Expand Up @@ -564,7 +564,7 @@ class BlockManager {
*/
void FindFilesToPrune(std::set<int> &setFilesToPrune,
uint64_t nPruneAfterHeight, int chain_tip_height,
bool is_ibd);
int prune_height, bool is_ibd);

public:
BlockMap m_block_index GUARDED_BY(cs_main);
Expand Down
109 changes: 109 additions & 0 deletions test/functional/feature_blockfilterindex_prune.py
@@ -0,0 +1,109 @@
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test blockfilterindex in conjunction with prune."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)


class FeatureBlockfilterindexPruneTest(BitcoinTestFramework):
    """Exercise -blockfilterindex in combination with -prune.

    Checks that filters remain accessible for pruned blocks, that the index
    can resume after being pruned exactly up to its best block, and that the
    node refuses to start when block data required to build the index has
    already been pruned (unless -reindex is used).
    """

    def set_test_params(self):
        self.num_nodes = 1
        # -fastprune shrinks block files so pruning triggers with few blocks.
        self.extra_args = [["-fastprune", "-prune=1", "-blockfilterindex=1"]]

    def sync_index(self, height):
        """Wait until the basic block filter index reports being synced to
        exactly `height`."""
        expected = {
            'basic block filter index': {
                'synced': True,
                'best_block_height': height
            }
        }
        self.wait_until(lambda: self.nodes[0].getindexinfo() == expected)

    def run_test(self):
        # Shortcut for readability (see backport notes).
        node = self.nodes[0]

        self.log.info("check if we can access a blockfilter when pruning is "
                      "enabled but no blocks are actually pruned")
        self.sync_index(200)
        assert_greater_than(
            len(node.getblockfilter(node.getbestblockhash())['filter']),
            0)
        node.generate(500)
        self.sync_index(height=700)

        self.log.info("prune some blocks")
        pruneheight = node.pruneblockchain(400)
        # The difference in number of blocks per block file between Bitcoin ABC
        # and Bitcoin Core is caused by additional witness data in coinbase
        # transactions for core.
        assert_equal(pruneheight, 346)

        self.log.info("check if we can access the tips blockfilter when we have"
                      " pruned some blocks")
        assert_greater_than(
            len(node.getblockfilter(node.getbestblockhash())['filter']),
            0)

        self.log.info("check if we can access the blockfilter of a pruned "
                      "block")
        assert_greater_than(
            len(node.getblockfilter(node.getblockhash(2))['filter']),
            0)

        # mine and sync index up to a height that will later be the pruneheight
        node.generate(338)
        # Backport note: 3 blk?????.dat file with 346 blocks each makes 1038.
        self.sync_index(height=1038)

        self.log.info("start node without blockfilterindex")
        self.restart_node(0, extra_args=["-fastprune", "-prune=1"])

        self.log.info("make sure accessing the blockfilters throws an error")
        assert_raises_rpc_error(
            -1, "Index is not enabled for filtertype basic",
            node.getblockfilter, node.getblockhash(2))
        node.generate(462)

        self.log.info("prune exactly up to the blockfilterindexes best block "
                      "while blockfilters are disabled")
        # Consistency fix: use the `node` shortcut instead of self.nodes[0].
        pruneheight_2 = node.pruneblockchain(1040)
        assert_equal(pruneheight_2, 1038)
        self.restart_node(
            0, extra_args=["-fastprune", "-prune=1", "-blockfilterindex=1"])
        self.log.info("make sure that we can continue with the partially synced"
                      " index after having pruned up to the index height")
        self.sync_index(height=1500)

        self.log.info("prune below the blockfilterindexes best block while "
                      "blockfilters are disabled")
        self.restart_node(
            0,
            extra_args=["-fastprune", "-prune=1"])
        node.generate(1000)
        # Consistency fix: use the `node` shortcut instead of self.nodes[0].
        pruneheight_3 = node.pruneblockchain(2000)
        assert_greater_than(pruneheight_3, pruneheight_2)
        self.stop_node(0)

        self.log.info("make sure we get an init error when starting the node "
                      "again with block filters")
        with node.assert_debug_log(
                ["basic block filter index best block of the index goes beyond "
                 "pruned data. Please disable the index or reindex (which will "
                 "download the whole blockchain again)"]):
            node.assert_start_raises_init_error(
                extra_args=["-fastprune", "-prune=1", "-blockfilterindex=1"])
        self.log.info("make sure the node starts again with the -reindex arg")
        self.start_node(
            0,
            extra_args=["-fastprune", "-prune=1", "-blockfilterindex",
                        "-reindex"])


if __name__ == '__main__':
    FeatureBlockfilterindexPruneTest().main()
1 change: 1 addition & 0 deletions test/lint/lint-circular-dependencies.sh
Expand Up @@ -14,6 +14,7 @@ set -euo pipefail

EXPECTED_CIRCULAR_DEPENDENCIES=(
"index/txindex -> validation -> index/txindex"
"index/blockfilterindex -> validation -> index/blockfilterindex"
"qt/addresstablemodel -> qt/walletmodel -> qt/addresstablemodel"
"qt/bitcoingui -> qt/walletframe -> qt/bitcoingui"
"qt/recentrequeststablemodel -> qt/walletmodel -> qt/recentrequeststablemodel"
Expand Down

0 comments on commit eacbda7

Please sign in to comment.