10 changes: 7 additions & 3 deletions src/net_processing.cpp
@@ -1367,12 +1367,16 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co

// Bootstrap quickly by guessing a parent of our best tip is the forking point.
// Guessing wrong in either direction is not a problem.
// Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download.
if (state->pindexLastCommonBlock == nullptr ||
(snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
if (state->pindexLastCommonBlock == nullptr) {
state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
}

// If our tip has advanced beyond pindexLastCommonBlock, move it ahead to the tip. We don't need to download any blocks in between, and skipping ahead here
// allows us to determine nWindowEnd better.
if (m_chainman.ActiveHeight() > state->pindexLastCommonBlock->nHeight && state->pindexBestKnownBlock->GetAncestor(m_chainman.ActiveHeight()) == m_chainman.ActiveTip()) {
state->pindexLastCommonBlock = m_chainman.ActiveTip();
}

// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
// of its current tip anymore. Go back enough to fix that.
state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
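The new branch fast-forwards pindexLastCommonBlock to the active tip whenever the peer's best-known chain already contains our tip: none of the blocks below the tip need to be requested from that peer, and the download window (which starts at pindexLastCommonBlock) then begins at the tip instead of at a stale common block. Below is a minimal Python sketch of that heuristic against a toy block index; `Index`, `get_ancestor` and `last_common_ancestor` are illustrative stand-ins for the Bitcoin Core structures, not actual code from the tree.

```python
class Index:
    """Toy stand-in for CBlockIndex: a height and a pointer to the previous block."""
    def __init__(self, height, prev=None):
        self.height = height
        self.prev = prev

    def get_ancestor(self, height):
        # Walk back to the ancestor at the given height (None if above our own height).
        if height > self.height:
            return None
        node = self
        while node.height > height:
            node = node.prev
        return node


def last_common_ancestor(a, b):
    # Simplified LastCommonAncestor(): walk both chains back until they meet.
    h = min(a.height, b.height)
    a, b = a.get_ancestor(h), b.get_ancestor(h)
    while a is not b:
        a, b = a.prev, b.prev
    return a


# Toy linear chain 0..10: our active tip is at height 8, the peer's best-known
# block is at height 10 on the same chain, and the stale last-common block from
# an earlier call sits at height 3.
chain = [Index(0)]
for h in range(1, 11):
    chain.append(Index(h, chain[-1]))
active_tip, best_known, last_common = chain[8], chain[10], chain[3]

# The added check: if the peer's chain contains our tip, jump last_common
# straight to the tip -- none of the blocks in between need downloading.
if active_tip.height > last_common.height and \
        best_known.get_ancestor(active_tip.height) is active_tip:
    last_common = active_tip

# The pre-existing reorg guard still runs afterwards and is a no-op here.
last_common = last_common_ancestor(last_common, best_known)
assert last_common is active_tip
```

The trailing `last_common_ancestor` call mirrors the reorg guard already in the code: if the peer has since reorganized, the pointer is walked back to a real ancestor of its current tip.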
32 changes: 13 additions & 19 deletions test/functional/p2p_ibd_stalling.py
@@ -29,15 +29,15 @@


class P2PStaller(P2PDataStore):
def __init__(self, stall_block):
self.stall_block = stall_block
def __init__(self, stall_blocks):
self.stall_blocks = stall_blocks
super().__init__()

def on_getdata(self, message):
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_BLOCK:
if (inv.hash != self.stall_block):
if (inv.hash not in self.stall_blocks):
self.send_without_ping(msg_block(self.block_store[inv.hash]))

def on_getheaders(self, message):
@@ -51,7 +51,7 @@ def set_test_params(self):

def run_test(self):
NUM_BLOCKS = 1025
NUM_PEERS = 4
NUM_PEERS = 5
node = self.nodes[0]
tip = int(node.getbestblockhash(), 16)
blocks = []
@@ -67,6 +67,7 @@ def run_test(self):
height += 1
block_dict[blocks[-1].sha256] = blocks[-1]
stall_block = blocks[0].sha256
second_stall = blocks[500].sha256 # another block we don't provide immediately

headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in blocks[:NUM_BLOCKS-1]]
@@ -76,14 +77,12 @@
self.mocktime = int(time.time()) + 1
node.setmocktime(self.mocktime)
for id in range(NUM_PEERS):
peers.append(node.add_outbound_p2p_connection(P2PStaller(stall_block), p2p_idx=id, connection_type="outbound-full-relay"))
peers.append(node.add_outbound_p2p_connection(P2PStaller([stall_block, second_stall]), p2p_idx=id, connection_type="outbound-full-relay"))
peers[-1].block_store = block_dict
peers[-1].send_and_ping(headers_message)

# Need to wait until 1023 blocks are received - the magic total bytes number is a workaround in lack of an rpc
# returning the number of downloaded (but not connected) blocks.
bytes_recv = 172761 if not self.options.v2transport else 169692
self.wait_until(lambda: self.total_bytes_recv_for_blocks() == bytes_recv)
# Wait until all blocks are received (except for the stall blocks), so that no other blocks are in flight.
self.wait_until(lambda: sum(len(peer['inflight']) for peer in node.getpeerinfo()) == 2)

self.all_sync_send_with_ping(peers)
# If there was a peer marked for stalling, it would get disconnected
@@ -135,21 +134,16 @@ def run_test(self):
self.wait_until(lambda: self.is_block_requested(peers, stall_block))
self.all_sync_send_with_ping(peers)

self.log.info("Provide the withheld block and check that stalling timeout gets reduced back to 2 seconds")
with node.assert_debug_log(expected_msgs=['Decreased stalling timeout to 2 seconds']):
self.log.info("Provide the first withheld block and check that stalling timeout gets reduced back to 2 seconds")
with node.assert_debug_log(expected_msgs=['Decreased stalling timeout to 2 seconds'], unexpected_msgs=['Stall started']):
for p in peers:
if p.is_connected and (stall_block in p.getdata_requests):
p.send_without_ping(msg_block(block_dict[stall_block]))
self.all_sync_send_with_ping(peers)

self.log.info("Check that all outstanding blocks get connected")
self.wait_until(lambda: node.getblockcount() == NUM_BLOCKS)
self.log.info("Check that all outstanding blocks up to the second stall block get connected")
self.wait_until(lambda: node.getblockcount() == 500)

def total_bytes_recv_for_blocks(self):
total = 0
for info in self.nodes[0].getpeerinfo():
if ("block" in info["bytesrecv_per_msg"].keys()):
total += info["bytesrecv_per_msg"]["block"]
return total

def all_sync_send_with_ping(self, peers):
for p in peers:
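The old magic-number wait (172761 or 169692 bytes depending on the transport version) is replaced by counting blocks still in flight via getpeerinfo, which does not depend on serialized block sizes. A minimal sketch of that condition follows, using a hand-built structure that mimics the shape of `getpeerinfo()` output; the `peer_info` data and the `blocks_in_flight` helper are illustrative, not part of the test framework.

```python
# Sketch of the new wait condition: block download is "done" for the test once
# only the deliberately withheld blocks remain in flight, however many bytes
# the other blocks took on the wire.

def blocks_in_flight(peer_info):
    # Count blocks requested but not yet received, summed across all peers.
    return sum(len(peer["inflight"]) for peer in peer_info)


# Mimics getpeerinfo(): "inflight" lists the heights of blocks in flight per peer.
peer_info = [
    {"inflight": [1, 501]},   # the two withheld blocks (illustrative heights)
    {"inflight": []},
    {"inflight": []},
]

NUM_STALL_BLOCKS = 2
assert blocks_in_flight(peer_info) == NUM_STALL_BLOCKS
```

Unlike the hard-coded byte totals, this condition needs no separate values for v1 and v2 transport and keeps working if block serialization changes.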