
Commit 1fe851a

Merge bitcoin/bitcoin#32180: p2p: Advance pindexLastCommonBlock early after connecting blocks
01cc20f test: improve coverage for a resolved stalling situation (Martin Zumsande)
9af6daf test: remove magic number when checking for blocks that have arrived (Martin Zumsande)
3069d66 p2p: During block download, adjust pindexLastCommonBlock better (Martin Zumsande)

Pull request description:

  As described in #32179, pindexLastCommonBlock is updated later than necessary in master. In case of a linear chain with no forks, it can be moved forward at the beginning of FindNextBlocksToDownload, so that the updated value can be used to better estimate nWindowEnd. This helps the node request all blocks from peers within the correct 1024-block window and avoids peers being incorrectly marked as stallers.

  I also changed p2p_ibd_stalling.py to cover the situation after a stall has been resolved, making sure that no additional peers are marked for stalling.

  Fixes #32179

ACKs for top commit:
  Crypt-iQ: crACK 01cc20f
  stringintech: re-ACK 01cc20f
  achow101: ACK 01cc20f
  sipa: utACK 01cc20f

Tree-SHA512: a97f7a7ef5ded538ee35576e04b3fbcdd46a6d0189c7ba3abacc6e0d81e800aac5b0c2d2565d0462ef6fd4acc751989f577fd6adfd450171a7d6ab26f437df32
2 parents 5f0303b + 01cc20f commit 1fe851a
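
To make the new selection rule easier to follow in isolation, here is a minimal, self-contained C++ sketch of the idea (this is not Bitcoin Core code: the Index struct and the helper names last_common_ancestor and advance_last_common are made up for illustration). It models the rule introduced in the diff below: move the last common block to the fork point between the peer's best known block and our tip, unless the stored value is still an ancestor of the peer's best block and already has more work than that fork point.

// Standalone sketch with hypothetical names; compile with e.g. g++ -std=c++17 sketch.cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct Index {
    const Index* parent{nullptr};
    int height{0};
    uint64_t chain_work{0};  // cumulative work, strictly increasing along a chain

    // Walk back to the ancestor at the given height (or to the root).
    const Index* ancestor(int h) const {
        const Index* p = this;
        while (p->parent && p->height > h) p = p->parent;
        return p;
    }
};

// Walk both chains back until they meet; assumes a shared genesis block.
const Index* last_common_ancestor(const Index* a, const Index* b) {
    while (a != b) {
        if (a->height > b->height) a = a->parent; else b = b->parent;
    }
    return a;
}

// The selection rule: prefer the fork point between the peer's best known
// block and our tip, unless the stored last common block is still an
// ancestor of the peer's best block and has more work than the fork point.
const Index* advance_last_common(const Index* stored, const Index* peer_best, const Index* our_tip) {
    const Index* fork_point = last_common_ancestor(peer_best, our_tip);
    if (stored == nullptr ||
        fork_point->chain_work > stored->chain_work ||
        peer_best->ancestor(stored->height) != stored) {
        return fork_point;
    }
    return stored;
}

int main() {
    // Linear chain of 5 blocks; we have connected up to height 3, the peer knows height 4.
    std::vector<Index> chain(5);
    for (int i = 1; i < 5; ++i) {
        chain[i].parent = &chain[i - 1];
        chain[i].height = i;
        chain[i].chain_work = chain[i - 1].chain_work + 1;
    }
    const Index* our_tip = &chain[3];
    const Index* peer_best = &chain[4];

    // With no stored value, the base jumps straight to the fork point (our tip),
    // so the download window is measured from there rather than from an older block.
    assert(advance_last_common(nullptr, peer_best, our_tip) == our_tip);
    // A stale base lower down the chain is also advanced, because the fork point has more work.
    assert(advance_last_common(&chain[1], peer_best, our_tip) == our_tip);
    return 0;
}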

File tree

2 files changed (+30, -36 lines)


src/net_processing.cpp

Lines changed: 8 additions & 9 deletions
@@ -1393,17 +1393,16 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
         return;
     }
 
-    // Bootstrap quickly by guessing a parent of our best tip is the forking point.
-    // Guessing wrong in either direction is not a problem.
-    // Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download.
+    // Determine the forking point between the peer's chain and our chain:
+    // pindexLastCommonBlock is required to be an ancestor of pindexBestKnownBlock, and will be used as a starting point.
+    // It is being set to the fork point between the peer's best known block and the current tip, unless it is already set to
+    // an ancestor with more work than the fork point.
+    auto fork_point = LastCommonAncestor(state->pindexBestKnownBlock, m_chainman.ActiveTip());
     if (state->pindexLastCommonBlock == nullptr ||
-        (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
-        state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
+        fork_point->nChainWork > state->pindexLastCommonBlock->nChainWork ||
+        state->pindexBestKnownBlock->GetAncestor(state->pindexLastCommonBlock->nHeight) != state->pindexLastCommonBlock) {
+        state->pindexLastCommonBlock = fork_point;
     }
-
-    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
-    // of its current tip anymore. Go back enough to fix that.
-    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
         return;
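To see why advancing the base earlier matters for stalling detection, the following small standalone sketch illustrates the window arithmetic from the pull request description, under the assumption that the end of the download window is derived from the last common block's height plus the 1024-block window; window_end and the example heights are made up and this is not Core code.

// Standalone illustration of the 1024-block download window (hypothetical helper).
#include <cstdio>

constexpr int BLOCK_DOWNLOAD_WINDOW = 1024;

// Assumed relationship for this sketch: the highest block that may be
// requested sits this many blocks past the last block we share with the peer.
int window_end(int last_common_height) {
    return last_common_height + BLOCK_DOWNLOAD_WINDOW;
}

int main() {
    const int stale_base = 990;   // last common block left over from before a batch of blocks connected
    const int fresh_base = 2000;  // base after advancing it to the fork point with the peer

    // With the stale base the node may only request blocks up to height 2014, so
    // fewer new blocks can be requested and peers can be marked as stallers even
    // though they are not withholding anything; with the advanced base the full
    // window past the current tip is usable.
    std::printf("stale window end: %d\n", window_end(stale_base));  // 2014
    std::printf("fresh window end: %d\n", window_end(fresh_base));  // 3024
    return 0;
}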

test/functional/p2p_ibd_stalling.py

Lines changed: 22 additions & 27 deletions
@@ -29,15 +29,15 @@
 
 
 class P2PStaller(P2PDataStore):
-    def __init__(self, stall_block):
-        self.stall_block = stall_block
+    def __init__(self, stall_blocks):
+        self.stall_blocks = stall_blocks
         super().__init__()
 
     def on_getdata(self, message):
         for inv in message.inv:
             self.getdata_requests.append(inv.hash)
             if (inv.type & MSG_TYPE_MASK) == MSG_BLOCK:
-                if (inv.hash != self.stall_block):
+                if (inv.hash not in self.stall_blocks):
                     self.send_without_ping(msg_block(self.block_store[inv.hash]))
 
     def on_getheaders(self, message):
@@ -51,7 +51,7 @@ def set_test_params(self):
 
     def run_test(self):
         NUM_BLOCKS = 1025
-        NUM_PEERS = 4
+        NUM_PEERS = 5
         node = self.nodes[0]
         tip = int(node.getbestblockhash(), 16)
         blocks = []
@@ -66,7 +66,9 @@ def run_test(self):
             block_time += 1
             height += 1
             block_dict[blocks[-1].hash_int] = blocks[-1]
-        stall_block = blocks[0].hash_int
+        stall_index = 0
+        second_stall_index = 500
+        stall_blocks = [blocks[stall_index].hash_int, blocks[second_stall_index].hash_int]
 
         headers_message = msg_headers()
         headers_message.headers = [CBlockHeader(b) for b in blocks[:NUM_BLOCKS-1]]
@@ -76,14 +78,12 @@ def run_test(self):
         self.mocktime = int(time.time()) + 1
         node.setmocktime(self.mocktime)
         for id in range(NUM_PEERS):
-            peers.append(node.add_outbound_p2p_connection(P2PStaller(stall_block), p2p_idx=id, connection_type="outbound-full-relay"))
+            peers.append(node.add_outbound_p2p_connection(P2PStaller(stall_blocks), p2p_idx=id, connection_type="outbound-full-relay"))
             peers[-1].block_store = block_dict
             peers[-1].send_and_ping(headers_message)
 
-        # Need to wait until 1023 blocks are received - the magic total bytes number is a workaround in lack of an rpc
-        # returning the number of downloaded (but not connected) blocks.
-        bytes_recv = 172761 if not self.options.v2transport else 169692
-        self.wait_until(lambda: self.total_bytes_recv_for_blocks() == bytes_recv)
+        # Wait until all blocks are received (except for the stall blocks), so that no other blocks are in flight.
+        self.wait_until(lambda: sum(len(peer['inflight']) for peer in node.getpeerinfo()) == len(stall_blocks))
 
         self.all_sync_send_with_ping(peers)
         # If there was a peer marked for stalling, it would get disconnected
@@ -104,7 +104,7 @@ def run_test(self):
         node.setmocktime(self.mocktime)
         peers[0].wait_for_disconnect()
         assert_equal(node.num_test_p2p_connections(), NUM_PEERS - 1)
-        self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+        self.wait_until(lambda: self.is_block_requested(peers, stall_blocks[0]))
         # Make sure that SendMessages() is invoked, which assigns the missing block
         # to another peer and starts the stalling logic for them
         self.all_sync_send_with_ping(peers)
@@ -119,7 +119,7 @@ def run_test(self):
         self.mocktime += 2
         node.setmocktime(self.mocktime)
         self.wait_until(lambda: sum(x.is_connected for x in node.p2ps) == NUM_PEERS - 2)
-        self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+        self.wait_until(lambda: self.is_block_requested(peers, stall_blocks[0]))
         self.all_sync_send_with_ping(peers)
 
         self.log.info("Check that the stalling timeout gets doubled to 8 seconds for the next staller")
@@ -132,24 +132,19 @@ def run_test(self):
         self.mocktime += 2
         node.setmocktime(self.mocktime)
         self.wait_until(lambda: sum(x.is_connected for x in node.p2ps) == NUM_PEERS - 3)
-        self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+        self.wait_until(lambda: self.is_block_requested(peers, stall_blocks[0]))
         self.all_sync_send_with_ping(peers)
 
-        self.log.info("Provide the withheld block and check that stalling timeout gets reduced back to 2 seconds")
-        with node.assert_debug_log(expected_msgs=['Decreased stalling timeout to 2 seconds']):
+        self.log.info("Provide the first withheld block and check that stalling timeout gets reduced back to 2 seconds")
+        with node.assert_debug_log(expected_msgs=['Decreased stalling timeout to 2 seconds'], unexpected_msgs=['Stall started']):
             for p in peers:
-                if p.is_connected and (stall_block in p.getdata_requests):
-                    p.send_without_ping(msg_block(block_dict[stall_block]))
-
-        self.log.info("Check that all outstanding blocks get connected")
-        self.wait_until(lambda: node.getblockcount() == NUM_BLOCKS)
-
-    def total_bytes_recv_for_blocks(self):
-        total = 0
-        for info in self.nodes[0].getpeerinfo():
-            if ("block" in info["bytesrecv_per_msg"].keys()):
-                total += info["bytesrecv_per_msg"]["block"]
-        return total
+                if p.is_connected and (stall_blocks[0] in p.getdata_requests):
+                    p.send_without_ping(msg_block(block_dict[stall_blocks[0]]))
+            self.all_sync_send_with_ping(peers)
+
+        self.log.info("Check that all outstanding blocks up to the second stall block get connected")
+        self.wait_until(lambda: node.getblockcount() == second_stall_index)
+
 
     def all_sync_send_with_ping(self, peers):
         for p in peers:
