|
9 | 9 | The assumeutxo value generated and used here is committed to in
|
10 | 10 | `CRegTestParams::m_assumeutxo_data` in `src/kernel/chainparams.cpp`.
|
11 | 11 | """
|
| 12 | +import time |
12 | 13 | from shutil import rmtree
|
13 | 14 |
|
14 | 15 | from dataclasses import dataclass
|
15 | 16 | from test_framework.blocktools import (
|
16 | 17 | create_block,
|
17 | 18 | create_coinbase
|
18 | 19 | )
|
19 | | -from test_framework.messages import tx_from_hex |
| 20 | +from test_framework.messages import ( |
| 21 | + CBlockHeader, |
| 22 | + from_hex, |
| 23 | + msg_headers, |
| 24 | + tx_from_hex |
| 25 | +) |
| 26 | +from test_framework.p2p import ( |
| 27 | + P2PInterface, |
| 28 | +) |
20 | 29 | from test_framework.test_framework import BitcoinTestFramework
|
21 | 30 | from test_framework.util import (
|
22 | 31 | assert_approx,
|
23 | 32 | assert_equal,
|
24 | 33 | assert_raises_rpc_error,
|
25 | 34 | sha256sum_file,
|
| 35 | + try_rpc, |
26 | 36 | )
|
27 | 37 | from test_framework.wallet import (
|
28 | 38 | getnewdestination,
|
@@ -248,6 +258,69 @@ def test_snapshot_not_on_most_work_chain(self, dump_output_path):
|
248 | 258 | node1.submitheader(main_block1)
|
249 | 259 | node1.submitheader(main_block2)
|
250 | 260 |
|
| 261 | + def test_sync_from_assumeutxo_node(self, snapshot): |
| 262 | + """ |
| 263 | + This test verifies that: |
| 264 | + 1. An IBD node can sync headers from an AssumeUTXO node at any time. |
| 265 | + 2. IBD nodes do not request historical blocks from AssumeUTXO nodes while the latter are still syncing the background chain. |
| 266 | + 3. The AssumeUTXO node dynamically adjusts the network services it offers according to its state. |
| 267 | + 4. IBD nodes can fully sync from AssumeUTXO nodes after they finish the background-chain sync. |
| 268 | + """ |
| 269 | + self.log.info("Testing IBD-sync from assumeUTXO node") |
| 270 | + # Node2 starts clean and loads the snapshot. |
| 271 | + # Node3 starts clean and seeks to sync up from the snapshot_node. |
| 272 | + miner = self.nodes[0] |
| 273 | + snapshot_node = self.nodes[2] |
| 274 | + ibd_node = self.nodes[3] |
| 275 | + |
| 276 | + # Start test fresh by cleaning up node directories |
| 277 | + for node in (snapshot_node, ibd_node): |
| 278 | + self.stop_node(node.index) |
| 279 | + rmtree(node.chain_path) |
| 280 | + self.start_node(node.index, extra_args=self.extra_args[node.index]) |
| 281 | + |
| 282 | + # Sync up the headers chain on the snapshot_node so the snapshot can be loaded |
| 283 | + headers_provider_conn = snapshot_node.add_p2p_connection(P2PInterface()) |
| 284 | + headers_provider_conn.wait_for_getheaders() |
| 285 | + msg = msg_headers() |
| 286 | + for block_num in range(1, miner.getblockcount()+1): |
| 287 | + msg.headers.append(from_hex(CBlockHeader(), miner.getblockheader(miner.getblockhash(block_num), verbose=False))) |
| 288 | + headers_provider_conn.send_message(msg) |
| 289 | + |
| 290 | + # Ensure headers arrived |
| 291 | + default_value = {'status': ''} # No status |
| 292 | + headers_tip_hash = miner.getbestblockhash() |
| 293 | + self.wait_until(lambda: next(filter(lambda x: x['hash'] == headers_tip_hash, snapshot_node.getchaintips()), default_value)['status'] == "headers-only") |
| 294 | + snapshot_node.disconnect_p2ps() |
| 295 | + |
| 296 | + # Load snapshot |
| 297 | + snapshot_node.loadtxoutset(snapshot['path']) |
| 298 | + |
| 299 | + # Connect the nodes and verify the ibd_node can sync the headers chain from the snapshot_node |
| 300 | + self.connect_nodes(ibd_node.index, snapshot_node.index) |
| 301 | + snapshot_block_hash = snapshot['base_hash'] |
| 302 | + self.wait_until(lambda: next(filter(lambda x: x['hash'] == snapshot_block_hash, ibd_node.getchaintips()), default_value)['status'] == "headers-only") |
| 303 | + |
| 304 | + # Once the headers chain is synced, the ibd_node must avoid requesting historical blocks from the snapshot_node. |
| 305 | + # If it does request such blocks, the snapshot_node will ignore requests it cannot fulfill, causing the ibd_node |
| 306 | + # to stall. Such a stall could last for up to 10 minutes, after which the ibd_node would abruptly disconnect the |
| 307 | + # seemingly unresponsive snapshot_node. |
| 308 | + time.sleep(3) # Sleep here because we cannot detect when a node decides not to request blocks from another peer. |
| 309 | + assert_equal(len(ibd_node.getpeerinfo()[0]['inflight']), 0) |
| 310 | + |
| 311 | + # Now disconnect nodes and finish background chain sync |
| 312 | + self.disconnect_nodes(ibd_node.index, snapshot_node.index) |
| 313 | + self.connect_nodes(snapshot_node.index, miner.index) |
| 314 | + self.sync_blocks(nodes=(miner, snapshot_node)) |
| 315 | + # Check that the base snapshot block was stored and ensure the node signals full-node service support |
| 316 | + self.wait_until(lambda: not try_rpc(-1, "Block not found", snapshot_node.getblock, snapshot_block_hash)) |
| 317 | + assert 'NETWORK' in snapshot_node.getnetworkinfo()['localservicesnames'] |
| 318 | + |
| 319 | + # Now that the snapshot_node is fully synced, verify the ibd_node can sync from it |
| 320 | + self.connect_nodes(snapshot_node.index, ibd_node.index) |
| 321 | + assert 'NETWORK' in ibd_node.getpeerinfo()[0]['servicesnames'] |
| 322 | + self.sync_blocks(nodes=(ibd_node, snapshot_node)) |
| 323 | + |
251 | 324 | def assert_only_network_limited_service(self, node):
|
252 | 325 | node_services = node.getnetworkinfo()['localservicesnames']
|
253 | 326 | assert 'NETWORK' not in node_services
|
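The service checks above hinge on one behavior: while the background chain is still being synced, an AssumeUTXO node advertises NODE_NETWORK_LIMITED but not NODE_NETWORK (as `assert_only_network_limited_service` verifies), and it upgrades its advertised services once the background sync completes. A minimal sketch of how a test could wait for that transition, assuming it runs inside a `BitcoinTestFramework` subclass (the helper name `wait_for_full_node_service` is hypothetical, not part of this diff):

```python
def wait_for_full_node_service(self, node, timeout=60):
    """Poll until `node` advertises NODE_NETWORK, i.e. until it can serve
    historical blocks again after finishing the background-chain sync."""
    def advertises_network():
        services = node.getnetworkinfo()['localservicesnames']
        # During background sync, only 'NETWORK_LIMITED' is expected here.
        return 'NETWORK' in services
    self.wait_until(advertises_network, timeout=timeout)
```

The diff itself checks the flag once, with `assert 'NETWORK' in snapshot_node.getnetworkinfo()['localservicesnames']`, which is sufficient there because `sync_blocks` has already returned by that point.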
@@ -661,6 +734,9 @@ def check_tx_counts(final: bool) -> None:
|
661 | 734 |
|
662 | 735 | self.test_snapshot_in_a_divergent_chain(dump_output['path'])
|
663 | 736 |
|
| 737 | + # The following test wipes the chain directories of node2 and node3. |
| 738 | + self.test_sync_from_assumeutxo_node(snapshot=dump_output) |
| 739 | + |
664 | 740 | @dataclass
|
665 | 741 | class Block:
|
666 | 742 | hash: str
|
|
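Both `wait_until` polls in the new test share the same `getchaintips()` lookup: find the tip entry matching a given hash and compare its status against "headers-only". Factored into a standalone helper, the pattern reads as follows (a hypothetical refactoring for illustration, not part of the commit):

```python
def chain_tip_status(node, block_hash):
    """Return the getchaintips() status for `block_hash`, or '' if the
    hash is not currently a known chain tip on `node`."""
    for tip in node.getchaintips():
        if tip['hash'] == block_hash:
            return tip['status']
    return ''
```

With it, each poll collapses to `self.wait_until(lambda: chain_tip_status(ibd_node, snapshot_block_hash) == "headers-only")`.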