
Commit 03d6d23

[tests] make pruning test faster

This commit makes pruning.py much faster. Key insights to do this:

- pruning.py doesn't care what kind of transactions make up the big blocks that are pruned in the test. Instead of filling blocks with several large transactions that are expensive to construct and validate, make each large block contain a single coinbase transaction with a huge OP_RETURN txout.
- Avoid stop-starting nodes where possible.

This test could probably be made even faster by using the P2P interface for submitting blocks instead of the submitblock RPC.

1 parent 1c29ac4
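A sketch of that possible P2P-based speedup, assuming the test framework's P2PDataStore helper from test_framework.mininode (an assumption about the framework of this era; not part of this commit):

    from test_framework.mininode import P2PDataStore

    # Feed pre-built blocks to the node over the P2P protocol instead of one
    # submitblock RPC round-trip per block. `node` is a TestNode and `blocks`
    # is a list of solved CBlock objects, e.g. built as in mine_large_blocks().
    node.add_p2p_connection(P2PDataStore())
    node.p2p.send_blocks_and_test(blocks, node, success=True)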

File tree

1 file changed: +60, -47 lines

test/functional/feature_pruning.py

Lines changed: 60 additions & 47 deletions
@@ -8,11 +8,13 @@
 This test uses 4GB of disk space.
 This test takes 30 mins or more (up to 2 hours)
 """
+import os

+from test_framework.blocktools import create_coinbase
+from test_framework.messages import CBlock, ToHex
+from test_framework.script import CScript, OP_RETURN, OP_NOP
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, mine_large_block, sync_blocks, wait_until
-
-import os
+from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, sync_blocks, wait_until

 MIN_BLOCKS_TO_KEEP = 288

@@ -21,6 +23,47 @@
 # compatible with pruning based on key creation time.
 TIMESTAMP_WINDOW = 2 * 60 * 60

+def mine_large_blocks(node, n):
+    # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
+    # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
+    # transaction but is consensus valid.
+
+    # Get the block parameters for the first block
+    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
+    best_block = node.getblock(node.getbestblockhash())
+    height = int(best_block["height"]) + 1
+    try:
+        # Static variable ensures that time is monotonically increasing and is therefore
+        # different for each block created => blockhash is unique.
+        mine_large_blocks.nTime = min(mine_large_blocks.nTime, int(best_block["time"])) + 1
+    except AttributeError:
+        mine_large_blocks.nTime = int(best_block["time"]) + 1
+    previousblockhash = int(best_block["hash"], 16)
+
+    for _ in range(n):
+        # Build the coinbase transaction (with large scriptPubKey)
+        coinbase_tx = create_coinbase(height)
+        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
+        coinbase_tx.vout[0].scriptPubKey = big_script
+        coinbase_tx.rehash()
+
+        # Build the block
+        block = CBlock()
+        block.nVersion = best_block["version"]
+        block.hashPrevBlock = previousblockhash
+        block.nTime = mine_large_blocks.nTime
+        block.nBits = int('207fffff', 16)
+        block.nNonce = 0
+        block.vtx = [coinbase_tx]
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.solve()
+
+        # Submit to the node
+        node.submitblock(ToHex(block))
+
+        previousblockhash = block.sha256
+        height += 1
+        mine_large_blocks.nTime += 1

 def calc_usage(blockdir):
     return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
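The try/except around mine_large_blocks.nTime stores state as an attribute on the function object itself, so the timestamp persists across calls, much like a C static variable; the AttributeError branch runs only on the first call. A minimal illustration of the idiom (not from the commit):

    def counter():
        # Attributes set on the function object survive between calls.
        try:
            counter.n += 1
        except AttributeError:
            counter.n = 1  # first call: the attribute doesn't exist yet
        return counter.n

    counter()  # -> 1
    counter()  # -> 2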
@@ -29,11 +72,10 @@ class PruneTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = True
         self.num_nodes = 6
-        self.rpc_timeout = 900

         # Create nodes 0 and 1 to mine.
         # Create node 2 to test pruning.
-        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"]
+        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
         # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
         # Create nodes 5 to test wallet in prune mode, but do not connect
         self.extra_args = [
@@ -73,8 +115,7 @@ def create_big_chain(self):
         self.nodes[0].generate(150)

         # Then mine enough full blocks to create more than 550MiB of data
-        for i in range(645):
-            mine_large_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 645)

         sync_blocks(self.nodes[0:5])

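Back-of-envelope check on that 550MiB figure: each block's coinbase scriptPubKey alone is 950,001 bytes (one OP_RETURN plus 950,000 OP_NOPs), so a block serializes to roughly 0.91MiB, and 645 such blocks write about 645 × 0.91MiB ≈ 585MiB of block data, comfortably past the 550MiB pruning threshold.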
@@ -84,8 +125,7 @@ def test_height_min(self):
         self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
         self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
         # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
-        for i in range(25):
-            mine_large_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 25)

         # Wait for blk00000.dat to be pruned
         wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
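For scale: assuming Bitcoin Core pre-allocates blk*.dat files in 16MiB chunks (BLOCKFILE_CHUNK_SIZE), 25 blocks of roughly 0.91MiB each add about 23MiB of block data, enough to force allocation of a fresh chunk and so trigger the prune described in the comment above.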
@@ -102,22 +142,13 @@ def create_chain_with_staleblocks(self):
        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
-            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
-            self.stop_node(0)
-            self.start_node(0, extra_args=self.full_node_default_args)
+            disconnect_nodes(self.nodes[0], 1)
+            disconnect_nodes(self.nodes[0], 2)
            # Mine 24 blocks in node 1
-            for i in range(24):
-                if j == 0:
-                    mine_large_block(self.nodes[1], self.utxo_cache_1)
-                else:
-                    # Add node1's wallet transactions back to the mempool, to
-                    # avoid the mined blocks from being too small.
-                    self.nodes[1].resendwallettransactions()
-                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
+            mine_large_blocks(self.nodes[1], 24)

            # Reorg back with 25 block chain from node 0
-            for i in range(25):
-                mine_large_block(self.nodes[0], self.utxo_cache_0)
+            mine_large_blocks(self.nodes[0], 25)

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[0], 1)
@@ -129,10 +160,6 @@ def create_chain_with_staleblocks(self):
    def reorg_test(self):
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
-        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
-        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5"])

        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: %d" % height)
@@ -153,9 +180,9 @@ def reorg_test(self):
        assert self.nodes[1].getblockcount() == self.forkheight - 1
        self.log.info("New best height: %d" % self.nodes[1].getblockcount())

-        # Reboot node1 to clear those giant tx's from mempool
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5"])
+        # Disconnect node1 and generate the new chain
+        disconnect_nodes(self.nodes[0], 1)
+        disconnect_nodes(self.nodes[1], 2)

        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)
@@ -167,17 +194,10 @@ def reorg_test(self):

        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
-        self.log.info("Mine 220 more large blocks so we have requisite history")

-        # Get node0's wallet transactions back in its mempool, to avoid the
-        # mined blocks from being too small.
-        self.nodes[0].resendwallettransactions()
+        self.log.info("Mine 220 more large blocks so we have requisite history")

-        for i in range(22):
-            # This can be slow, so do this in multiple RPC calls to avoid
-            # RPC timeouts.
-            self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
-            sync_blocks(self.nodes[0:3], timeout=300)
+        mine_large_blocks(self.nodes[0], 220)

        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
@@ -331,16 +351,9 @@ def wallet_test(self):
        self.log.info("Success")

    def run_test(self):
-        self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
-        self.log.info("Mining a big blockchain of 995 blocks")
-
-        # Determine default relay fee
-        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
-
-        # Cache for utxos, as the listunspent may take a long time later in the test
-        self.utxo_cache_0 = []
-        self.utxo_cache_1 = []
+        self.log.info("Warning! This test requires 4GB of disk space")

+        self.log.info("Mining a big blockchain of 995 blocks")
        self.create_big_chain()
        # Chain diagram key:
        # * blocks on main chain
