Commit dc5c2e4

MarcoFalke authored and committed
Merge #15686: [tests] make pruning test faster
03d6d23 [tests] make pruning test faster (John Newbery)
1c29ac4 [tests] style fixes in feature_pruning.py (John Newbery)

Pull request description:

  This commit makes pruning.py much faster.

  Key insights to do this:

  - pruning.py doesn't care what kind of transactions make up the big blocks that are pruned in the test. Instead of making blocks with several large, expensive-to-construct-and-validate transactions, make the large blocks contain a single coinbase transaction with a huge OP_RETURN txout.
  - avoid stop-starting nodes where possible.

ACKs for commit 03d6d2:
  MarcoFalke:
    utACK 03d6d23

Tree-SHA512: 511642ce0fa294319dce3486fe06d75970d8ab66deda7f692be081d3056b4ce5b4cf91a7b5762eefbba224ba6c848750016454ff1e5d564acc507b1c41213628
2 parents 904129b + 03d6d23 commit dc5c2e4
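The speedup hinges on the scriptPubKey described above: OP_RETURN followed by roughly 950k OP_NOPs makes the coinbase transaction itself close to a megabyte, so each block is big without building or validating any spending transactions. A minimal sketch of that idea, assuming the repo's test_framework package is importable (the size print is illustrative and not part of the test):

# Sketch only: build the oversized coinbase scriptPubKey used to bloat blocks.
from test_framework.blocktools import create_coinbase
from test_framework.script import CScript, OP_NOP, OP_RETURN

# OP_RETURN followed by 950k OP_NOPs: non-standard in an ordinary transaction,
# but consensus-valid in a coinbase output.
big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
print(len(big_script))  # ~950001 bytes, so one coinbase nearly fills a block

# Attach it to a coinbase at some height; no other transactions are needed
# to make the block large.
coinbase_tx = create_coinbase(height=1)
coinbase_tx.vout[0].scriptPubKey = big_script
coinbase_tx.rehash()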


test/functional/feature_pruning.py

Lines changed: 93 additions & 93 deletions
@@ -8,11 +8,13 @@
 This test uses 4GB of disk space.
 This test takes 30 mins or more (up to 2 hours)
 """
+import os

+from test_framework.blocktools import create_coinbase
+from test_framework.messages import CBlock, ToHex
+from test_framework.script import CScript, OP_RETURN, OP_NOP
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, mine_large_block, sync_blocks, wait_until
-
-import os
+from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, sync_blocks, wait_until

 MIN_BLOCKS_TO_KEEP = 288

@@ -21,19 +23,59 @@
 # compatible with pruning based on key creation time.
 TIMESTAMP_WINDOW = 2 * 60 * 60

+def mine_large_blocks(node, n):
+    # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
+    # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
+    # transaction but is consensus valid.
+
+    # Get the block parameters for the first block
+    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
+    best_block = node.getblock(node.getbestblockhash())
+    height = int(best_block["height"]) + 1
+    try:
+        # Static variable ensures that time is monotonicly increasing and is therefore
+        # different for each block created => blockhash is unique.
+        mine_large_blocks.nTime = min(mine_large_blocks.nTime, int(best_block["time"])) + 1
+    except AttributeError:
+        mine_large_blocks.nTime = int(best_block["time"]) + 1
+    previousblockhash = int(best_block["hash"], 16)
+
+    for _ in range(n):
+        # Build the coinbase transaction (with large scriptPubKey)
+        coinbase_tx = create_coinbase(height)
+        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
+        coinbase_tx.vout[0].scriptPubKey = big_script
+        coinbase_tx.rehash()
+
+        # Build the block
+        block = CBlock()
+        block.nVersion = best_block["version"]
+        block.hashPrevBlock = previousblockhash
+        block.nTime = mine_large_blocks.nTime
+        block.nBits = int('207fffff', 16)
+        block.nNonce = 0
+        block.vtx = [coinbase_tx]
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.solve()
+
+        # Submit to the node
+        node.submitblock(ToHex(block))
+
+        previousblockhash = block.sha256
+        height += 1
+        mine_large_blocks.nTime += 1

 def calc_usage(blockdir):
-    return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
+    return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)

 class PruneTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = True
         self.num_nodes = 6
-        self.rpc_timeout = 900

         # Create nodes 0 and 1 to mine.
         # Create node 2 to test pruning.
-        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"]
+        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
         # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
         # Create nodes 5 to test wallet in prune mode, but do not connect
         self.extra_args = [
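A side note on the new mine_large_blocks helper above: rather than using a global, it keeps the running block time as an attribute on the function object, initialized lazily in the except AttributeError branch, so repeated calls keep producing increasing timestamps and therefore unique block hashes. A standalone sketch of that pattern (the name and the max() choice below are illustrative, not taken from the test):

def next_timestamp(block_time):
    try:
        # Later calls: bump the stored value so each timestamp is unique.
        next_timestamp.last = max(next_timestamp.last, block_time) + 1
    except AttributeError:
        # First call: the attribute does not exist yet, so initialize it.
        next_timestamp.last = block_time + 1
    return next_timestamp.last

print(next_timestamp(1000))  # 1001
print(next_timestamp(1000))  # 1002, still increasing for the same input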
@@ -55,7 +97,7 @@ def setup_network(self):

         connect_nodes(self.nodes[0], 1)
         connect_nodes(self.nodes[1], 2)
-        connect_nodes(self.nodes[2], 0)
+        connect_nodes(self.nodes[0], 2)
         connect_nodes(self.nodes[0], 3)
         connect_nodes(self.nodes[0], 4)
         sync_blocks(self.nodes[0:5])
@@ -71,30 +113,27 @@ def create_big_chain(self):
         self.nodes[1].generate(200)
         sync_blocks(self.nodes[0:2])
         self.nodes[0].generate(150)
+
         # Then mine enough full blocks to create more than 550MiB of data
-        for i in range(645):
-            mine_large_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 645)

         sync_blocks(self.nodes[0:5])

     def test_height_min(self):
-        if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
-            raise AssertionError("blk00000.dat is missing, pruning too early")
+        assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
         self.log.info("Success")
         self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
         self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
         # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
-        for i in range(25):
-            mine_large_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 25)

         # Wait for blk00000.dat to be pruned
         wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)

         self.log.info("Success")
         usage = calc_usage(self.prunedir)
         self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
+        assert_greater_than(550, usage)

     def create_chain_with_staleblocks(self):
         # Create stale blocks in manageable sized chunks
@@ -103,90 +142,66 @@ def create_chain_with_staleblocks(self):
         for j in range(12):
             # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
             # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
-            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
-            self.stop_node(0)
-            self.start_node(0, extra_args=self.full_node_default_args)
+            disconnect_nodes(self.nodes[0], 1)
+            disconnect_nodes(self.nodes[0], 2)
             # Mine 24 blocks in node 1
-            for i in range(24):
-                if j == 0:
-                    mine_large_block(self.nodes[1], self.utxo_cache_1)
-                else:
-                    # Add node1's wallet transactions back to the mempool, to
-                    # avoid the mined blocks from being too small.
-                    self.nodes[1].resendwallettransactions()
-                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
+            mine_large_blocks(self.nodes[1], 24)

             # Reorg back with 25 block chain from node 0
-            for i in range(25):
-                mine_large_block(self.nodes[0], self.utxo_cache_0)
+            mine_large_blocks(self.nodes[0], 25)

             # Create connections in the order so both nodes can see the reorg at the same time
-            connect_nodes(self.nodes[1], 0)
-            connect_nodes(self.nodes[2], 0)
+            connect_nodes(self.nodes[0], 1)
+            connect_nodes(self.nodes[0], 2)
             sync_blocks(self.nodes[0:3])

         self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))

     def reorg_test(self):
         # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
         # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
-        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
-        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5"])

         height = self.nodes[1].getblockcount()
         self.log.info("Current block height: %d" % height)

-        invalidheight = height-287
-        badhash = self.nodes[1].getblockhash(invalidheight)
-        self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
-        self.nodes[1].invalidateblock(badhash)
+        self.forkheight = height - 287
+        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
+        self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
+        self.nodes[1].invalidateblock(self.forkhash)

         # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
         # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
-        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
-        curhash = self.nodes[1].getblockhash(invalidheight - 1)
+        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
+        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
         while curhash != mainchainhash:
             self.nodes[1].invalidateblock(curhash)
-            curhash = self.nodes[1].getblockhash(invalidheight - 1)
+            curhash = self.nodes[1].getblockhash(self.forkheight - 1)

-        assert self.nodes[1].getblockcount() == invalidheight - 1
+        assert self.nodes[1].getblockcount() == self.forkheight - 1
         self.log.info("New best height: %d" % self.nodes[1].getblockcount())

-        # Reboot node1 to clear those giant tx's from mempool
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5"])
+        # Disconnect node1 and generate the new chain
+        disconnect_nodes(self.nodes[0], 1)
+        disconnect_nodes(self.nodes[1], 2)

         self.log.info("Generating new longer chain of 300 more blocks")
         self.nodes[1].generate(300)

         self.log.info("Reconnect nodes")
         connect_nodes(self.nodes[0], 1)
-        connect_nodes(self.nodes[2], 1)
+        connect_nodes(self.nodes[1], 2)
         sync_blocks(self.nodes[0:3], timeout=120)

         self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
-
-        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
+        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))

-        # Get node0's wallet transactions back in its mempool, to avoid the
-        # mined blocks from being too small.
-        self.nodes[0].resendwallettransactions()
+        self.log.info("Mine 220 more large blocks so we have requisite history")

-        for i in range(22):
-            # This can be slow, so do this in multiple RPC calls to avoid
-            # RPC timeouts.
-            self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
-            sync_blocks(self.nodes[0:3], timeout=300)
+        mine_large_blocks(self.nodes[0], 220)

         usage = calc_usage(self.prunedir)
         self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
-
-        return invalidheight,badhash
+        assert_greater_than(550, usage)

     def reorg_back(self):
         # Verify that a block on the old main chain fork has been pruned away
@@ -219,17 +234,17 @@ def reorg_back(self):
         blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
         self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
         self.nodes[0].invalidateblock(curchainhash)
-        assert self.nodes[0].getblockcount() == self.mainchainheight
-        assert self.nodes[0].getbestblockhash() == self.mainchainhash2
+        assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
+        assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
         goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
         goalbestheight = first_reorg_height + 1

         self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
         # Wait for Node 2 to reorg to proper height
         wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
-        assert self.nodes[2].getbestblockhash() == goalbesthash
+        assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
         # Verify we can now have the data for a block previously pruned
-        assert self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight
+        assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)

     def manual_test(self, node_number, use_timestamp):
         # at this point, node has 995 blocks and has not yet run in prune mode
@@ -287,38 +302,30 @@ def has_block(index):

         # height=100 too low to prune first block file so this is a no-op
         prune(100)
-        if not has_block(0):
-            raise AssertionError("blk00000.dat is missing when should still be there")
+        assert has_block(0), "blk00000.dat is missing when should still be there"

         # Does nothing
         node.pruneblockchain(height(0))
-        if not has_block(0):
-            raise AssertionError("blk00000.dat is missing when should still be there")
+        assert has_block(0), "blk00000.dat is missing when should still be there"

         # height=500 should prune first file
         prune(500)
-        if has_block(0):
-            raise AssertionError("blk00000.dat is still there, should be pruned by now")
-        if not has_block(1):
-            raise AssertionError("blk00001.dat is missing when should still be there")
+        assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
+        assert has_block(1), "blk00001.dat is missing when should still be there"

         # height=650 should prune second file
         prune(650)
-        if has_block(1):
-            raise AssertionError("blk00001.dat is still there, should be pruned by now")
+        assert not has_block(1), "blk00001.dat is still there, should be pruned by now"

         # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
         prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
-        if not has_block(2):
-            raise AssertionError("blk00002.dat is still there, should be pruned by now")
+        assert has_block(2), "blk00002.dat is still there, should be pruned by now"

         # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
         node.generate(288)
         prune(1000)
-        if has_block(2):
-            raise AssertionError("blk00002.dat is still there, should be pruned by now")
-        if has_block(3):
-            raise AssertionError("blk00003.dat is still there, should be pruned by now")
+        assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
+        assert not has_block(3), "blk00003.dat is still there, should be pruned by now"

         # stop node, start back up with auto-prune at 550 MiB, make sure still runs
         self.stop_node(node_number)
@@ -339,21 +346,14 @@ def wallet_test(self):
         connect_nodes(self.nodes[0], 5)
         nds = [self.nodes[0], self.nodes[5]]
         sync_blocks(nds, wait=5, timeout=300)
-        self.stop_node(5) #stop and start to trigger rescan
+        self.stop_node(5) # stop and start to trigger rescan
         self.start_node(5, extra_args=["-prune=550"])
         self.log.info("Success")

     def run_test(self):
-        self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
-        self.log.info("Mining a big blockchain of 995 blocks")
-
-        # Determine default relay fee
-        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
-
-        # Cache for utxos, as the listunspent may take a long time later in the test
-        self.utxo_cache_0 = []
-        self.utxo_cache_1 = []
+        self.log.info("Warning! This test requires 4GB of disk space")

+        self.log.info("Mining a big blockchain of 995 blocks")
         self.create_big_chain()
         # Chain diagram key:
         # * blocks on main chain
@@ -394,11 +394,11 @@ def run_test(self):
         # +...+(1044) &.. $...$(1319)

         # Save some current chain state for later use
-        self.mainchainheight = self.nodes[2].getblockcount() #1320
+        self.mainchainheight = self.nodes[2].getblockcount() # 1320
         self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

         self.log.info("Check that we can survive a 288 block reorg still")
-        (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
+        self.reorg_test() # (1033, )
         # Now create a 288 block reorg by mining a longer chain on N1
         # First disconnect N1
         # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
