This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
+import os
+
+from test_framework.blocktools import create_coinbase
+from test_framework.messages import CBlock, ToHex
+from test_framework.script import CScript, OP_RETURN, OP_NOP
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, mine_large_block, sync_blocks, wait_until
-
-import os
+from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, sync_blocks, wait_until

MIN_BLOCKS_TO_KEEP = 288

# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60

+def mine_large_blocks(node, n):
+    # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
+    # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
+    # transaction but is consensus valid.
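+    # Each block is therefore roughly 950KB when serialized, so mining a few
+    # hundred of them (645 below) pushes the datadir past the 550MiB prune target.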
+
+    # Get the block parameters for the first block
+    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
+    best_block = node.getblock(node.getbestblockhash())
+    height = int(best_block["height"]) + 1
+    try:
+        # Static variable ensures that time is monotonically increasing and is therefore
+        # different for each block created => blockhash is unique.
+        mine_large_blocks.nTime = min(mine_large_blocks.nTime, int(best_block["time"])) + 1
+    except AttributeError:
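+        # First call: the nTime attribute does not exist yet, so seed it from
+        # the current tip's timestamp.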
+        mine_large_blocks.nTime = int(best_block["time"]) + 1
+    previousblockhash = int(best_block["hash"], 16)
+
+    for _ in range(n):
+        # Build the coinbase transaction (with large scriptPubKey)
+        coinbase_tx = create_coinbase(height)
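+        # An nSequence of 0xffffffff marks the input final (nLockTime is not enforced)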
+        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
+        coinbase_tx.vout[0].scriptPubKey = big_script
+        coinbase_tx.rehash()
+
+        # Build the block
+        block = CBlock()
+        block.nVersion = best_block["version"]
+        block.hashPrevBlock = previousblockhash
+        block.nTime = mine_large_blocks.nTime
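+        # 0x207fffff is regtest's minimum-difficulty target in compact form, so
+        # block.solve() only needs to try a handful of nonces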
+        block.nBits = int('207fffff', 16)
+        block.nNonce = 0
+        block.vtx = [coinbase_tx]
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.solve()
+
+        # Submit to the node
+        node.submitblock(ToHex(block))
+
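+        # Chain the next block off this one locally rather than re-querying the node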
+        previousblockhash = block.sha256
+        height += 1
+        mine_large_blocks.nTime += 1

def calc_usage(blockdir):
-    return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
+    return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)

class PruneTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 6
-        self.rpc_timeout = 900

        # Create nodes 0 and 1 to mine.
        # Create node 2 to test pruning.
-        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"]
+        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
        # Create node 5 to test wallet in prune mode, but do not connect
        self.extra_args = [
@@ -55,7 +97,7 @@ def setup_network(self):

        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
-        connect_nodes(self.nodes[2], 0)
+        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[0], 4)
        sync_blocks(self.nodes[0:5])
@@ -71,30 +113,27 @@ def create_big_chain(self):
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
+
        # Then mine enough full blocks to create more than 550MiB of data
-        for i in range(645):
-            mine_large_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 645)

        sync_blocks(self.nodes[0:5])

    def test_height_min(self):
-        if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
-            raise AssertionError("blk00000.dat is missing, pruning too early")
+        assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
        self.log.info("Success")
        self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
        self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
-        for i in range(25):
-            mine_large_block(self.nodes[0], self.utxo_cache_0)
+        mine_large_blocks(self.nodes[0], 25)

        # Wait for blk00000.dat to be pruned
        wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)

        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
+        assert_greater_than(550, usage)

    def create_chain_with_staleblocks(self):
        # Create stale blocks in manageable sized chunks
@@ -103,90 +142,66 @@ def create_chain_with_staleblocks(self):
        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node 0 reconnects
-            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
-            self.stop_node(0)
-            self.start_node(0, extra_args=self.full_node_default_args)
+            disconnect_nodes(self.nodes[0], 1)
+            disconnect_nodes(self.nodes[0], 2)
            # Mine 24 blocks in node 1
-            for i in range(24):
-                if j == 0:
-                    mine_large_block(self.nodes[1], self.utxo_cache_1)
-                else:
-                    # Add node1's wallet transactions back to the mempool, to
-                    # avoid the mined blocks from being too small.
-                    self.nodes[1].resendwallettransactions()
-                    self.nodes[1].generate(1)  # tx's already in mempool from previous disconnects
+            mine_large_blocks(self.nodes[1], 24)

            # Reorg back with 25 block chain from node 0
-            for i in range(25):
-                mine_large_block(self.nodes[0], self.utxo_cache_0)
+            mine_large_blocks(self.nodes[0], 25)

            # Create connections in the order so both nodes can see the reorg at the same time
-            connect_nodes(self.nodes[1], 0)
-            connect_nodes(self.nodes[2], 0)
+            connect_nodes(self.nodes[0], 1)
+            connect_nodes(self.nodes[0], 2)
            sync_blocks(self.nodes[0:3])

        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))

    def reorg_test(self):
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
-        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
-        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxweight=20000", "-checkblocks=5"])

        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: %d" % height)

-        invalidheight = height - 287
-        badhash = self.nodes[1].getblockhash(invalidheight)
-        self.log.info("Invalidating block %s at height %d" % (badhash, invalidheight))
-        self.nodes[1].invalidateblock(badhash)
+        self.forkheight = height - 287
+        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
+        self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
+        self.nodes[1].invalidateblock(self.forkhash)

        # We've now switched to our previously mined 24 block fork on node 1, but that's not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
-        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
-        curhash = self.nodes[1].getblockhash(invalidheight - 1)
+        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
+        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
-            curhash = self.nodes[1].getblockhash(invalidheight - 1)
+            curhash = self.nodes[1].getblockhash(self.forkheight - 1)

-        assert self.nodes[1].getblockcount() == invalidheight - 1
+        assert self.nodes[1].getblockcount() == self.forkheight - 1
        self.log.info("New best height: %d" % self.nodes[1].getblockcount())

-        # Reboot node1 to clear those giant tx's from mempool
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxweight=20000", "-checkblocks=5"])
+        # Disconnect node1 and generate the new chain
+        disconnect_nodes(self.nodes[0], 1)
+        disconnect_nodes(self.nodes[1], 2)

        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        self.log.info("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
-        connect_nodes(self.nodes[2], 1)
+        connect_nodes(self.nodes[1], 2)
        sync_blocks(self.nodes[0:3], timeout=120)

        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
-
-        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
+        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))

-        # Get node0's wallet transactions back in its mempool, to avoid the
-        # mined blocks from being too small.
-        self.nodes[0].resendwallettransactions()
+        self.log.info("Mine 220 more large blocks so we have requisite history")

-        for i in range(22):
-            # This can be slow, so do this in multiple RPC calls to avoid
-            # RPC timeouts.
-            self.nodes[0].generate(10)  # node 0 has many large tx's in its mempool from the disconnects
-            sync_blocks(self.nodes[0:3], timeout=300)
+        mine_large_blocks(self.nodes[0], 220)

        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
-
-        return invalidheight, badhash
+        assert_greater_than(550, usage)

    def reorg_back(self):
        # Verify that a block on the old main chain fork has been pruned away
@@ -219,17 +234,17 @@ def reorg_back(self):
        blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
        self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
        self.nodes[0].invalidateblock(curchainhash)
-        assert self.nodes[0].getblockcount() == self.mainchainheight
-        assert self.nodes[0].getbestblockhash() == self.mainchainhash2
+        assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
+        assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
        goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
        goalbestheight = first_reorg_height + 1

        self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        # Wait for Node 2 to reorg to proper height
        wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
-        assert self.nodes[2].getbestblockhash() == goalbesthash
+        assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
        # Verify we can now have the data for a block previously pruned
-        assert self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight
+        assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)

    def manual_test(self, node_number, use_timestamp):
        # at this point, node has 995 blocks and has not yet run in prune mode
@@ -287,38 +302,30 @@ def has_block(index):

        # height=100 too low to prune first block file so this is a no-op
        prune(100)
-        if not has_block(0):
-            raise AssertionError("blk00000.dat is missing when should still be there")
+        assert has_block(0), "blk00000.dat is missing when should still be there"

        # Does nothing
        node.pruneblockchain(height(0))
-        if not has_block(0):
-            raise AssertionError("blk00000.dat is missing when should still be there")
+        assert has_block(0), "blk00000.dat is missing when should still be there"

        # height=500 should prune first file
        prune(500)
-        if has_block(0):
-            raise AssertionError("blk00000.dat is still there, should be pruned by now")
-        if not has_block(1):
-            raise AssertionError("blk00001.dat is missing when should still be there")
+        assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
+        assert has_block(1), "blk00001.dat is missing when should still be there"

        # height=650 should prune second file
        prune(650)
-        if has_block(1):
-            raise AssertionError("blk00001.dat is still there, should be pruned by now")
+        assert not has_block(1), "blk00001.dat is still there, should be pruned by now"

        # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
        prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
-        if not has_block(2):
-            raise AssertionError("blk00002.dat is still there, should be pruned by now")
+        assert has_block(2), "blk00002.dat is missing when should still be there"

        # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
        node.generate(288)
        prune(1000)
-        if has_block(2):
-            raise AssertionError("blk00002.dat is still there, should be pruned by now")
-        if has_block(3):
-            raise AssertionError("blk00003.dat is still there, should be pruned by now")
+        assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
+        assert not has_block(3), "blk00003.dat is still there, should be pruned by now"

        # stop node, start back up with auto-prune at 550 MiB, make sure still runs
        self.stop_node(node_number)
@@ -339,21 +346,14 @@ def wallet_test(self):
        connect_nodes(self.nodes[0], 5)
        nds = [self.nodes[0], self.nodes[5]]
        sync_blocks(nds, wait=5, timeout=300)
-        self.stop_node(5) # stop and start to trigger rescan
+        self.stop_node(5)  # stop and start to trigger rescan
        self.start_node(5, extra_args=["-prune=550"])
        self.log.info("Success")

    def run_test(self):
-        self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
-        self.log.info("Mining a big blockchain of 995 blocks")
-
-        # Determine default relay fee
-        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
-
-        # Cache for utxos, as the listunspent may take a long time later in the test
-        self.utxo_cache_0 = []
-        self.utxo_cache_1 = []
+        self.log.info("Warning! This test requires 4GB of disk space")

+        self.log.info("Mining a big blockchain of 995 blocks")
        self.create_big_chain()
        # Chain diagram key:
        # * blocks on main chain
@@ -394,11 +394,11 @@ def run_test(self):
        # +...+(1044) &.. $...$(1319)

        # Save some current chain state for later use
-        self.mainchainheight = self.nodes[2].getblockcount() # 1320
+        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

        self.log.info("Check that we can survive a 288 block reorg still")
-        (self.forkheight, self.forkhash) = self.reorg_test()  # (1033, )
+        self.reorg_test()  # (1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain