 
 
 def calc_usage(blockdir):
-    return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
+    return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
 
 class PruneTest(BitcoinTestFramework):
     def set_test_params(self):
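For reference, calc_usage totals the sizes of the files sitting directly in the node's blocks directory and reports the result in MiB. A minimal standalone sketch of the helper and how it might be called; the directory path shown is purely hypothetical:

import os

def calc_usage(blockdir):
    # Total size, in MiB, of the regular files directly inside blockdir
    # (e.g. blk*.dat and rev*.dat in a node's blocks directory).
    return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir)
               if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)

# Because the helper concatenates strings rather than joining paths, the
# argument must end with a path separator:
# calc_usage("/tmp/testnode0/regtest/blocks/")  # hypothetical path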
@@ -55,7 +55,7 @@ def setup_network(self):
 
         connect_nodes(self.nodes[0], 1)
         connect_nodes(self.nodes[1], 2)
-        connect_nodes(self.nodes[2], 0)
+        connect_nodes(self.nodes[0], 2)
         connect_nodes(self.nodes[0], 3)
         connect_nodes(self.nodes[0], 4)
         sync_blocks(self.nodes[0:5])
@@ -71,15 +71,15 @@ def create_big_chain(self):
         self.nodes[1].generate(200)
         sync_blocks(self.nodes[0:2])
         self.nodes[0].generate(150)
+
         # Then mine enough full blocks to create more than 550MiB of data
         for i in range(645):
             mine_large_block(self.nodes[0], self.utxo_cache_0)
 
         sync_blocks(self.nodes[0:5])
 
     def test_height_min(self):
-        if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
-            raise AssertionError("blk00000.dat is missing, pruning too early")
+        assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
         self.log.info("Success")
         self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
         self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
@@ -93,8 +93,7 @@ def test_height_min(self):
         self.log.info("Success")
         usage = calc_usage(self.prunedir)
         self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
+        assert_greater_than(550, usage)
 
     def create_chain_with_staleblocks(self):
         # Create stale blocks in manageable sized chunks
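Note: assert_greater_than comes from the test framework's util module and replaces the explicit if/raise check; called as assert_greater_than(550, usage), it fails whenever 550 <= usage. A sketch of the idea (not necessarily the exact upstream implementation):

def assert_greater_than(thing1, thing2):
    # Fail unless thing1 > thing2, keeping both operands in the error message.
    if thing1 <= thing2:
        raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))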
@@ -121,8 +120,8 @@ def create_chain_with_staleblocks(self):
                 mine_large_block(self.nodes[0], self.utxo_cache_0)
 
             # Create connections in the order so both nodes can see the reorg at the same time
-            connect_nodes(self.nodes[1], 0)
-            connect_nodes(self.nodes[2], 0)
+            connect_nodes(self.nodes[0], 1)
+            connect_nodes(self.nodes[0], 2)
             sync_blocks(self.nodes[0:3])
 
         self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
@@ -138,20 +137,20 @@ def reorg_test(self):
         height = self.nodes[1].getblockcount()
         self.log.info("Current block height: %d" % height)
 
-        invalidheight = height - 287
-        badhash = self.nodes[1].getblockhash(invalidheight)
-        self.log.info("Invalidating block %s at height %d" % (badhash, invalidheight))
-        self.nodes[1].invalidateblock(badhash)
+        self.forkheight = height - 287
+        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
+        self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
+        self.nodes[1].invalidateblock(self.forkhash)
 
         # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
         # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
-        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
-        curhash = self.nodes[1].getblockhash(invalidheight - 1)
+        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
+        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
         while curhash != mainchainhash:
             self.nodes[1].invalidateblock(curhash)
-            curhash = self.nodes[1].getblockhash(invalidheight - 1)
+            curhash = self.nodes[1].getblockhash(self.forkheight - 1)
 
-        assert self.nodes[1].getblockcount() == invalidheight - 1
+        assert self.nodes[1].getblockcount() == self.forkheight - 1
         self.log.info("New best height: %d" % self.nodes[1].getblockcount())
 
         # Reboot node1 to clear those giant tx's from mempool
@@ -163,13 +162,12 @@ def reorg_test(self):
 
         self.log.info("Reconnect nodes")
         connect_nodes(self.nodes[0], 1)
-        connect_nodes(self.nodes[2], 1)
+        connect_nodes(self.nodes[1], 2)
         sync_blocks(self.nodes[0:3], timeout=120)
 
         self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
-
-        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
+        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
+        self.log.info("Mine 220 more large blocks so we have requisite history")
 
         # Get node0's wallet transactions back in its mempool, to avoid the
         # mined blocks from being too small.
@@ -183,10 +181,7 @@ def reorg_test(self):
 
         usage = calc_usage(self.prunedir)
         self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
-
-        return invalidheight, badhash
+        assert_greater_than(550, usage)
 
     def reorg_back(self):
         # Verify that a block on the old main chain fork has been pruned away
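Note on the removed return value above: reorg_test now records the fork point as instance state (self.forkheight and self.forkhash, set earlier in this diff), and reorg_back in the next hunk reads those same attributes, so nothing needs to be threaded back through run_test. A minimal sketch of the pattern with placeholder values:

class Sketch:
    def reorg_test(self):
        # store the fork point on the test object instead of returning it
        self.forkheight = 1033       # placeholder height
        self.forkhash = "00" * 32    # placeholder block hash

    def reorg_back(self):
        # a later step reads the shared state directly
        assert self.forkheight == 1033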
@@ -219,17 +214,17 @@ def reorg_back(self):
         blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
         self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
         self.nodes[0].invalidateblock(curchainhash)
-        assert self.nodes[0].getblockcount() == self.mainchainheight
-        assert self.nodes[0].getbestblockhash() == self.mainchainhash2
+        assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
+        assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
         goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
         goalbestheight = first_reorg_height + 1
 
         self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
         # Wait for Node 2 to reorg to proper height
         wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
-        assert self.nodes[2].getbestblockhash() == goalbesthash
+        assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
         # Verify we can now have the data for a block previously pruned
-        assert self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight
+        assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
 
     def manual_test(self, node_number, use_timestamp):
         # at this point, node has 995 blocks and has not yet run in prune mode
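Note: assert_equal, also from the framework's util module, includes both operands in the failure message, which is what makes it preferable to a bare assert on ==. Roughly (a sketch, not necessarily the exact upstream code):

def assert_equal(thing1, thing2):
    # A bare "assert a == b" reports nothing useful on failure;
    # this variant shows both values.
    if thing1 != thing2:
        raise AssertionError("not(%s == %s)" % (str(thing1), str(thing2)))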
@@ -287,38 +282,30 @@ def has_block(index):
 
         # height=100 too low to prune first block file so this is a no-op
         prune(100)
-        if not has_block(0):
-            raise AssertionError("blk00000.dat is missing when should still be there")
+        assert has_block(0), "blk00000.dat is missing when should still be there"
 
         # Does nothing
         node.pruneblockchain(height(0))
-        if not has_block(0):
-            raise AssertionError("blk00000.dat is missing when should still be there")
+        assert has_block(0), "blk00000.dat is missing when should still be there"
 
         # height=500 should prune first file
         prune(500)
-        if has_block(0):
-            raise AssertionError("blk00000.dat is still there, should be pruned by now")
-        if not has_block(1):
-            raise AssertionError("blk00001.dat is missing when should still be there")
+        assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
+        assert has_block(1), "blk00001.dat is missing when should still be there"
 
         # height=650 should prune second file
        prune(650)
-        if has_block(1):
-            raise AssertionError("blk00001.dat is still there, should be pruned by now")
+        assert not has_block(1), "blk00001.dat is still there, should be pruned by now"
 
         # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
         prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
-        if not has_block(2):
-            raise AssertionError("blk00002.dat is still there, should be pruned by now")
+        assert has_block(2), "blk00002.dat is still there, should be pruned by now"
 
         # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
         node.generate(288)
         prune(1000)
-        if has_block(2):
-            raise AssertionError("blk00002.dat is still there, should be pruned by now")
-        if has_block(3):
-            raise AssertionError("blk00003.dat is still there, should be pruned by now")
+        assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
+        assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
 
         # stop node, start back up with auto-prune at 550 MiB, make sure still runs
         self.stop_node(node_number)
@@ -339,7 +326,7 @@ def wallet_test(self):
         connect_nodes(self.nodes[0], 5)
         nds = [self.nodes[0], self.nodes[5]]
         sync_blocks(nds, wait=5, timeout=300)
-        self.stop_node(5)  # stop and start to trigger rescan
+        self.stop_node(5)  # stop and start to trigger rescan
         self.start_node(5, extra_args=["-prune=550"])
         self.log.info("Success")
 
@@ -394,11 +381,11 @@ def run_test(self):
         # +...+(1044) &.. $...$(1319)
 
         # Save some current chain state for later use
-        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
+        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
         self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
 
         self.log.info("Check that we can survive a 288 block reorg still")
-        (self.forkheight, self.forkhash) = self.reorg_test()  # (1033, )
+        self.reorg_test()  # (1033, )
         # Now create a 288 block reorg by mining a longer chain on N1
         # First disconnect N1
         # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain