8
8
This test uses 4GB of disk space.
9
9
This test takes 30 mins or more (up to 2 hours)
10
10
"""
11
+ import os
11
12
13
+ from test_framework .blocktools import create_coinbase
14
+ from test_framework .messages import CBlock , ToHex
15
+ from test_framework .script import CScript , OP_RETURN , OP_NOP
12
16
from test_framework .test_framework import BitcoinTestFramework
13
- from test_framework .util import assert_equal , assert_greater_than , assert_raises_rpc_error , connect_nodes , mine_large_block , sync_blocks , wait_until
14
-
15
- import os
17
+ from test_framework .util import assert_equal , assert_greater_than , assert_raises_rpc_error , connect_nodes , disconnect_nodes , sync_blocks , wait_until
16
18
17
19
MIN_BLOCKS_TO_KEEP = 288
18
20
21
23
# compatible with pruning based on key creation time.
22
24
TIMESTAMP_WINDOW = 2 * 60 * 60
23
25
26
def mine_large_blocks(node, n):
    """Mine n consensus-valid blocks, each padded close to the maximum size.

    Every block's coinbase output carries an oversized scriptPubKey: an
    OP_RETURN followed by 950k OP_NOPs. That script would be non-standard in
    an ordinary transaction, but it is consensus-valid in a coinbase output,
    which lets us inflate block files quickly for the pruning test.
    """
    large_script = CScript([OP_RETURN] + [OP_NOP] * 950000)

    # Read the chain parameters for the first block we will build on.
    tip = node.getblock(node.getbestblockhash())
    next_height = int(tip["height"]) + 1
    if hasattr(mine_large_blocks, "nTime"):
        # A function attribute keeps the timestamp monotonically increasing
        # across calls, so each block created gets a unique hash.
        mine_large_blocks.nTime = min(mine_large_blocks.nTime, int(tip["time"])) + 1
    else:
        mine_large_blocks.nTime = int(tip["time"]) + 1
    prev_hash = int(tip["hash"], 16)

    for _ in range(n):
        # Coinbase transaction carrying the oversized scriptPubKey.
        coinbase_tx = create_coinbase(next_height)
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
        coinbase_tx.vout[0].scriptPubKey = large_script
        coinbase_tx.rehash()

        # Assemble a block around that single transaction.
        block = CBlock()
        block.nVersion = tip["version"]
        block.hashPrevBlock = prev_hash
        block.nTime = mine_large_blocks.nTime
        block.nBits = int('207fffff', 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        # Hand the finished block to the node over RPC.
        node.submitblock(ToHex(block))

        prev_hash = block.sha256
        next_height += 1
        mine_large_blocks.nTime += 1
24
67
25
68
def calc_usage(blockdir):
    """Return the combined size, in MiB, of the regular files in *blockdir*.

    Subdirectories and other non-regular entries are ignored.
    """
    # Build paths with os.path.join consistently. The original mixed raw
    # string concatenation (blockdir + f) for getsize with os.path.join for
    # isfile, which breaks whenever blockdir lacks a trailing separator.
    return sum(os.path.getsize(os.path.join(blockdir, f)) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
@@ -29,11 +72,10 @@ class PruneTest(BitcoinTestFramework):
29
72
def set_test_params (self ):
30
73
self .setup_clean_chain = True
31
74
self .num_nodes = 6
32
- self .rpc_timeout = 900
33
75
34
76
# Create nodes 0 and 1 to mine.
35
77
# Create node 2 to test pruning.
36
- self .full_node_default_args = ["-maxreceivebuffer=20000" , "-checkblocks=5" , "-limitdescendantcount=100" , "-limitdescendantsize=5000" , "-limitancestorcount=100" , "-limitancestorsize=5000" ]
78
+ self .full_node_default_args = ["-maxreceivebuffer=20000" , "-checkblocks=5" ]
37
79
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
38
80
# Create nodes 5 to test wallet in prune mode, but do not connect
39
81
self .extra_args = [
@@ -73,8 +115,7 @@ def create_big_chain(self):
73
115
self .nodes [0 ].generate (150 )
74
116
75
117
# Then mine enough full blocks to create more than 550MiB of data
76
- for i in range (645 ):
77
- mine_large_block (self .nodes [0 ], self .utxo_cache_0 )
118
+ mine_large_blocks (self .nodes [0 ], 645 )
78
119
79
120
sync_blocks (self .nodes [0 :5 ])
80
121
@@ -84,8 +125,7 @@ def test_height_min(self):
84
125
self .log .info ("Though we're already using more than 550MiB, current usage: %d" % calc_usage (self .prunedir ))
85
126
self .log .info ("Mining 25 more blocks should cause the first block file to be pruned" )
86
127
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
87
- for i in range (25 ):
88
- mine_large_block (self .nodes [0 ], self .utxo_cache_0 )
128
+ mine_large_blocks (self .nodes [0 ], 25 )
89
129
90
130
# Wait for blk00000.dat to be pruned
91
131
wait_until (lambda : not os .path .isfile (os .path .join (self .prunedir , "blk00000.dat" )), timeout = 30 )
@@ -102,22 +142,13 @@ def create_chain_with_staleblocks(self):
102
142
for j in range (12 ):
103
143
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
104
144
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
105
- # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
106
- self .stop_node (0 )
107
- self .start_node (0 , extra_args = self .full_node_default_args )
145
+ disconnect_nodes (self .nodes [0 ], 1 )
146
+ disconnect_nodes (self .nodes [0 ], 2 )
108
147
# Mine 24 blocks in node 1
109
- for i in range (24 ):
110
- if j == 0 :
111
- mine_large_block (self .nodes [1 ], self .utxo_cache_1 )
112
- else :
113
- # Add node1's wallet transactions back to the mempool, to
114
- # avoid the mined blocks from being too small.
115
- self .nodes [1 ].resendwallettransactions ()
116
- self .nodes [1 ].generate (1 ) #tx's already in mempool from previous disconnects
148
+ mine_large_blocks (self .nodes [1 ], 24 )
117
149
118
150
# Reorg back with 25 block chain from node 0
119
- for i in range (25 ):
120
- mine_large_block (self .nodes [0 ], self .utxo_cache_0 )
151
+ mine_large_blocks (self .nodes [0 ], 25 )
121
152
122
153
# Create connections in the order so both nodes can see the reorg at the same time
123
154
connect_nodes (self .nodes [0 ], 1 )
@@ -129,10 +160,6 @@ def create_chain_with_staleblocks(self):
129
160
def reorg_test (self ):
130
161
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
131
162
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
132
- # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
133
- # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
134
- self .stop_node (1 )
135
- self .start_node (1 , extra_args = ["-maxreceivebuffer=20000" ,"-blockmaxweight=20000" , "-checkblocks=5" ])
136
163
137
164
height = self .nodes [1 ].getblockcount ()
138
165
self .log .info ("Current block height: %d" % height )
@@ -153,9 +180,9 @@ def reorg_test(self):
153
180
assert self .nodes [1 ].getblockcount () == self .forkheight - 1
154
181
self .log .info ("New best height: %d" % self .nodes [1 ].getblockcount ())
155
182
156
- # Reboot node1 to clear those giant tx's from mempool
157
- self .stop_node ( 1 )
158
- self .start_node ( 1 , extra_args = [ "-maxreceivebuffer=20000" , "-blockmaxweight=20000" , "-checkblocks=5" ] )
183
+ # Disconnect node1 and generate the new chain
184
+ disconnect_nodes ( self .nodes [ 0 ], 1 )
185
+ disconnect_nodes ( self .nodes [ 1 ], 2 )
159
186
160
187
self .log .info ("Generating new longer chain of 300 more blocks" )
161
188
self .nodes [1 ].generate (300 )
@@ -167,17 +194,10 @@ def reorg_test(self):
167
194
168
195
self .log .info ("Verify height on node 2: %d" % self .nodes [2 ].getblockcount ())
169
196
self .log .info ("Usage possibly still high because of stale blocks in block files: %d" % calc_usage (self .prunedir ))
170
- self .log .info ("Mine 220 more large blocks so we have requisite history" )
171
197
172
- # Get node0's wallet transactions back in its mempool, to avoid the
173
- # mined blocks from being too small.
174
- self .nodes [0 ].resendwallettransactions ()
198
+ self .log .info ("Mine 220 more large blocks so we have requisite history" )
175
199
176
- for i in range (22 ):
177
- # This can be slow, so do this in multiple RPC calls to avoid
178
- # RPC timeouts.
179
- self .nodes [0 ].generate (10 ) #node 0 has many large tx's in its mempool from the disconnects
180
- sync_blocks (self .nodes [0 :3 ], timeout = 300 )
200
+ mine_large_blocks (self .nodes [0 ], 220 )
181
201
182
202
usage = calc_usage (self .prunedir )
183
203
self .log .info ("Usage should be below target: %d" % usage )
@@ -331,16 +351,9 @@ def wallet_test(self):
331
351
self .log .info ("Success" )
332
352
333
353
def run_test (self ):
334
- self .log .info ("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)" )
335
- self .log .info ("Mining a big blockchain of 995 blocks" )
336
-
337
- # Determine default relay fee
338
- self .relayfee = self .nodes [0 ].getnetworkinfo ()["relayfee" ]
339
-
340
- # Cache for utxos, as the listunspent may take a long time later in the test
341
- self .utxo_cache_0 = []
342
- self .utxo_cache_1 = []
354
+ self .log .info ("Warning! This test requires 4GB of disk space" )
343
355
356
+ self .log .info ("Mining a big blockchain of 995 blocks" )
344
357
self .create_big_chain ()
345
358
# Chain diagram key:
346
359
# * blocks on main chain
0 commit comments