@@ -102,7 +102,7 @@ def restart_node(self, node_index, expected_tip):
         # perhaps we generated a test case that blew up our cache?
         # TODO: If this happens a lot, we should try to restart without -dbcrashratio
         # and make sure that recovery happens.
-        raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
+        raise AssertionError(f"Unable to successfully restart node {node_index} in allotted time")
 
     def submit_block_catch_error(self, node_index, block):
         """Try submitting a block to the given node.
@@ -114,10 +114,10 @@ def submit_block_catch_error(self, node_index, block):
             self.nodes[node_index].submitblock(block)
             return True
         except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e:
-            self.log.debug("node %d submitblock raised exception: %s", node_index, e)
+            self.log.debug(f"node {node_index} submitblock raised exception: {e}")
             return False
         except OSError as e:
-            self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
+            self.log.debug(f"node {node_index} submitblock raised OSError exception: errno={e.errno}")
             if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
                 # The node has likely crashed
                 return False
@@ -142,15 +142,15 @@ def sync_node3blocks(self, block_hashes):
         # Deliver each block to each other node
         for i in range(3):
             nodei_utxo_hash = None
-            self.log.debug("Syncing blocks to node %d", i)
+            self.log.debug(f"Syncing blocks to node {i}")
             for (block_hash, block) in blocks:
                 # Get the block from node3, and submit to node_i
-                self.log.debug("submitting block %s", block_hash)
+                self.log.debug(f"submitting block {block_hash}")
                 if not self.submit_block_catch_error(i, block):
                     # TODO: more carefully check that the crash is due to -dbcrashratio
                     # (change the exit code perhaps, and check that here?)
                     self.wait_for_node_exit(i, timeout=30)
-                    self.log.debug("Restarting node %d after block hash %s", i, block_hash)
+                    self.log.debug(f"Restarting node {i} after block hash {block_hash}")
                     nodei_utxo_hash = self.restart_node(i, block_hash)
                     assert nodei_utxo_hash is not None
                     self.restart_counts[i] += 1
@@ -167,7 +167,7 @@ def sync_node3blocks(self, block_hashes):
             # - we only update the utxo cache after a node restart, since flushing
             # the cache is a no-op at that point
             if nodei_utxo_hash is not None:
-                self.log.debug("Checking txoutsetinfo matches for node %d", i)
+                self.log.debug(f"Checking txoutsetinfo matches for node {i}")
                 assert_equal(nodei_utxo_hash, node3_utxo_hash)
 
     def verify_utxo_hash(self):
@@ -218,14 +218,14 @@ def run_test(self):
         # Start by creating a lot of utxos on node3
         initial_height = self.nodes[3].getblockcount()
         utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
-        self.log.info("Prepped %d utxo entries", len(utxo_list))
+        self.log.info(f"Prepped {len(utxo_list)} utxo entries")
 
         # Sync these blocks with the other nodes
         block_hashes_to_sync = []
         for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
             block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
 
-        self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
+        self.log.debug(f"Syncing {len(block_hashes_to_sync)} blocks with other nodes")
         # Syncing the blocks could cause nodes to crash, so the test begins here.
         self.sync_node3blocks(block_hashes_to_sync)
 
@@ -235,18 +235,18 @@ def run_test(self):
         # each time through the loop, generate a bunch of transactions,
         # and then either mine a single new block on the tip, or some-sized reorg.
         for i in range(40):
-            self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
+            self.log.info(f"Iteration {i}, generating 2500 transactions {self.restart_counts}")
             # Generate a bunch of small-ish transactions
             self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
             # Pick a random block between current tip, and starting tip
             current_height = self.nodes[3].getblockcount()
             random_height = random.randint(starting_tip_height, current_height)
-            self.log.debug("At height %d, considering height %d", current_height, random_height)
+            self.log.debug(f"At height {current_height}, considering height {random_height}")
             if random_height > starting_tip_height:
                 # Randomly reorg from this point with some probability (1/4 for
                 # tip, 1/5 for tip-1, ...)
                 if random.random() < 1.0 / (current_height + 4 - random_height):
-                    self.log.debug("Invalidating block at height %d", random_height)
+                    self.log.debug(f"Invalidating block at height {random_height}")
                     self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
 
             # Now generate new blocks until we pass the old tip height
@@ -258,18 +258,18 @@ def run_test(self):
                 # new address to avoid mining a block that has just been invalidated
                 address=self.nodes[3].getnewaddress(),
             ))
-            self.log.debug("Syncing %d new blocks...", len(block_hashes))
+            self.log.debug(f"Syncing {len(block_hashes)} new blocks...")
             self.sync_node3blocks(block_hashes)
             utxo_list = self.nodes[3].listunspent()
-            self.log.debug("Node3 utxo count: %d", len(utxo_list))
+            self.log.debug(f"Node3 utxo count: {len(utxo_list)}")
 
         # Check that the utxo hashes agree with node3
         # Useful side effect: each utxo cache gets flushed here, so that we
         # won't get crashes on shutdown at the end of the test.
         self.verify_utxo_hash()
 
         # Check the test coverage
-        self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
+        self.log.info(f"Restarted nodes: {self.restart_counts}; crashes on restart: {self.crashed_on_restart}")
 
         # If no nodes were restarted, we didn't test anything.
         assert self.restart_counts != [0, 0, 0]
@@ -280,7 +280,7 @@ def run_test(self):
         # Warn if any of the nodes escaped restart.
         for i in range(3):
            if self.restart_counts[i] == 0:
-                self.log.warning("Node %d never crashed during utxo flush!", i)
+                self.log.warning(f"Node {i} never crashed during utxo flush!")
 
 
 if __name__ == "__main__":
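
The changes above swap logging's lazy %-style argument substitution for eager f-string formatting. A minimal standalone sketch of the two equivalent calls follows; the logger name and the values are hypothetical, chosen only for illustration and not taken from the test file:

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("example")

node_index = 2            # hypothetical node id, for illustration only
err = "connection reset"  # hypothetical error text, for illustration only

# Before: %-style placeholders, substituted by the logging module when the record is emitted
log.debug("node %d submitblock raised exception: %s", node_index, err)

# After: an f-string, formatted eagerly before being handed to the logger
log.debug(f"node {node_index} submitblock raised exception: {err}")

Both calls emit the same message; the f-string form is formatted even when the DEBUG level is disabled, which is harmless for a test like this but worth keeping in mind when reusing the pattern in hot paths.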