1
+ from typing import (
2
+ List ,
3
+ NamedTuple ,
4
+ )
5
+
1
6
from eth_hash .auto import keccak
2
7
from eth_typing import (
3
8
Address ,
4
- Hash32
9
+ Hash32 ,
5
10
)
6
11
from eth_utils import (
7
12
ValidationError ,
13
+ encode_hex ,
8
14
get_extended_debug_logger ,
9
15
int_to_big_endian ,
16
+ to_bytes ,
17
+ to_int ,
10
18
)
11
19
import rlp
12
20
from trie import (
22
30
AtomicDatabaseAPI ,
23
31
DatabaseAPI ,
24
32
)
33
+ from eth .constants import (
34
+ BLANK_ROOT_HASH ,
35
+ )
25
36
from eth .db .backends .base import (
26
37
BaseDB ,
27
38
)
39
+ from eth .db .backends .memory import (
40
+ MemoryDB ,
41
+ )
28
42
from eth .db .batch import (
29
43
BatchDB ,
30
44
)
42
56
)
43
57
44
58
59
class PendingWrites(NamedTuple):
    """
    Snapshot of in-flight storage-trie state, captured immediately before an
    account's storage is deleted.

    If the EVM later reverts to a checkpoint that precedes the deletion, this
    snapshot is used to revive the storage trie exactly as it was.
    """
    # The in-place write trie as it stood at deletion time
    write_trie: HexaryTrie
    # A batch of all trie nodes written to the trie (not yet on disk)
    trie_nodes_batch: BatchDB
    # The root hash the trie started from
    starting_root_hash: Hash32
68
+
69
+
45
70
class StorageLookup (BaseDB ):
46
71
"""
47
72
This lookup converts lookups of storage slot integers into the appropriate trie lookup.
@@ -51,12 +76,23 @@ class StorageLookup(BaseDB):
51
76
"""
52
77
logger = get_extended_debug_logger ("eth.db.storage.StorageLookup" )
53
78
79
+ # The trie that is modified in-place, used to calculate storage root on-demand
80
+ _write_trie : HexaryTrie
81
+
82
+ # These are the new trie nodes, waiting to be committed to disk
83
+ _trie_nodes_batch : BatchDB
84
+
85
+ # When deleting an account, push the pending write info onto this stack.
86
+ # This stack can get as big as the number of transactions per block: one for each delete.
87
+ _historical_write_tries : List [PendingWrites ]
88
+
54
89
def __init__ (self , db : DatabaseAPI , storage_root : Hash32 , address : Address ) -> None :
55
90
self ._db = db
56
- self ._starting_root_hash = storage_root
91
+
92
+ # Set the starting root hash, to be used for on-disk storage read lookups
93
+ self ._initialize_to_root_hash (storage_root )
94
+
57
95
self ._address = address
58
- self ._write_trie = None
59
- self ._trie_nodes_batch : BatchDB = None
60
96
61
97
def _get_write_trie (self ) -> HexaryTrie :
62
98
if self ._trie_nodes_batch is None :
@@ -120,18 +156,21 @@ def __delitem__(self, key: bytes) -> None:
120
156
121
157
@property
122
158
def has_changed_root (self ) -> bool :
123
- return self ._write_trie and self . _write_trie . root_hash != self . _starting_root_hash
159
+ return self ._write_trie is not None
124
160
125
161
def get_changed_root (self ) -> Hash32 :
126
162
if self ._write_trie is not None :
127
163
return self ._write_trie .root_hash
128
164
else :
129
165
raise ValidationError ("Asked for changed root when no writes have been made" )
130
166
131
- def _clear_changed_root (self ) -> None :
167
+ def _initialize_to_root_hash (self , root_hash : Hash32 ) -> None :
168
+ self ._starting_root_hash = root_hash
132
169
self ._write_trie = None
133
170
self ._trie_nodes_batch = None
134
- self ._starting_root_hash = None
171
+
172
+ # Reset the historical writes, which can't be reverted after committing
173
+ self ._historical_write_tries = []
135
174
136
175
def commit_to (self , db : DatabaseAPI ) -> None :
137
176
"""
@@ -142,10 +181,67 @@ def commit_to(self, db: DatabaseAPI) -> None:
142
181
if self ._trie_nodes_batch is None :
143
182
raise ValidationError (
144
183
"It is invalid to commit an account's storage if it has no pending changes. "
145
- "Always check storage_lookup.has_changed_root before attempting to commit."
184
+ "Always check storage_lookup.has_changed_root before attempting to commit. "
185
+ f"Write tries on stack = { len (self ._historical_write_tries )} ; Root hash = "
186
+ f"{ encode_hex (self ._starting_root_hash )} "
146
187
)
147
188
self ._trie_nodes_batch .commit_to (db , apply_deletes = False )
148
- self ._clear_changed_root ()
189
+
190
+ # Mark the trie as having been all written out to the database.
191
+ # It removes the 'dirty' flag and clears out any pending writes.
192
+ self ._initialize_to_root_hash (self ._write_trie .root_hash )
193
+
194
+ def new_trie (self ) -> int :
195
+ """
196
+ Switch to an empty trie. Save the old trie, and pending writes, in
197
+ case of a revert.
198
+
199
+ :return: index for reviving the previous trie
200
+ """
201
+ write_trie = self ._get_write_trie ()
202
+
203
+ # Write the previous trie into a historical stack
204
+ self ._historical_write_tries .append (PendingWrites (
205
+ write_trie ,
206
+ self ._trie_nodes_batch ,
207
+ self ._starting_root_hash ,
208
+ ))
209
+
210
+ new_idx = len (self ._historical_write_tries )
211
+ self ._starting_root_hash = BLANK_ROOT_HASH
212
+ self ._write_trie = None
213
+ self ._trie_nodes_batch = None
214
+
215
+ return new_idx
216
+
217
+ def rollback_trie (self , trie_index : int ) -> None :
218
+ """
219
+ Revert back to the previous trie, using the index returned by a
220
+ :meth:`~new_trie` call. The index returned by that call returns you
221
+ to the trie in place *before* the call.
222
+
223
+ :param trie_index: index for reviving the previous trie
224
+ """
225
+
226
+ if trie_index >= len (self ._historical_write_tries ):
227
+ raise ValidationError (
228
+ f"Trying to roll back a delete to index { trie_index } , but there are only"
229
+ f" { len (self ._historical_write_tries )} indices available."
230
+ )
231
+
232
+ (
233
+ self ._write_trie ,
234
+ self ._trie_nodes_batch ,
235
+ self ._starting_root_hash ,
236
+ ) = self ._historical_write_tries [trie_index ]
237
+
238
+ # Cannot roll forward after a rollback, so remove created/ignored tries.
239
+ # This also deletes the trie that you just reverted to. It will be re-added
240
+ # to the stack when the next new_trie() is called.
241
+ del self ._historical_write_tries [trie_index :]
242
+
243
+
244
# Key under which the per-account "clear count" (how many times this account's
# storage has been wiped) is journaled; see AccountStorageDB.delete()/discard().
CLEAR_COUNT_KEY_NAME = b'clear-count'
149
245
150
246
151
247
class AccountStorageDB (AccountStorageDatabaseAPI ):
@@ -187,9 +283,15 @@ def __init__(self, db: AtomicDatabaseAPI, storage_root: Hash32, address: Address
187
283
self ._address = address
188
284
self ._storage_lookup = StorageLookup (db , storage_root , address )
189
285
self ._storage_cache = CacheDB (self ._storage_lookup )
190
- self ._locked_changes = BatchDB (self ._storage_cache )
286
+ self ._locked_changes = JournalDB (self ._storage_cache )
191
287
self ._journal_storage = JournalDB (self ._locked_changes )
192
288
289
+ # Track how many times we have cleared the storage. This is journaled
290
+ # in lockstep with other storage changes. That way, we can detect if a revert
291
+ # causes use to revert past the previous storage deletion. The clear count is used
292
+ # as an index to find the base trie from before the revert.
293
+ self ._clear_count = JournalDB (MemoryDB ({CLEAR_COUNT_KEY_NAME : to_bytes (0 )}))
294
+
193
295
def get (self , slot : int , from_journal : bool = True ) -> int :
194
296
key = int_to_big_endian (slot )
195
297
lookup_db = self ._journal_storage if from_journal else self ._locked_changes
@@ -222,40 +324,84 @@ def set(self, slot: int, value: int) -> None:
222
324
223
325
def delete (self ) -> None :
224
326
self .logger .debug2 (
225
- "Deleting all storage in account 0x%s, hashed 0x%s " ,
327
+ "Deleting all storage in account 0x%s" ,
226
328
self ._address .hex (),
227
- keccak (self ._address ).hex (),
228
329
)
229
330
self ._journal_storage .clear ()
230
331
self ._storage_cache .reset_cache ()
231
332
333
+ # Empty out the storage lookup trie (keeping history, in case of a revert)
334
+ new_clear_count = self ._storage_lookup .new_trie ()
335
+
336
+ # Look up the previous count of how many times the account has been deleted.
337
+ # This can happen multiple times in one block, via CREATE2.
338
+ old_clear_count = to_int (self ._clear_count [CLEAR_COUNT_KEY_NAME ])
339
+
340
+ # Gut check that we have incremented correctly
341
+ if new_clear_count != old_clear_count + 1 :
342
+ raise ValidationError (
343
+ f"Must increase clear count by one on each delete. Instead, went from"
344
+ f" { old_clear_count } -> { new_clear_count } in account 0x{ self ._address .hex ()} "
345
+ )
346
+
347
+ # Save the new count, ie~ the index used for a future revert.
348
+ self ._clear_count [CLEAR_COUNT_KEY_NAME ] = to_bytes (new_clear_count )
349
+
232
350
def record (self , checkpoint : JournalDBCheckpoint ) -> None :
233
351
self ._journal_storage .record (checkpoint )
352
+ self ._clear_count .record (checkpoint )
234
353
235
354
    def discard(self, checkpoint: JournalDBCheckpoint) -> None:
        """
        Throw away all changes made after ``checkpoint``, including any
        account-storage deletion that happened after it.
        """
        self.logger.debug2('discard checkpoint %r', checkpoint)
        # Capture the clear count *before* rewinding, so we can tell whether
        # this discard crosses a storage deletion.
        latest_clear_count = to_int(self._clear_count[CLEAR_COUNT_KEY_NAME])

        if self._journal_storage.has_checkpoint(checkpoint):
            self._journal_storage.discard(checkpoint)
            self._clear_count.discard(checkpoint)
        else:
            # if the checkpoint comes before this account started tracking,
            # then simply reset to the beginning
            self._journal_storage.reset()
            self._clear_count.reset()
        self._storage_cache.reset_cache()

        # Re-read the (possibly rewound) clear count to detect a crossed delete
        reverted_clear_count = to_int(self._clear_count[CLEAR_COUNT_KEY_NAME])

        if reverted_clear_count == latest_clear_count - 1:
            # This revert rewinds past a trie deletion, so roll back to the trie at
            # that point. We use the clear count as an index to get back to the
            # old base trie.
            self._storage_lookup.rollback_trie(reverted_clear_count)
        elif reverted_clear_count == latest_clear_count:
            # No change in the base trie, take no action
            pass
        else:
            # Although CREATE2 permits multiple creates and deletes in a single block,
            # you can still only revert across a single delete. That's because delete
            # is only triggered at the end of the transaction.
            raise ValidationError(
                f"This revert has changed the clear count in an invalid way, from"
                f" {latest_clear_count} to {reverted_clear_count}, in 0x{self._address.hex()}"
            )
386
+
245
387
def commit (self , checkpoint : JournalDBCheckpoint ) -> None :
246
388
if self ._journal_storage .has_checkpoint (checkpoint ):
247
389
self ._journal_storage .commit (checkpoint )
390
+ self ._clear_count .commit (checkpoint )
248
391
else :
249
392
# if the checkpoint comes before this account started tracking,
250
393
# then flatten all changes, without persisting
251
394
self ._journal_storage .flatten ()
395
+ self ._clear_count .flatten ()
252
396
253
397
def lock_changes (self ) -> None :
398
+ if self ._journal_storage .has_clear ():
399
+ self ._locked_changes .clear ()
254
400
self ._journal_storage .persist ()
255
401
256
402
    def make_storage_root(self) -> None:
        """
        Push all pending slot writes down through the locked layer, so the
        underlying storage trie reflects every change.
        """
        self.lock_changes()
        self._locked_changes.persist()
259
405
260
406
def _validate_flushed (self ) -> None :
261
407
"""
0 commit comments