
Commit bced740

Make PruneExecutionDB not depend on consensusDB
Refactor PruneExecutionDB, more specifically findImportantRoots, so that it no longer depends on consensusDB but only on executionDB.

Signed-off-by: Igor Braga <5835477+bragaigor@users.noreply.github.com>
1 parent 0aa6b18 commit bced740

6 files changed (+63 / -84 lines)

changelog/bragaigor-nit-4266.md

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+### Changed
+- Make `PruneExecutionDB` only depend on `executionDB` by removing `consensusDB` dependency
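
The heart of the change: the last validated block hash is now carried in the execution database itself, so the pruner no longer needs to open the consensus database ("arbitrumdata"). A minimal round-trip sketch of that handoff, using go-ethereum's in-memory database as a stand-in for the real executionDB (the key literal matches the one added in sync_monitor.go below):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Stand-in for the real executionDB ("l2chaindata").
	db := rawdb.NewMemoryDatabase()

	// Same key literal as ValidatedBlockHashKey in sync_monitor.go.
	key := []byte("LastValidatedBlockHashKey")
	hash := common.HexToHash("0x01")

	// Write side (SetFinalityData): persist the hash in the execution database.
	if err := db.Put(key, hash.Bytes()); err != nil {
		panic(err)
	}

	// Read side (findImportantRoots): recover it without touching "arbitrumdata".
	data, err := db.Get(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(common.BytesToHash(data) == hash) // true
}
```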

cmd/pruning/pruning.go

Lines changed: 18 additions & 67 deletions
@@ -21,17 +21,12 @@ import (
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rpc"
 
-	"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
-	"github.com/offchainlabs/nitro/arbnode/db/read"
-	"github.com/offchainlabs/nitro/arbnode/mel"
-	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/bold/protocol"
 	"github.com/offchainlabs/nitro/cmd/chaininfo"
 	"github.com/offchainlabs/nitro/cmd/conf"
 	"github.com/offchainlabs/nitro/execution/gethexec"
 	"github.com/offchainlabs/nitro/solgen/go/bridgegen"
 	"github.com/offchainlabs/nitro/solgen/go/rollupgen"
-	"github.com/offchainlabs/nitro/staker"
 	"github.com/offchainlabs/nitro/staker/bold"
 	legacystaker "github.com/offchainlabs/nitro/staker/legacy"
 	multiprotocolstaker "github.com/offchainlabs/nitro/staker/multi_protocol"
@@ -94,16 +89,6 @@ func findImportantRoots(ctx context.Context, executionDB ethdb.Database, stack *
 	if chainConfig == nil {
 		return nil, errors.New("database doesn't have a chain config (was this node initialized?)")
 	}
-	consensusDB, err := stack.OpenDatabaseWithOptions("arbitrumdata", node.DatabaseOptions{MetricsNamespace: "arbitrumdata/", ReadOnly: true, PebbleExtraOptions: persistentConfig.Pebble.ExtraOptions("arbitrumdata"), NoFreezer: true})
-	if err != nil {
-		return nil, err
-	}
-	defer func() {
-		err := consensusDB.Close()
-		if err != nil {
-			log.Warn("failed to close arbitrum database after finding pruning targets", "err", err)
-		}
-	}()
 	roots := importantRoots{
 		executionDB: executionDB,
 	}
@@ -113,7 +98,7 @@ func findImportantRoots(ctx context.Context, executionDB ethdb.Database, stack *
 	if genesisHeader == nil {
 		return nil, errors.New("missing L2 genesis block header")
 	}
-	err = roots.addHeader(genesisHeader, false)
+	err := roots.addHeader(genesisHeader, false)
 	if err != nil {
 		return nil, err
 	}
@@ -139,24 +124,25 @@ func findImportantRoots(ctx context.Context, executionDB ethdb.Database, stack *
 			log.Warn("missing latest confirmed block", "hash", confirmedHash)
 		}
 
-		validatorDB := rawdb.NewTable(consensusDB, storage.BlockValidatorPrefix)
-		lastValidated, err := staker.ReadLastValidatedInfo(validatorDB)
+		data, err := executionDB.Get(gethexec.ValidatedBlockHashKey)
 		if err != nil {
 			return nil, err
 		}
-		if lastValidated != nil {
+		lastValidatedBlockHash := common.BytesToHash(data)
+
+		if lastValidatedBlockHash != (common.Hash{}) {
 			var lastValidatedHeader *types.Header
-			headerNum, found := rawdb.ReadHeaderNumber(executionDB, lastValidated.GlobalState.BlockHash)
+			headerNum, found := rawdb.ReadHeaderNumber(executionDB, lastValidatedBlockHash)
 			if found {
-				lastValidatedHeader = rawdb.ReadHeader(executionDB, lastValidated.GlobalState.BlockHash, headerNum)
+				lastValidatedHeader = rawdb.ReadHeader(executionDB, lastValidatedBlockHash, headerNum)
 			}
 			if lastValidatedHeader != nil {
 				err = roots.addHeader(lastValidatedHeader, false)
 				if err != nil {
 					return nil, err
 				}
 			} else {
-				log.Warn("missing latest validated block", "hash", lastValidated.GlobalState.BlockHash)
+				log.Warn("missing latest validated block", "hash", lastValidatedBlockHash)
 			}
 		}
 	} else if initConfig.Prune == "full" || initConfig.Prune == "minimal" {
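
The lookup above goes hash, then header number, then header, with the zero hash treated as "nothing recorded yet". A self-contained sketch of the pattern, assuming the Offchain Labs go-ethereum fork where rawdb.ReadHeaderNumber returns (uint64, bool) (upstream geth returns *uint64):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Scaffolding: store a header so there is something to find.
	// WriteHeader also records the hash-to-number mapping.
	header := &types.Header{Number: big.NewInt(42)}
	rawdb.WriteHeader(db, header)

	hash := header.Hash()
	if hash == (common.Hash{}) {
		return // zero hash would mean no validated block was ever recorded
	}
	// Fork signature assumed here: (uint64, bool).
	if num, found := rawdb.ReadHeaderNumber(db, hash); found {
		recovered := rawdb.ReadHeader(db, hash, num)
		fmt.Println(recovered.Number) // 42
	}
}
```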
@@ -179,56 +165,21 @@ func findImportantRoots(ctx context.Context, executionDB ethdb.Database, stack *
 		return nil, fmt.Errorf("unknown pruning mode: \"%v\"", initConfig.Prune)
 	}
 	if initConfig.Prune != "minimal" && l1Client != nil {
-		// in pruning modes other then "minimal", find the latest finalized block and add it as a pruning target
-		l1Block, err := l1Client.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber)))
-		if err != nil {
-			return nil, fmt.Errorf("failed to get finalized block: %w", err)
+		// in pruning modes other than "minimal", get the latest finalized block and add it as a pruning target
+		finalizedBlockHash := rawdb.ReadFinalizedBlockHash(executionDB)
+		finalizedBlockNumber, ok := rawdb.ReadHeaderNumber(executionDB, finalizedBlockHash)
+		if !ok {
+			return nil, errors.New("Number of finalized block is missing")
 		}
-		l1BlockNum := l1Block.NumberU64()
-		var batch uint64
-		if melEnabled {
-			batch, err = read.MELSequencerBatchCount(consensusDB)
+
+		l2Header := rawdb.ReadHeader(executionDB, finalizedBlockHash, finalizedBlockNumber)
+		if l2Header == nil {
+			log.Warn("latest finalized L2 block is unknown", "blockNum", finalizedBlockNumber)
 		} else {
-			batch, err = read.SequencerBatchCount(consensusDB)
-		}
-		if err != nil {
-			return nil, err
-		}
-		for {
-			if ctx.Err() != nil {
-				return nil, ctx.Err()
-			}
-			if batch == 0 {
-				// No batch has been finalized
-				break
-			}
-			batch -= 1
-			var meta mel.BatchMetadata
-			if melEnabled {
-				meta, err = read.MELBatchMetadata(consensusDB, batch)
-			} else {
-				meta, err = read.BatchMetadata(consensusDB, batch)
-			}
+			err = roots.addHeader(l2Header, false)
 			if err != nil {
 				return nil, err
 			}
-			if meta.ParentChainBlock <= l1BlockNum {
-				// #nosec G115
-				signedBlockNum := int64(arbutil.MessageIndexToBlockNumber(meta.MessageCount, genesisNum)) - 1
-				// #nosec G115
-				blockNum := uint64(signedBlockNum)
-				l2Hash := rawdb.ReadCanonicalHash(executionDB, blockNum)
-				l2Header := rawdb.ReadHeader(executionDB, l2Hash, blockNum)
-				if l2Header == nil {
-					log.Warn("latest finalized L2 block is unknown", "blockNum", signedBlockNum)
-					break
-				}
-				err = roots.addHeader(l2Header, false)
-				if err != nil {
-					return nil, err
-				}
-				break
-			}
 		}
 	}
 	roots.roots = append(roots.roots, common.Hash{}) // the latest snapshot
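
The finalized pruning target is resolved the same way: instead of an L1 BlockByNumber call followed by a batch-metadata walk over consensusDB, the new code reads the finalized block hash that geth already persists in the execution database. A sketch of that read path; the WriteFinalizedBlockHash call here only simulates what the node records during normal operation:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Normally written by the node when it learns of a new finalized block.
	rawdb.WriteFinalizedBlockHash(db, common.HexToHash("0xabc"))

	// The pruner's read side: no consensusDB, no L1 RPC round-trip.
	finalized := rawdb.ReadFinalizedBlockHash(db)
	fmt.Println(finalized) // 0x0000...0abc
}
```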

execution/gethexec/node.go

Lines changed: 1 addition & 1 deletion
@@ -612,7 +612,7 @@ func (n *ExecutionNode) SetFinalityData(
 	finalizedFinalityData *arbutil.FinalityData,
 	validatedFinalityData *arbutil.FinalityData,
 ) containers.PromiseInterface[struct{}] {
-	err := n.SyncMonitor.SetFinalityData(safeFinalityData, finalizedFinalityData, validatedFinalityData)
+	err := n.SyncMonitor.SetFinalityData(n.ExecutionDB, safeFinalityData, finalizedFinalityData, validatedFinalityData)
 	if err != nil {
 		return containers.NewReadyPromise(struct{}{}, err)
 	}

execution/gethexec/sync_monitor.go

Lines changed: 9 additions & 0 deletions
@@ -12,13 +12,16 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 
 	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/consensus"
 	"github.com/offchainlabs/nitro/execution"
 )
 
+var ValidatedBlockHashKey = []byte("LastValidatedBlockHashKey")
+
 type syncDataEntry struct {
 	maxMessageCount arbutil.MessageIndex
 	timestamp       time.Time
@@ -274,6 +277,7 @@ func (s *SyncMonitor) getFinalityBlockHeader(
 }
 
 func (s *SyncMonitor) SetFinalityData(
+	executionDB ethdb.Database,
 	safeFinalityData *arbutil.FinalityData,
 	finalizedFinalityData *arbutil.FinalityData,
 	validatedFinalityData *arbutil.FinalityData,
@@ -288,6 +292,11 @@ func (s *SyncMonitor) SetFinalityData(
 	}
 	s.exec.bc.SetFinalized(finalizedBlockHeader)
 
+	if executionDB != nil && finalizedBlockHeader != nil {
+		finalizedBlockHash := finalizedBlockHeader.Hash()
+		executionDB.Put(ValidatedBlockHashKey, finalizedBlockHash.Bytes())
+	}
+
 	safeBlockHeader, err := s.getFinalityBlockHeader(
 		s.config.SafeBlockWaitForBlockValidator,
 		validatedFinalityData,
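
Note the nil guard on the new write path: persisting is skipped when either the database or the finalized header is absent, which is what lets the tests below keep calling SetFinalityData with a nil database. A sketch distilling that branch into a hypothetical helper (persistFinalizedHash is not a function in this commit):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// ValidatedBlockHashKey mirrors the key introduced in this commit.
var ValidatedBlockHashKey = []byte("LastValidatedBlockHashKey")

// persistFinalizedHash is a hypothetical helper distilling the new branch in
// SetFinalityData: a no-op when db or header is nil.
func persistFinalizedHash(db ethdb.Database, header *types.Header) error {
	if db == nil || header == nil {
		return nil
	}
	return db.Put(ValidatedBlockHashKey, header.Hash().Bytes())
}

func main() {
	db := rawdb.NewMemoryDatabase()
	header := &types.Header{Number: big.NewInt(7)}

	_ = persistFinalizedHash(nil, header) // safe no-op, as in the tests
	if err := persistFinalizedHash(db, header); err != nil {
		panic(err)
	}
	data, _ := db.Get(ValidatedBlockHashKey)
	fmt.Printf("%x\n", data) // the finalized header's hash
}
```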

system_tests/finality_data_test.go

Lines changed: 8 additions & 8 deletions
@@ -174,20 +174,20 @@ func TestFinalityDataWaitForBlockValidator(t *testing.T) {
 		BlockHash: validatedMsgResult.BlockHash,
 	}
 
-	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(&safeFinalityData, &finalizedFinalityData, &validatedFinalityData)
+	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, &safeFinalityData, &finalizedFinalityData, &validatedFinalityData)
 	Require(t, err)
 
 	// wait for block validator is set to true in second node
 	checksFinalityData(t, "first node", ctx, builder.L2, validatedMsgIdx, validatedMsgIdx)
 
-	err = testClient2ndNode.ExecNode.SyncMonitor.SetFinalityData(&safeFinalityData, &finalizedFinalityData, &validatedFinalityData)
+	err = testClient2ndNode.ExecNode.SyncMonitor.SetFinalityData(nil, &safeFinalityData, &finalizedFinalityData, &validatedFinalityData)
 	Require(t, err)
 
 	// wait for block validator is not set to true in second node
 	checksFinalityData(t, "2nd node", ctx, testClient2ndNode, finalizedMsgIdx, safeMsgIdx)
 
 	// if validatedFinalityData is nil, error should be returned if waitForBlockValidator is set to true
-	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(&safeFinalityData, &finalizedFinalityData, nil)
+	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, &safeFinalityData, &finalizedFinalityData, nil)
 	if err == nil {
 		t.Fatalf("err should not be nil")
 	}
@@ -247,7 +247,7 @@ func TestFinalityDataPushedFromConsensusToExecution(t *testing.T) {
 	ensureSafeBlockDoesNotExist(t, ctx, builder.L2, "first node after generating blocks")
 
 	// if nil is passed finality data should not be set
-	err := builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, nil, nil)
+	err := builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, nil, nil, nil)
 	Require(t, err)
 	ensureFinalizedBlockDoesNotExist(t, ctx, builder.L2, "first node after generating blocks and setting finality data to nil")
 	ensureSafeBlockDoesNotExist(t, ctx, builder.L2, "first node after generating blocks and setting finality data to nil")
@@ -310,7 +310,7 @@ func TestFinalityAfterReorg(t *testing.T) {
 		BlockHash: finalizedMsgResult.BlockHash,
 	}
 
-	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(&safeFinalityData, &finalizedFinalityData, nil)
+	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, &safeFinalityData, &finalizedFinalityData, nil)
 	Require(t, err)
 
 	checksFinalityData(t, "before reorg", ctx, builder.L2, finalizedFinalityData.MsgIdx, safeFinalityData.MsgIdx)
@@ -360,7 +360,7 @@ func TestSetFinalityBlockHashMismatch(t *testing.T) {
 		BlockHash: common.Hash{},
 	}
 
-	err := builder.L2.ExecNode.SyncMonitor.SetFinalityData(&safeFinalityData, &finalizedFinalityData, nil)
+	err := builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, &safeFinalityData, &finalizedFinalityData, nil)
 	if err == nil {
 		t.Fatalf("err should not be nil")
 	}
@@ -408,13 +408,13 @@ func TestFinalityDataNodeOutOfSync(t *testing.T) {
 		BlockHash: finalizedMsgResult.BlockHash,
 	}
 
-	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(&safeFinalityData, &finalizedFinalityData, nil)
+	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, &safeFinalityData, &finalizedFinalityData, nil)
 	Require(t, err)
 
 	checksFinalityData(t, "before out of sync", ctx, builder.L2, finalizedFinalityData.MsgIdx, safeFinalityData.MsgIdx)
 
 	// out of sync node
-	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, nil, nil)
+	err = builder.L2.ExecNode.SyncMonitor.SetFinalityData(nil, nil, nil, nil)
 	Require(t, err)
 
 	ensureFinalizedBlockDoesNotExist(t, ctx, builder.L2, "out of sync")

system_tests/pruning_test.go

Lines changed: 25 additions & 8 deletions
@@ -48,6 +48,9 @@ func testPruning(t *testing.T, mode string, pruneParallelStorageTraversal bool)
 	// PathScheme prunes the state trie by itself, so only HashScheme should be tested
 	builder.RequireScheme(t, rawdb.HashScheme)
 
+	// Needed to create safe blocks; hence forcing SetFinalityData call
+	builder.nodeConfig.ParentChainReader.UseFinalityData = true
+
 	_ = builder.Build(t)
 	l2cleanupDone := false
 	defer func() {
@@ -70,6 +73,14 @@ func testPruning(t *testing.T, mode string, pruneParallelStorageTraversal bool)
 	}
 	lastBlock, err := builder.L2.Client.BlockNumber(ctx)
 	Require(t, err)
+
+	// Cache both validated and finalized block hashes from the l2 executionDB to later
+	// add to the new executionDB below
+	data, err := builder.L2.ExecNode.ExecutionDB.Get(gethexec.ValidatedBlockHashKey)
+	Require(t, err)
+	validatedBlockHash := common.BytesToHash(data)
+	finalizedBlockHash := rawdb.ReadFinalizedBlockHash(builder.L2.ExecNode.ExecutionDB)
+
 	l2cleanupDone = true
 	builder.L2.cleanup()
 	t.Log("stopped l2 node")
@@ -78,10 +89,16 @@ func testPruning(t *testing.T, mode string, pruneParallelStorageTraversal bool)
 	stack, err := node.New(builder.l2StackConfig)
 	Require(t, err)
 	defer stack.Close()
-	chainDB, err := stack.OpenDatabaseWithOptions("l2chaindata", node.DatabaseOptions{MetricsNamespace: "l2chaindata/", PebbleExtraOptions: conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")})
+	executionDB, err := stack.OpenDatabaseWithOptions("l2chaindata", node.DatabaseOptions{MetricsNamespace: "l2chaindata/", PebbleExtraOptions: conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")})
+	Require(t, err)
+	defer executionDB.Close()
+	executionDBEntriesBeforePruning := countStateEntries(executionDB)
+
+	// Since we're dealing with a new executionDB we store both validatedBlockHash and
+	// finalizedBlockHash back into this new executionDB.
+	err = executionDB.Put(gethexec.ValidatedBlockHashKey, validatedBlockHash.Bytes())
 	Require(t, err)
-	defer chainDB.Close()
-	executionDBEntriesBeforePruning := countStateEntries(chainDB)
+	rawdb.WriteFinalizedBlockHash(executionDB, finalizedBlockHash)
 
 	prand := testhelpers.NewPseudoRandomDataSource(t, 1)
 	var testKeys [][]byte
@@ -90,11 +107,11 @@ func testPruning(t *testing.T, mode string, pruneParallelStorageTraversal bool)
 		testKeys = append(testKeys, prand.GetHash().Bytes())
 	}
 	for _, key := range testKeys {
-		err = chainDB.Put(key, common.FromHex("0xdeadbeef"))
+		err = executionDB.Put(key, common.FromHex("0xdeadbeef"))
 		Require(t, err)
 	}
 	for _, key := range testKeys {
-		if has, _ := chainDB.Has(key); !has {
+		if has, _ := executionDB.Has(key); !has {
 			Fatal(t, "internal test error - failed to check existence of test key")
 		}
 	}
@@ -104,16 +121,16 @@ func testPruning(t *testing.T, mode string, pruneParallelStorageTraversal bool)
 	initConfig.PruneParallelStorageTraversal = pruneParallelStorageTraversal
 	coreCacheConfig := gethexec.DefaultCacheConfigFor(&builder.execConfig.Caching)
 	persistentConfig := conf.PersistentConfigDefault
-	err = pruning.PruneExecutionDB(ctx, chainDB, stack, &initConfig, coreCacheConfig, &persistentConfig, builder.L1.Client, *builder.L2.ConsensusNode.DeployInfo, false, false)
+	err = pruning.PruneExecutionDB(ctx, executionDB, stack, &initConfig, coreCacheConfig, &persistentConfig, builder.L1.Client, *builder.L2.ConsensusNode.DeployInfo, false, false)
 	Require(t, err)
 
 	for _, key := range testKeys {
-		if has, _ := chainDB.Has(key); has {
+		if has, _ := executionDB.Has(key); has {
 			Fatal(t, "test key hasn't been pruned as expected")
 		}
 	}
 
-	executionDBEntriesAfterPruning := countStateEntries(chainDB)
+	executionDBEntriesAfterPruning := countStateEntries(executionDB)
 	t.Log("db entries pre-pruning:", executionDBEntriesBeforePruning)
 	t.Log("db entries post-pruning:", executionDBEntriesAfterPruning)
119136
