43 changes: 42 additions & 1 deletion core/blockchain.go
@@ -112,6 +112,9 @@ var (
errChainStopped = errors.New("blockchain is stopped")
errInvalidOldChain = errors.New("invalid old chain")
errInvalidNewChain = errors.New("invalid new chain")

avgAccessDepthInBlock = metrics.NewRegisteredGauge("trie/access/depth/avg", nil)
minAccessDepthInBlock = metrics.NewRegisteredGauge("trie/access/depth/min", nil)
Comment on lines +116 to +117
ideally, add every access depth and let the consumer do its own filtering.

we're going to want to separate account trie and storage tries
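
Sketched below (not part of this diff) is one way to satisfy both comments: export every raw access depth through per-trie-type histograms and leave filtering to the consumer. The metric names, sampling parameters, and helper are assumptions, and the snippet presumes the existing "github.com/ethereum/go-ethereum/metrics" import in core/blockchain.go.

	// Sketch only: separate account/storage histograms, one sample per accessed path.
	var (
		accountAccessDepthHist = metrics.NewRegisteredHistogram("trie/access/depth/account", nil, metrics.NewExpDecaySample(1028, 0.015))
		storageAccessDepthHist = metrics.NewRegisteredHistogram("trie/access/depth/storage", nil, metrics.NewExpDecaySample(1028, 0.015))
	)

	// recordAccessDepths feeds the raw depth of every accessed path into the given histogram,
	// so downstream dashboards can compute their own min/avg/percentiles.
	func recordAccessDepths(hist metrics.Histogram, paths map[string]struct{}) {
		for path := range paths {
			hist.Update(int64(len(path)))
		}
	}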

)

var (
@@ -331,6 +334,8 @@ type BlockChain struct {
processor Processor // Block transaction processor interface
logger *tracing.Hooks

stateSizeGen *state.StateSizeGenerator // State size tracking

lastForkReadyAlert time.Time // Last time there was a fork readiness print out
}

@@ -523,6 +528,11 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
if bc.cfg.TxLookupLimit >= 0 {
bc.txIndexer = newTxIndexer(uint64(bc.cfg.TxLookupLimit), bc)
}

// Start state size tracker
bc.stateSizeGen = state.NewStateSizeGenerator(bc.statedb.DiskDB(), bc.triedb, head.Root)
log.Info("Started state size generator", "root", head.Root)

return bc, nil
}
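
The generator type itself is added elsewhere in this PR and is not shown in this hunk. As a rough, assumed outline of the lifecycle used here (the internals below are illustrative only, not the PR's implementation):

	// Assumed shape, for orientation only; the real type lives in core/state.
	type StateSizeGenerator struct {
		updates chan *stateUpdate // per-block deltas handed over by Track
		quit    chan struct{}     // closed by Stop to terminate the background loop
	}

	// Track hands a committed block's state update to the background accumulator.
	func (g *StateSizeGenerator) Track(update *stateUpdate) {
		select {
		case g.updates <- update:
		case <-g.quit: // already stopped, drop the delta
		}
	}

	// Stop terminates the generator; stopWithoutSaving calls it on shutdown.
	func (g *StateSizeGenerator) Stop() {
		close(g.quit)
	}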

@@ -1249,6 +1259,12 @@ func (bc *BlockChain) stopWithoutSaving() {
// Signal shutdown to all goroutines.
bc.InterruptInsert(true)

// Stop state size generator if running
if bc.stateSizeGen != nil {
bc.stateSizeGen.Stop()
log.Info("Stopped state size generator")
}

// Now wait for all chain modifications to end and persistent goroutines to exit.
//
// Note: Close waits for the mutex to become available, i.e. any running chain
@@ -1310,6 +1326,7 @@ func (bc *BlockChain) Stop() {
}
}
}

// Allow tracers to clean-up and release resources.
if bc.logger != nil && bc.logger.OnClose != nil {
bc.logger.OnClose()
@@ -1583,10 +1600,15 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
log.Crit("Failed to write block into disk", "err", err)
}
// Commit all cached state changes into underlying memory database.
root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
root, stateUpdate, err := statedb.CommitWithUpdate(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
if err != nil {
return err
}

// Track state size changes if generator is running
if bc.stateSizeGen != nil && stateUpdate != nil {
bc.stateSizeGen.Track(stateUpdate)
}
// If node is running in path mode, skip explicit gc operation
// which is unnecessary in this mode.
if bc.triedb.Scheme() == rawdb.PathScheme {
@@ -2083,6 +2105,7 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
return nil, fmt.Errorf("stateless self-validation receipt root mismatch (cross: %x local: %x)", crossReceiptRoot, block.ReceiptHash())
}
}

xvtime := time.Since(xvstart)
proctime := time.Since(startTime) // processing + validation + cross validation

@@ -2118,6 +2141,24 @@
if err != nil {
return nil, err
}

// If witness was generated, update metrics regarding the access paths.

note for later: this is creating a dependency on the witness creation, so maybe it would be a good idea to create a specific object that is passed to ProcessBlock. But let's see that for when we want to merge it in master.
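
One way the suggested object could look, purely as a sketch: every name below is invented here rather than taken from the PR, and it reuses the two gauges declared above so metric collection no longer depends on witness creation.

	// accessStats is a hypothetical per-block container for access-path statistics.
	type accessStats struct {
		total int // summed depth of all accessed paths
		count int // number of accessed paths
		min   int // shortest path seen; -1 until the first observation
	}

	func (s *accessStats) observe(path string) {
		if s.min < 0 || len(path) < s.min {
			s.min = len(path)
		}
		s.total += len(path)
		s.count++
	}

	func (s *accessStats) report() {
		if s.count == 0 {
			return
		}
		avgAccessDepthInBlock.Update(int64(s.total / s.count))
		minAccessDepthInBlock.Update(int64(s.min))
	}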

if witness != nil {
paths := witness.Paths
totaldepth, pathnum, mindepth := 0, 0, -1
if len(paths) > 0 {
for path := range paths {
if len(path) < mindepth || mindepth < 0 {
mindepth = len(path)
}
totaldepth += len(path)
pathnum++
}
avgAccessDepthInBlock.Update(int64(totaldepth) / int64(pathnum))
minAccessDepthInBlock.Update(int64(mindepth))
}
}

// Update the metrics touched during block commit
accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
13 changes: 13 additions & 0 deletions core/rawdb/accessors_metadata.go
@@ -187,3 +187,16 @@ func WriteTransitionStatus(db ethdb.KeyValueWriter, data []byte) {
log.Crit("Failed to store the eth2 transition status", "err", err)
}
}

// WriteStateSizeMetrics writes the state size metrics to the database
func WriteStateSizeMetrics(db ethdb.KeyValueWriter, data []byte) {
if err := db.Put(stateSizeMetricsKey, data); err != nil {
log.Warn("Failed to store state size metrics", "err", err)
}
}

// ReadStateSizeMetrics reads the state size metrics from the database
func ReadStateSizeMetrics(db ethdb.KeyValueReader) []byte {
data, _ := db.Get(stateSizeMetricsKey)
return data
}
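
For illustration, a hedged sketch of how a caller outside package rawdb might round-trip its counters through these accessors, e.g. to checkpoint progress across restarts. The stateSizeCounters type and both helpers are assumptions, not taken from this PR; the snippet presumes imports of ethdb, rlp, and core/rawdb.

	type stateSizeCounters struct {
		Accounts, AccountBytes   uint64
		Slots, SlotBytes         uint64
		TrieNodes, TrieNodeBytes uint64
	}

	// persistCounters checkpoints the running totals so a restart can resume them.
	func persistCounters(db ethdb.KeyValueWriter, c stateSizeCounters) error {
		blob, err := rlp.EncodeToBytes(&c)
		if err != nil {
			return err
		}
		rawdb.WriteStateSizeMetrics(db, blob)
		return nil
	}

	// loadCounters restores a previous checkpoint, if any.
	func loadCounters(db ethdb.KeyValueReader) (stateSizeCounters, bool) {
		var c stateSizeCounters
		blob := rawdb.ReadStateSizeMetrics(db)
		if len(blob) == 0 {
			return c, false
		}
		if err := rlp.DecodeBytes(blob, &c); err != nil {
			return stateSizeCounters{}, false
		}
		return c, true
	}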
3 changes: 3 additions & 0 deletions core/rawdb/schema.go
@@ -100,6 +100,9 @@ var (
// snapSyncStatusFlagKey flags that status of snap sync.
snapSyncStatusFlagKey = []byte("SnapSyncStatus")

// stateSizeMetricsKey tracks the state size metrics.
stateSizeMetricsKey = []byte("state-size-metrics")

// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td (deprecated)
9 changes: 9 additions & 0 deletions core/state/database.go
@@ -124,6 +124,10 @@ type Trie interface {
// The returned map could be nil if the witness is empty.
Witness() map[string]struct{}

// WitnessPaths returns a set of paths for all trie nodes. For future reference,
// witness can be deprecated and used as a replacement to witness.
@gballet gballet Aug 15, 2025

Suggested change
// witness can be deprecated and used as a replacement to witness.
// witness can be deprecated and these paths can be used as a replacement to witness.

I disagree with this comment, because the witness will be spec'd by something exterior and this external consumer will not be interested in the paths because of size reasons. But we shall see.

WitnessPaths() map[string]struct{}

// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key. And error will be returned
// if fails to create node iterator.
@@ -277,6 +281,11 @@ func (db *CachingDB) TrieDB() *triedb.Database {
return db.triedb
}

// DiskDB returns the underlying disk database for direct access.
func (db *CachingDB) DiskDB() ethdb.KeyValueStore {
return db.disk
}

// PointCache returns the cache of evaluated curve points.
func (db *CachingDB) PointCache() *utils.PointCache {
return db.pointCache
10 changes: 10 additions & 0 deletions core/state/metrics.go
@@ -29,4 +29,14 @@ var (
storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)

// State size metrics
accountCountGauge = metrics.NewRegisteredGauge("state/account/count", nil)
accountBytesGauge = metrics.NewRegisteredGauge("state/account/bytes", nil)
storageCountGauge = metrics.NewRegisteredGauge("state/storage/count", nil)
storageBytesGauge = metrics.NewRegisteredGauge("state/storage/bytes", nil)
trienodeCountGauge = metrics.NewRegisteredGauge("state/trienode/count", nil)
trienodeBytesGauge = metrics.NewRegisteredGauge("state/trienode/bytes", nil)
contractCountGauge = metrics.NewRegisteredGauge("state/contract/count", nil)
contractBytesGauge = metrics.NewRegisteredGauge("state/contract/bytes", nil)
)
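
These gauges are presumably refreshed by the state size generator; a minimal, assumed publishing helper (not in this diff) might look like the following, with all parameters coming from the generator's internal counters.

	// reportStateSize pushes aggregated totals to the gauges above.
	func reportStateSize(accounts, accountBytes, slots, slotBytes, nodes, nodeBytes, contracts, contractBytes int64) {
		accountCountGauge.Update(accounts)
		accountBytesGauge.Update(accountBytes)
		storageCountGauge.Update(slots)
		storageBytesGauge.Update(slotBytes)
		trienodeCountGauge.Update(nodes)
		trienodeBytesGauge.Update(nodeBytes)
		contractCountGauge.Update(contracts)
		contractBytesGauge.Update(contractBytes)
	}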
23 changes: 17 additions & 6 deletions core/state/statedb.go
@@ -185,6 +185,7 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
if db.TrieDB().IsVerkle() {
sdb.accessEvents = NewAccessEvents(db.PointCache())
}

return sdb, nil
}

@@ -841,7 +842,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// If witness building is enabled and the state object has a trie,
// gather the witnesses for its specific storage trie
if s.witness != nil && obj.trie != nil {
s.witness.AddState(obj.trie.Witness())
s.witness.AddState(obj.trie.Witness(), obj.trie.WitnessPaths())
}
}
return nil
@@ -858,9 +859,9 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
continue
}
if trie := obj.getPrefetchedTrie(); trie != nil {
s.witness.AddState(trie.Witness())
s.witness.AddState(trie.Witness(), trie.WitnessPaths())
} else if obj.trie != nil {
s.witness.AddState(obj.trie.Witness())
s.witness.AddState(obj.trie.Witness(), obj.trie.WitnessPaths())
}
}
// Pull in only-read and non-destructed trie witnesses
@@ -874,9 +875,9 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
continue
}
if trie := obj.getPrefetchedTrie(); trie != nil {
s.witness.AddState(trie.Witness())
s.witness.AddState(trie.Witness(), trie.WitnessPaths())
} else if obj.trie != nil {
s.witness.AddState(obj.trie.Witness())
s.witness.AddState(obj.trie.Witness(), obj.trie.WitnessPaths())
}
}
}
@@ -942,7 +943,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {

// If witness building is enabled, gather the account trie witness
if s.witness != nil {
s.witness.AddState(s.trie.Witness())
s.witness.AddState(s.trie.Witness(), nil)
}
return hash
}
@@ -1356,6 +1357,16 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping
return ret.root, nil
}

// CommitWithUpdate writes the state mutations and returns both the root hash and the state update.
// This is useful for tracking state changes at the blockchain level.
func (s *StateDB) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) {
ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
if err != nil {
return common.Hash{}, nil, err
}
return ret.root, ret, nil
}
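
Since both methods wrap commitAndFlush, a possible follow-up (not part of this diff) would be to have Commit delegate to CommitWithUpdate so the two paths cannot drift apart:

	// Hedged sketch of the delegation; behaviour matches the existing Commit.
	func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
		root, _, err := s.CommitWithUpdate(block, deleteEmptyObjects, noStorageWiping)
		return root, err
	}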

// Prepare handles the preparatory steps for executing a state transition with.
// This method must be invoked before state transition.
//