diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 9868142f53d..71ff821bb9b 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -108,6 +108,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
utils.MetricsInfluxDBTokenFlag,
utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag,
+ utils.StateSizeTrackingFlag,
utils.TxLookupLimitFlag,
utils.VMTraceFlag,
utils.VMTraceJsonConfigFlag,
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 96bd715e889..33a3eadea84 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -209,7 +209,7 @@ func constructDevModeBanner(ctx *cli.Context, cfg gethConfig) string {
0x%x (10^49 ETH)
`, cfg.Eth.Miner.PendingFeeRecipient)
if cfg.Eth.Miner.PendingFeeRecipient == utils.DeveloperAddr {
- devModeBanner += fmt.Sprintf(`
+ devModeBanner += fmt.Sprintf(`
Private Key
------------------
0x%x
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 2da5c432161..750bf55927c 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -200,6 +200,7 @@ var (
utils.MetricsInfluxDBTokenFlag,
utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag,
+ utils.StateSizeTrackingFlag,
}
)
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index bfc1ff0983d..aca3ce91961 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -270,6 +270,12 @@ var (
Usage: "Scheme to use for storing ethereum state ('hash' or 'path')",
Category: flags.StateCategory,
}
+ StateSizeTrackingFlag = &cli.BoolFlag{
+ Name: "state.size-tracking",
+ Usage: "Enable state size tracking, retrieve state size with debug_stateSize.",
+ Value: ethconfig.Defaults.EnableStateSizeTracking,
+ Category: flags.StateCategory,
+ }
StateHistoryFlag = &cli.Uint64Flag{
Name: "history.state",
Usage: "Number of recent blocks to retain state history for, only relevant in state.scheme=path (default = 90,000 blocks, 0 = entire chain)",
@@ -1726,6 +1732,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.EthDiscoveryURLs = SplitAndTrim(urls)
}
}
+
+ if ctx.Bool(StateSizeTrackingFlag.Name) {
+ log.Info("Enabling state size metrics")
+ cfg.EnableStateSizeTracking = true
+ }
// Override any default configs for hard coded networks.
switch {
case ctx.Bool(MainnetFlag.Name):
@@ -2208,6 +2219,9 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
// - DATADIR/triedb/merkle.journal
// - DATADIR/triedb/verkle.journal
TrieJournalDirectory: stack.ResolvePath("triedb"),
+
+ // Enable state size tracking if requested
+ StateSizeTracking: ctx.Bool(StateSizeTrackingFlag.Name),
}
if options.ArchiveMode && !options.Preimages {
options.Preimages = true
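
Taken together, state size tracking is strictly opt-in at startup (e.g. geth --state.size-tracking); the collected statistics are then surfaced through the debug_stateSize endpoint wired up later in this change.
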
diff --git a/core/blockchain.go b/core/blockchain.go
index dc6e7e90401..7b93a7606e3 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -196,6 +196,9 @@ type BlockChainConfig struct {
// If the value is zero, all transactions of the entire chain will be indexed.
// If the value is -1, indexing is disabled.
TxLookupLimit int64
+
+ // StateSizeTracking indicates whether state size tracking is enabled.
+ StateSizeTracking bool
}
// DefaultConfig returns the default config.
@@ -333,6 +336,7 @@ type BlockChain struct {
prefetcher Prefetcher
processor Processor // Block transaction processor interface
logger *tracing.Hooks
+ stateSizer *state.SizeTracker // State size tracking
lastForkReadyAlert time.Time // Last time there was a fork readiness print out
}
@@ -526,6 +530,17 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
if bc.cfg.TxLookupLimit >= 0 {
bc.txIndexer = newTxIndexer(uint64(bc.cfg.TxLookupLimit), bc)
}
+
+ // Start state size tracker
+ if bc.cfg.StateSizeTracking {
+ stateSizer, err := state.NewSizeTracker(bc.db, bc.triedb)
+ if err == nil {
+ bc.stateSizer = stateSizer
+ log.Info("Initialized state sizer")
+ } else {
+ log.Info("Failed to setup size tracker", "err", err)
+ }
+ }
return bc, nil
}
@@ -1252,6 +1267,10 @@ func (bc *BlockChain) stopWithoutSaving() {
// Signal shutdown to all goroutines.
bc.InterruptInsert(true)
+ // Stop state size tracker
+ if bc.stateSizer != nil {
+ bc.stateSizer.Stop()
+ }
// Now wait for all chain modifications to end and persistent goroutines to exit.
//
// Note: Close waits for the mutex to become available, i.e. any running chain
@@ -1313,6 +1332,7 @@ func (bc *BlockChain) Stop() {
}
}
}
+
// Allow tracers to clean-up and release resources.
if bc.logger != nil && bc.logger.OnClose != nil {
bc.logger.OnClose()
@@ -1586,10 +1606,14 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
log.Crit("Failed to write block into disk", "err", err)
}
// Commit all cached state changes into underlying memory database.
- root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
+ root, stateUpdate, err := statedb.CommitWithUpdate(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
if err != nil {
return err
}
+ // Forward the state update to the state size tracker if it's active
+ if bc.stateSizer != nil {
+ bc.stateSizer.Notify(stateUpdate)
+ }
// If node is running in path mode, skip explicit gc operation
// which is unnecessary in this mode.
if bc.triedb.Scheme() == rawdb.PathScheme {
@@ -2791,3 +2815,8 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
return time.Duration(bc.flushInterval.Load())
}
+
+// StateSizer returns the state size tracker, or nil if it's not initialized.
+func (bc *BlockChain) StateSizer() *state.SizeTracker {
+ return bc.stateSizer
+}
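
Since BlockChain now exposes the tracker through StateSizer(), downstream code can poll the statistics once tracking is enabled. A minimal sketch of a hypothetical caller (the names logStateSize and chain are assumptions for illustration, not part of this change):

	// logStateSize logs the latest state size; chain is a *core.BlockChain
	// constructed with StateSizeTracking enabled in its BlockChainConfig.
	func logStateSize(chain *core.BlockChain) {
		if sizer := chain.StateSizer(); sizer != nil {
			stats, err := sizer.Query(nil) // nil root queries the latest chain head
			if err != nil {
				log.Warn("State size not ready", "err", err)
			} else if stats != nil {
				log.Info("Current state size", "number", stats.BlockNumber, "stat", stats)
			}
		}
	}
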
diff --git a/core/state/database.go b/core/state/database.go
index 3a0ac422ee4..052e2acab93 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -285,6 +285,11 @@ func (db *CachingDB) TrieDB() *triedb.Database {
return db.triedb
}
+// DiskDB returns the underlying disk database for direct access.
+func (db *CachingDB) DiskDB() ethdb.KeyValueStore {
+ return db.disk
+}
+
// PointCache returns the cache of evaluated curve points.
func (db *CachingDB) PointCache() *utils.PointCache {
return db.pointCache
diff --git a/core/state/state_sizer.go b/core/state/state_sizer.go
new file mode 100644
index 00000000000..2066c94845d
--- /dev/null
+++ b/core/state/state_sizer.go
@@ -0,0 +1,638 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "container/heap"
+ "errors"
+ "fmt"
+ "maps"
+ "runtime"
+ "slices"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/triedb"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ statEvictThreshold = 128 // number of recent block statistics to retain
+)
+
+// Database key and key-prefix sizes for state entries under the rawdb scheme.
+var (
+ accountKeySize = int64(len(rawdb.SnapshotAccountPrefix) + common.HashLength)
+ storageKeySize = int64(len(rawdb.SnapshotStoragePrefix) + common.HashLength*2)
+ accountTrienodePrefixSize = int64(len(rawdb.TrieNodeAccountPrefix))
+ storageTrienodePrefixSize = int64(len(rawdb.TrieNodeStoragePrefix) + common.HashLength)
+ codeKeySize = int64(len(rawdb.CodePrefix) + common.HashLength)
+)
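+
+// Note: with the single-byte key prefixes used by the current rawdb schema,
+// these work out to 33 bytes per account key, 65 bytes per storage-slot key
+// and 33 bytes per code key; trie node keys additionally include the node path.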
+
+// SizeStats represents either the current state size statistics or the size
+// differences resulting from a state transition.
+type SizeStats struct {
+ StateRoot common.Hash // State root hash at the time of measurement
+ BlockNumber uint64 // Associated block number at the time of measurement
+
+ Accounts int64 // Total number of accounts in the state
+ AccountBytes int64 // Total storage size used by all account data (in bytes)
+ Storages int64 // Total number of storage slots across all accounts
+ StorageBytes int64 // Total storage size used by all storage slot data (in bytes)
+ AccountTrienodes int64 // Total number of account trie nodes in the state
+ AccountTrienodeBytes int64 // Total storage size occupied by account trie nodes (in bytes)
+ StorageTrienodes int64 // Total number of storage trie nodes in the state
+ StorageTrienodeBytes int64 // Total storage size occupied by storage trie nodes (in bytes)
+ ContractCodes int64 // Total number of contract codes in the state
+ ContractCodeBytes int64 // Total size of all contract code (in bytes)
+}
+
+func (s SizeStats) String() string {
+ return fmt.Sprintf("Accounts: %d(%s), Storages: %d(%s), AccountTrienodes: %d(%s), StorageTrienodes: %d(%s), Codes: %d(%s)",
+ s.Accounts, common.StorageSize(s.AccountBytes),
+ s.Storages, common.StorageSize(s.StorageBytes),
+ s.AccountTrienodes, common.StorageSize(s.AccountTrienodeBytes),
+ s.StorageTrienodes, common.StorageSize(s.StorageTrienodeBytes),
+ s.ContractCodes, common.StorageSize(s.ContractCodeBytes),
+ )
+}
+
+// add applies the given state diffs and produces a new version of the statistics.
+func (s SizeStats) add(diff SizeStats) SizeStats {
+ s.StateRoot = diff.StateRoot
+ s.BlockNumber = diff.BlockNumber
+
+ s.Accounts += diff.Accounts
+ s.AccountBytes += diff.AccountBytes
+ s.Storages += diff.Storages
+ s.StorageBytes += diff.StorageBytes
+ s.AccountTrienodes += diff.AccountTrienodes
+ s.AccountTrienodeBytes += diff.AccountTrienodeBytes
+ s.StorageTrienodes += diff.StorageTrienodes
+ s.StorageTrienodeBytes += diff.StorageTrienodeBytes
+ s.ContractCodes += diff.ContractCodes
+ s.ContractCodeBytes += diff.ContractCodeBytes
+ return s
+}
+
+// calSizeStats measures the state size changes of the provided state update.
+func calSizeStats(update *stateUpdate) (SizeStats, error) {
+ stats := SizeStats{
+ BlockNumber: update.blockNumber,
+ StateRoot: update.root,
+ }
+
+ // Measure the account changes
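+ // For example, creating an account whose slim-RLP encoding takes 70 bytes
+ // counts as one new account and accountKeySize+70 bytes; deleting it
+ // reverses both adjustments.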
+ for addr, oldValue := range update.accountsOrigin {
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ newValue, exists := update.accounts[addrHash]
+ if !exists {
+ return SizeStats{}, fmt.Errorf("account %x not found", addr)
+ }
+ oldLen, newLen := len(oldValue), len(newValue)
+
+ switch {
+ case oldLen > 0 && newLen == 0:
+ // Account deletion
+ stats.Accounts -= 1
+ stats.AccountBytes -= accountKeySize + int64(oldLen)
+ case oldLen == 0 && newLen > 0:
+ // Account creation
+ stats.Accounts += 1
+ stats.AccountBytes += accountKeySize + int64(newLen)
+ default:
+ // Account update
+ stats.AccountBytes += int64(newLen - oldLen)
+ }
+ }
+
+ // Measure storage changes
+ for addr, slots := range update.storagesOrigin {
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ subset, exists := update.storages[addrHash]
+ if !exists {
+ return SizeStats{}, fmt.Errorf("storage %x not found", addr)
+ }
+ for key, oldValue := range slots {
+ var (
+ exists bool
+ newValue []byte
+ )
+ if update.rawStorageKey {
+ newValue, exists = subset[crypto.Keccak256Hash(key.Bytes())]
+ } else {
+ newValue, exists = subset[key]
+ }
+ if !exists {
+ return SizeStats{}, fmt.Errorf("storage slot %x-%x not found", addr, key)
+ }
+ oldLen, newLen := len(oldValue), len(newValue)
+
+ switch {
+ case oldLen > 0 && newLen == 0:
+ // Storage deletion
+ stats.Storages -= 1
+ stats.StorageBytes -= storageKeySize + int64(oldLen)
+ case oldLen == 0 && newLen > 0:
+ // Storage creation
+ stats.Storages += 1
+ stats.StorageBytes += storageKeySize + int64(newLen)
+ default:
+ // Storage update
+ stats.StorageBytes += int64(newLen - oldLen)
+ }
+ }
+ }
+
+ // Measure trienode changes
+ for owner, subset := range update.nodes.Sets {
+ var (
+ keyPrefix int64
+ isAccount = owner == (common.Hash{})
+ )
+ if isAccount {
+ keyPrefix = accountTrienodePrefixSize
+ } else {
+ keyPrefix = storageTrienodePrefixSize
+ }
+
+ // Iterate over Origins since every modified node has an origin entry
+ for path, oldNode := range subset.Origins {
+ newNode, exists := subset.Nodes[path]
+ if !exists {
+ return SizeStats{}, fmt.Errorf("node %x-%v not found", owner, path)
+ }
+ keySize := keyPrefix + int64(len(path))
+
+ switch {
+ case len(oldNode) > 0 && len(newNode.Blob) == 0:
+ // Node deletion
+ if isAccount {
+ stats.AccountTrienodes -= 1
+ stats.AccountTrienodeBytes -= keySize + int64(len(oldNode))
+ } else {
+ stats.StorageTrienodes -= 1
+ stats.StorageTrienodeBytes -= keySize + int64(len(oldNode))
+ }
+ case len(oldNode) == 0 && len(newNode.Blob) > 0:
+ // Node creation
+ if isAccount {
+ stats.AccountTrienodes += 1
+ stats.AccountTrienodeBytes += keySize + int64(len(newNode.Blob))
+ } else {
+ stats.StorageTrienodes += 1
+ stats.StorageTrienodeBytes += keySize + int64(len(newNode.Blob))
+ }
+ default:
+ // Node update
+ if isAccount {
+ stats.AccountTrienodeBytes += int64(len(newNode.Blob) - len(oldNode))
+ } else {
+ stats.StorageTrienodeBytes += int64(len(newNode.Blob) - len(oldNode))
+ }
+ }
+ }
+ }
+
+ // Measure code changes. Note that the reported contract code size may be slightly
+ // inaccurate due to database deduplication (code is stored by its hash). However,
+ // this deviation is negligible and acceptable for measurement purposes.
+ for _, code := range update.codes {
+ stats.ContractCodes += 1
+ stats.ContractCodeBytes += codeKeySize + int64(len(code.blob))
+ }
+ return stats, nil
+}
+
+type stateSizeQuery struct {
+ root *common.Hash // nil means latest
+ err error // non-nil if the state size is not yet initialized
+ result chan *SizeStats // nil means the state is unknown
+}
+
+// SizeTracker measures the size of the persistent state and keeps the
+// statistics updated as the chain progresses.
+type SizeTracker struct {
+ db ethdb.KeyValueStore
+ triedb *triedb.Database
+ abort chan struct{}
+ aborted chan struct{}
+ updateCh chan *stateUpdate
+ queryCh chan *stateSizeQuery
+}
+
+// NewSizeTracker creates a new state size tracker and starts it automatically.
+// It is only supported with the path state scheme.
+func NewSizeTracker(db ethdb.KeyValueStore, triedb *triedb.Database) (*SizeTracker, error) {
+ if triedb.Scheme() != rawdb.PathScheme {
+ return nil, errors.New("state size tracker is not compatible with hash mode")
+ }
+ t := &SizeTracker{
+ db: db,
+ triedb: triedb,
+ abort: make(chan struct{}),
+ aborted: make(chan struct{}),
+ updateCh: make(chan *stateUpdate),
+ queryCh: make(chan *stateSizeQuery),
+ }
+ go t.run()
+ return t, nil
+}
+
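+// Stop terminates the size tracker and waits for its background goroutine to exit.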
+func (t *SizeTracker) Stop() {
+ close(t.abort)
+ <-t.aborted
+}
+
+// sizeStatsHeap is a heap.Interface implementation over state size statistics,
+// used to locate the oldest statistics for eviction.
+type sizeStatsHeap []SizeStats
+
+func (h sizeStatsHeap) Len() int { return len(h) }
+func (h sizeStatsHeap) Less(i, j int) bool { return h[i].BlockNumber < h[j].BlockNumber }
+func (h sizeStatsHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
+
+func (h *sizeStatsHeap) Push(x any) {
+ *h = append(*h, x.(SizeStats))
+}
+
+func (h *sizeStatsHeap) Pop() any {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+ return x
+}
+
+// run performs the state size initialization and then services state updates
+// and size queries until the tracker is stopped.
+func (t *SizeTracker) run() {
+ defer close(t.aborted)
+
+ var last common.Hash
+ stats, err := t.init() // blocks until the initial state size measurement completes
+ if err != nil {
+ return
+ }
+ h := sizeStatsHeap(slices.Collect(maps.Values(stats)))
+ heap.Init(&h)
+
+ for {
+ select {
+ case u := <-t.updateCh:
+ base, found := stats[u.originRoot]
+ if !found {
+ log.Debug("Ignored the state size without parent", "parent", u.originRoot, "root", u.root, "number", u.blockNumber)
+ continue
+ }
+ diff, err := calSizeStats(u)
+ if err != nil {
+ continue
+ }
+ stat := base.add(diff)
+ stats[u.root] = stat
+ last = u.root
+
+ heap.Push(&h, stats[u.root])
+ for u.blockNumber-h[0].BlockNumber > statEvictThreshold {
+ delete(stats, h[0].StateRoot)
+ heap.Pop(&h)
+ }
+ log.Debug("Update state size", "number", stat.BlockNumber, "root", stat.StateRoot, "stat", stat)
+
+ case r := <-t.queryCh:
+ var root common.Hash
+ if r.root != nil {
+ root = *r.root
+ } else {
+ root = last
+ }
+ if s, ok := stats[root]; ok {
+ r.result <- &s
+ } else {
+ r.result <- nil
+ }
+
+ case <-t.abort:
+ return
+ }
+ }
+}
+
+type buildResult struct {
+ stat SizeStats
+ root common.Hash
+ blockNumber uint64
+ elapsed time.Duration
+ err error
+}
+
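+// init waits for the persistent state snapshot to finish generating, measures
+// its size with a full table scan, and then replays the buffered state updates
+// on top to derive statistics for every state root seen in the meantime.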
+func (t *SizeTracker) init() (map[common.Hash]SizeStats, error) {
+ // Wait for snapshot completion and then init
+ ticker := time.NewTicker(10 * time.Second)
+ defer ticker.Stop()
+
+wait:
+ for {
+ select {
+ case <-ticker.C:
+ if t.triedb.SnapshotCompleted() {
+ break wait
+ }
+ case <-t.updateCh:
+ continue
+ case r := <-t.queryCh:
+ r.err = errors.New("state size is not initialized yet")
+ r.result <- nil
+ case <-t.abort:
+ return nil, errors.New("size tracker closed")
+ }
+ }
+
+ var (
+ updates = make(map[common.Hash]*stateUpdate)
+ children = make(map[common.Hash][]common.Hash)
+ done chan buildResult
+ )
+
+ for {
+ select {
+ case u := <-t.updateCh:
+ updates[u.root] = u
+ children[u.originRoot] = append(children[u.originRoot], u.root)
+ log.Debug("Received state update", "root", u.root, "blockNumber", u.blockNumber)
+
+ case r := <-t.queryCh:
+ r.err = errors.New("state size is not initialized yet")
+ r.result <- nil
+
+ case <-ticker.C:
+ // Only check timer if build hasn't started yet
+ if done != nil {
+ continue
+ }
+ root := rawdb.ReadSnapshotRoot(t.db)
+ if root == (common.Hash{}) {
+ continue
+ }
+ entry, exists := updates[root]
+ if !exists {
+ continue
+ }
+ done = make(chan buildResult)
+ go t.build(entry.root, entry.blockNumber, done)
+ log.Info("Measuring persistent state size", "root", root.Hex(), "number", entry.blockNumber)
+
+ case result := <-done:
+ if result.err != nil {
+ return nil, result.err
+ }
+ var (
+ stats = make(map[common.Hash]SizeStats)
+ apply func(root common.Hash, stat SizeStats) error
+ )
+ apply = func(root common.Hash, base SizeStats) error {
+ for _, child := range children[root] {
+ entry, ok := updates[child]
+ if !ok {
+ return fmt.Errorf("the state update is not found, %x", child)
+ }
+ diff, err := calSizeStats(entry)
+ if err != nil {
+ return err
+ }
+ stats[child] = base.add(diff)
+ if err := apply(child, stats[child]); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ if err := apply(result.root, result.stat); err != nil {
+ return nil, err
+ }
+
+ // Set initial latest stats
+ stats[result.root] = result.stat
+ log.Info("Measured persistent state size", "root", result.root, "number", result.blockNumber, "stat", result.stat, "elapsed", common.PrettyDuration(result.elapsed))
+ return stats, nil
+
+ case <-t.abort:
+ return nil, errors.New("size tracker closed")
+ }
+ }
+}
+
+func (t *SizeTracker) build(root common.Hash, blockNumber uint64, done chan buildResult) {
+ // Each goroutine fills in its own counters directly
+ var (
+ accounts, accountBytes int64
+ storages, storageBytes int64
+ codes, codeBytes int64
+
+ accountTrienodes, accountTrienodeBytes int64
+ storageTrienodes, storageTrienodeBytes int64
+
+ group errgroup.Group
+ start = time.Now()
+ )
+
+ // Start all table iterations concurrently with direct metric updates
+ group.Go(func() error {
+ count, bytes, err := t.iterateTableParallel(t.abort, rawdb.SnapshotAccountPrefix, "account")
+ if err != nil {
+ return err
+ }
+ accounts, accountBytes = count, bytes
+ return nil
+ })
+
+ group.Go(func() error {
+ count, bytes, err := t.iterateTableParallel(t.abort, rawdb.SnapshotStoragePrefix, "storage")
+ if err != nil {
+ return err
+ }
+ storages, storageBytes = count, bytes
+ return nil
+ })
+
+ group.Go(func() error {
+ count, bytes, err := t.iterateTableParallel(t.abort, rawdb.TrieNodeAccountPrefix, "accountnode")
+ if err != nil {
+ return err
+ }
+ accountTrienodes, accountTrienodeBytes = count, bytes
+ return nil
+ })
+
+ group.Go(func() error {
+ count, bytes, err := t.iterateTableParallel(t.abort, rawdb.TrieNodeStoragePrefix, "storagenode")
+ if err != nil {
+ return err
+ }
+ storageTrienodes, storageTrienodeBytes = count, bytes
+ return nil
+ })
+
+ group.Go(func() error {
+ count, bytes, err := t.iterateTable(t.abort, rawdb.CodePrefix, "contractcode")
+ if err != nil {
+ return err
+ }
+ codes, codeBytes = count, bytes
+ return nil
+ })
+
+ // Wait for all goroutines to complete
+ if err := group.Wait(); err != nil {
+ done <- buildResult{err: err}
+ } else {
+ stat := SizeStats{
+ StateRoot: root,
+ BlockNumber: blockNumber,
+ Accounts: accounts,
+ AccountBytes: accountBytes,
+ Storages: storages,
+ StorageBytes: storageBytes,
+ AccountTrienodes: accountTrienodes,
+ AccountTrienodeBytes: accountTrienodeBytes,
+ StorageTrienodes: storageTrienodes,
+ StorageTrienodeBytes: storageTrienodeBytes,
+ ContractCodes: codes,
+ ContractCodeBytes: codeBytes,
+ }
+ done <- buildResult{
+ root: root,
+ blockNumber: blockNumber,
+ stat: stat,
+ elapsed: time.Since(start),
+ }
+ }
+}
+
+// iterateTable iterates over a specific table and returns the number of
+// entries along with their total size in bytes.
+func (t *SizeTracker) iterateTable(closed chan struct{}, prefix []byte, name string) (int64, int64, error) {
+ var (
+ start = time.Now()
+ logged = time.Now()
+ count, bytes int64
+ )
+
+ iter := t.db.NewIterator(prefix, nil)
+ defer iter.Release()
+
+ log.Debug("Iterating state", "category", name)
+ for iter.Next() {
+ count++
+ bytes += int64(len(iter.Key()) + len(iter.Value()))
+
+ if time.Since(logged) > time.Second*8 {
+ logged = time.Now()
+
+ select {
+ case <-closed:
+ log.Debug("State iteration cancelled", "category", name)
+ return 0, 0, errors.New("size tracker closed")
+ default:
+ log.Debug("Iterating state", "category", name, "count", count, "size", common.StorageSize(bytes))
+ }
+ }
+ }
+ // Check for iterator errors
+ if err := iter.Error(); err != nil {
+ log.Error("Iterator error", "category", name, "err", err)
+ return 0, 0, err
+ }
+ log.Debug("Finished state iteration", "category", name, "count", count, "size", common.StorageSize(bytes), "elapsed", common.PrettyDuration(time.Since(start)))
+ return count, bytes, nil
+}
+
+// iterateTableParallel iterates over a table in parallel by splitting the key
+// space after the prefix into 256 one-byte sub-ranges. For storage tables this
+// corresponds to splitting on the first byte of the account hash.
+func (t *SizeTracker) iterateTableParallel(closed chan struct{}, prefix []byte, name string) (int64, int64, error) {
+ var (
+ totalCount int64
+ totalBytes int64
+
+ start = time.Now()
+ workers = runtime.NumCPU()
+ group errgroup.Group
+ mu sync.Mutex
+ )
+ group.SetLimit(workers)
+ log.Debug("Starting parallel state iteration", "category", name, "workers", workers)
+
+ if len(prefix) > 0 {
+ if blob, err := t.db.Get(prefix); err == nil && len(blob) > 0 {
+ // If there's a direct hit on the prefix, include it in the stats
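+ // (a key equal to the bare prefix is not covered by any of the
+ // 256 one-byte sub-ranges iterated below)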
+ totalCount = 1
+ totalBytes = int64(len(prefix) + len(blob))
+ }
+ }
+ for i := 0; i < 256; i++ {
+ h := byte(i)
+ group.Go(func() error {
+ count, bytes, err := t.iterateTable(closed, slices.Concat(prefix, []byte{h}), fmt.Sprintf("%s-%02x", name, h))
+ if err != nil {
+ return err
+ }
+ mu.Lock()
+ totalCount += count
+ totalBytes += bytes
+ mu.Unlock()
+ return nil
+ })
+ }
+ if err := group.Wait(); err != nil {
+ return 0, 0, err
+ }
+ log.Debug("Finished parallel state iteration", "category", name, "count", totalCount, "size", common.StorageSize(totalBytes), "elapsed", common.PrettyDuration(time.Since(start)))
+ return totalCount, totalBytes, nil
+}
+
+// Notify sends the state update to the size tracker for processing. Empty
+// updates (where no state changes occurred) are ignored. The call blocks until
+// the update is accepted or the tracker is shut down.
+func (t *SizeTracker) Notify(update *stateUpdate) {
+ if update == nil || update.empty() {
+ return
+ }
+ select {
+ case t.updateCh <- update:
+ case <-t.abort:
+ return
+ }
+}
+
+// Query returns the state size statistics for the given root, or nil if they
+// are not available. If root is nil, the size of the latest chain head is
+// queried; otherwise the size of the specified state is returned.
+func (t *SizeTracker) Query(root *common.Hash) (*SizeStats, error) {
+ r := &stateSizeQuery{
+ root: root,
+ result: make(chan *SizeStats, 1),
+ }
+ select {
+ case <-t.aborted:
+ return nil, errors.New("state sizer has been closed")
+ case t.queryCh <- r:
+ return <-r.result, r.err
+ }
+}
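
For use outside BlockChain, the lifecycle is the same as in blockchain.go: create the tracker once, notify it after every commit, and stop it on shutdown. A hedged sketch, where db (an ethdb.KeyValueStore), tdb (a path-scheme *triedb.Database), statedb (a *state.StateDB) and number are assumed to exist in the caller:

	tracker, err := state.NewSizeTracker(db, tdb) // rejects hash-scheme databases
	if err != nil {
		return err
	}
	defer tracker.Stop()

	// After each block commit, forward the resulting update; empty updates
	// are ignored and Query(nil) then serves the latest statistics.
	_, update, err := statedb.CommitWithUpdate(number, true, false)
	if err != nil {
		return err
	}
	tracker.Notify(update)
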
diff --git a/core/state/state_sizer_test.go b/core/state/state_sizer_test.go
new file mode 100644
index 00000000000..756744d1664
--- /dev/null
+++ b/core/state/state_sizer_test.go
@@ -0,0 +1,261 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/tracing"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb/pebble"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
+ "github.com/holiman/uint256"
+)
+
+func TestSizeTracker(t *testing.T) {
+ tempDir := t.TempDir()
+
+ pdb, err := pebble.New(tempDir, 0, 0, "", false)
+ if err != nil {
+ t.Fatalf("Failed to create pebble database: %v", err)
+ }
+ defer pdb.Close()
+
+ db := rawdb.NewDatabase(pdb)
+ defer db.Close()
+
+ tdb := triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults})
+ sdb := NewDatabase(tdb, nil)
+
+ // Generate 50 blocks to establish a baseline
+ baselineBlockNum := uint64(50)
+ currentRoot := types.EmptyRootHash
+
+ addr1 := common.BytesToAddress([]byte{1, 0, 0, 1})
+ addr2 := common.BytesToAddress([]byte{1, 0, 0, 2})
+ addr3 := common.BytesToAddress([]byte{1, 0, 0, 3})
+
+ // Create initial state with fixed accounts
+ state, _ := New(currentRoot, sdb)
+ state.AddBalance(addr1, uint256.NewInt(1000), tracing.BalanceChangeUnspecified)
+ state.SetNonce(addr1, 1, tracing.NonceChangeUnspecified)
+ state.SetState(addr1, common.HexToHash("0x1111"), common.HexToHash("0xaaaa"))
+ state.SetState(addr1, common.HexToHash("0x2222"), common.HexToHash("0xbbbb"))
+
+ state.AddBalance(addr2, uint256.NewInt(2000), tracing.BalanceChangeUnspecified)
+ state.SetNonce(addr2, 2, tracing.NonceChangeUnspecified)
+ state.SetCode(addr2, []byte{0x60, 0x80, 0x60, 0x40, 0x52}, tracing.CodeChangeUnspecified)
+
+ state.AddBalance(addr3, uint256.NewInt(3000), tracing.BalanceChangeUnspecified)
+ state.SetNonce(addr3, 3, tracing.NonceChangeUnspecified)
+
+ currentRoot, _, err = state.CommitWithUpdate(1, true, false)
+ if err != nil {
+ t.Fatalf("Failed to commit initial state: %v", err)
+ }
+ if err := tdb.Commit(currentRoot, false); err != nil {
+ t.Fatalf("Failed to commit initial trie: %v", err)
+ }
+
+ for i := 1; i < 50; i++ { // blocks 2-50
+ blockNum := uint64(i + 1)
+
+ newState, err := New(currentRoot, sdb)
+ if err != nil {
+ t.Fatalf("Failed to create new state at block %d: %v", blockNum, err)
+ }
+
+ testAddr := common.BigToAddress(uint256.NewInt(uint64(i + 100)).ToBig())
+ newState.AddBalance(testAddr, uint256.NewInt(uint64((i+1)*1000)), tracing.BalanceChangeUnspecified)
+ newState.SetNonce(testAddr, uint64(i+10), tracing.NonceChangeUnspecified)
+
+ if i%2 == 0 {
+ newState.SetState(addr1, common.BigToHash(uint256.NewInt(uint64(i+0x1000)).ToBig()), common.BigToHash(uint256.NewInt(uint64(i+0x2000)).ToBig()))
+ }
+
+ if i%3 == 0 {
+ newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified)
+ }
+
+ root, _, err := newState.CommitWithUpdate(blockNum, true, false)
+ if err != nil {
+ t.Fatalf("Failed to commit state at block %d: %v", blockNum, err)
+ }
+ if err := tdb.Commit(root, false); err != nil {
+ t.Fatalf("Failed to commit trie at block %d: %v", blockNum, err)
+ }
+
+ currentRoot = root
+ }
+
+ baselineRoot := currentRoot
+
+ // Wait for snapshot completion
+ for !tdb.SnapshotCompleted() {
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // Calculate baseline from the intermediate persisted state
+ baselineTracker := &SizeTracker{
+ db: db,
+ triedb: tdb,
+ abort: make(chan struct{}),
+ }
+
+ done := make(chan buildResult)
+ go baselineTracker.build(baselineRoot, baselineBlockNum, done)
+ var baselineResult buildResult
+ select {
+ case baselineResult = <-done:
+ if baselineResult.err != nil {
+ t.Fatalf("Failed to get baseline stats: %v", baselineResult.err)
+ }
+ case <-time.After(30 * time.Second):
+ t.Fatal("Timeout waiting for baseline stats")
+ }
+ baseline := baselineResult.stat
+
+ // Now start the tracker and notify it of updates that happen AFTER the baseline
+ tracker, err := NewSizeTracker(db, tdb)
+ if err != nil {
+ t.Fatalf("Failed to create size tracker: %v", err)
+ }
+ defer tracker.Stop()
+
+ var trackedUpdates []SizeStats
+ currentRoot = baselineRoot
+
+ // Generate additional blocks beyond the baseline and track them
+ for i := 49; i < 130; i++ { // blocks 51-131
+ blockNum := uint64(i + 2)
+ newState, err := New(currentRoot, sdb)
+ if err != nil {
+ t.Fatalf("Failed to create new state at block %d: %v", blockNum, err)
+ }
+
+ testAddr := common.BigToAddress(uint256.NewInt(uint64(i + 100)).ToBig())
+ newState.AddBalance(testAddr, uint256.NewInt(uint64((i+1)*1000)), tracing.BalanceChangeUnspecified)
+ newState.SetNonce(testAddr, uint64(i+10), tracing.NonceChangeUnspecified)
+
+ if i%2 == 0 {
+ newState.SetState(addr1, common.BigToHash(uint256.NewInt(uint64(i+0x1000)).ToBig()), common.BigToHash(uint256.NewInt(uint64(i+0x2000)).ToBig()))
+ }
+
+ if i%3 == 0 {
+ newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified)
+ }
+
+ root, update, err := newState.CommitWithUpdate(blockNum, true, false)
+ if err != nil {
+ t.Fatalf("Failed to commit state at block %d: %v", blockNum, err)
+ }
+ if err := tdb.Commit(root, false); err != nil {
+ t.Fatalf("Failed to commit trie at block %d: %v", blockNum, err)
+ }
+
+ diff, err := calSizeStats(update)
+ if err != nil {
+ t.Fatalf("Failed to calculate size stats for block %d: %v", blockNum, err)
+ }
+ trackedUpdates = append(trackedUpdates, diff)
+ tracker.Notify(update)
+ currentRoot = root
+ }
+
+ if len(trackedUpdates) != 130-49 {
+ t.Errorf("Expected %d tracked updates, got %d", 130-49, len(trackedUpdates))
+ }
+
+ finalRoot := rawdb.ReadSnapshotRoot(db)
+
+ // Ensure all commits are flushed to disk
+ if err := tdb.Close(); err != nil {
+ t.Fatalf("Failed to close triedb: %v", err)
+ }
+
+ // Reopen the database to simulate a restart
+ tdb = triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults})
+
+ finalTracker := &SizeTracker{
+ db: db,
+ triedb: tdb,
+ abort: make(chan struct{}),
+ }
+
+ finalDone := make(chan buildResult)
+ go finalTracker.build(finalRoot, uint64(131), finalDone)
+ var result buildResult
+ select {
+ case result = <-finalDone:
+ if result.err != nil {
+ t.Fatalf("Failed to build final stats: %v", result.err)
+ }
+ case <-time.After(30 * time.Second):
+ t.Fatal("Timeout waiting for final stats")
+ }
+
+ actualStats := result.stat
+
+ expectedStats := baseline
+ for _, diff := range trackedUpdates {
+ expectedStats = expectedStats.add(diff)
+ }
+
+ // The final measured stats should match our calculated expected stats exactly
+ if actualStats.Accounts != expectedStats.Accounts {
+ t.Errorf("Account count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.Accounts, expectedStats.Accounts, actualStats.Accounts)
+ }
+ if actualStats.AccountBytes != expectedStats.AccountBytes {
+ t.Errorf("Account bytes mismatch: expected %d, got %d", expectedStats.AccountBytes, actualStats.AccountBytes)
+ }
+ if actualStats.Storages != expectedStats.Storages {
+ t.Errorf("Storage count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.Storages, expectedStats.Storages, actualStats.Storages)
+ }
+ if actualStats.StorageBytes != expectedStats.StorageBytes {
+ t.Errorf("Storage bytes mismatch: expected %d, got %d", expectedStats.StorageBytes, actualStats.StorageBytes)
+ }
+ if actualStats.ContractCodes != expectedStats.ContractCodes {
+ t.Errorf("Contract code count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.ContractCodes, expectedStats.ContractCodes, actualStats.ContractCodes)
+ }
+ if actualStats.ContractCodeBytes != expectedStats.ContractCodeBytes {
+ t.Errorf("Contract code bytes mismatch: expected %d, got %d", expectedStats.ContractCodeBytes, actualStats.ContractCodeBytes)
+ }
+ if actualStats.AccountTrienodes != expectedStats.AccountTrienodes {
+ t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes)
+ }
+ if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes {
+ t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes)
+ }
+ if actualStats.StorageTrienodes != expectedStats.StorageTrienodes {
+ t.Errorf("Storage trie nodes mismatch: expected %d, got %d", expectedStats.StorageTrienodes, actualStats.StorageTrienodes)
+ }
+ if actualStats.StorageTrienodeBytes != expectedStats.StorageTrienodeBytes {
+ t.Errorf("Storage trie node bytes mismatch: expected %d, got %d", expectedStats.StorageTrienodeBytes, actualStats.StorageTrienodeBytes)
+ }
+
+ t.Logf("Baseline stats: Accounts=%d, AccountBytes=%d, Storages=%d, StorageBytes=%d, ContractCodes=%d",
+ baseline.Accounts, baseline.AccountBytes, baseline.Storages, baseline.StorageBytes, baseline.ContractCodes)
+ t.Logf("Expected stats: Accounts=%d, AccountBytes=%d, Storages=%d, StorageBytes=%d, ContractCodes=%d",
+ expectedStats.Accounts, expectedStats.AccountBytes, expectedStats.Storages, expectedStats.StorageBytes, expectedStats.ContractCodes)
+ t.Logf("Final stats: Accounts=%d, AccountBytes=%d, Storages=%d, StorageBytes=%d, ContractCodes=%d",
+ actualStats.Accounts, actualStats.AccountBytes, actualStats.Storages, actualStats.StorageBytes, actualStats.ContractCodes)
+ t.Logf("Tracked %d state updates from %d blocks successfully", len(trackedUpdates), 81)
+}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index cdfd6382212..f74065666bc 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -186,6 +186,7 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
if db.TrieDB().IsVerkle() {
sdb.accessEvents = NewAccessEvents(db.PointCache())
}
+
return sdb, nil
}
@@ -1155,7 +1156,7 @@ func (s *StateDB) GetTrie() Trie {
// commit gathers the state mutations accumulated along with the associated
// trie changes, resetting all internal flags with the new state as the base.
-func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
+func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNumber uint64) (*stateUpdate, error) {
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@@ -1307,13 +1308,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateU
origin := s.originalRoot
s.originalRoot = root
- return newStateUpdate(noStorageWiping, origin, root, deletes, updates, nodes), nil
+ return newStateUpdate(noStorageWiping, origin, root, blockNumber, deletes, updates, nodes), nil
}
// commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores.
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
- ret, err := s.commit(deleteEmptyObjects, noStorageWiping)
+ ret, err := s.commit(deleteEmptyObjects, noStorageWiping, block)
if err != nil {
return nil, err
}
@@ -1378,6 +1379,16 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping
return ret.root, nil
}
+// CommitWithUpdate writes the state mutations and returns both the root hash and the state update.
+// This is useful for tracking state changes at the blockchain level.
+func (s *StateDB) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) {
+ ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
+ if err != nil {
+ return common.Hash{}, nil, err
+ }
+ return ret.root, ret, nil
+}
+
// Prepare handles the preparatory steps for executing a state transition with.
// This method must be invoked before state transition.
//
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 147546a3c75..20bcbabdf8b 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -281,7 +281,7 @@ func TestCopyObjectState(t *testing.T) {
cpy := orig.Copy()
for _, op := range cpy.mutations {
if have, want := op.applied, false; have != want {
- t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want)
+ t.Fatalf("Error in test itself, the 'aborted' flag should not be set before Commit, have %v want %v", have, want)
}
}
orig.Commit(0, true, false)
diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go
index 75c4ca028c4..a62e2b2d2df 100644
--- a/core/state/stateupdate.go
+++ b/core/state/stateupdate.go
@@ -64,8 +64,10 @@ type accountUpdate struct {
// execution. It contains information about mutated contract codes, accounts,
// and storage slots, along with their original values.
type stateUpdate struct {
- originRoot common.Hash // hash of the state before applying mutation
- root common.Hash // hash of the state after applying mutation
+ originRoot common.Hash // hash of the state before applying mutation
+ root common.Hash // hash of the state after applying mutation
+ blockNumber uint64 // Associated block number
+
accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
@@ -95,7 +97,7 @@ func (sc *stateUpdate) empty() bool {
//
// rawStorageKey is a flag indicating whether to use the raw storage slot key or
// the hash of the slot key for constructing state update object.
-func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
+func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, blockNumber uint64, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
var (
accounts = make(map[common.Hash][]byte)
accountsOrigin = make(map[common.Address][]byte)
@@ -164,6 +166,7 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash
return &stateUpdate{
originRoot: originRoot,
root: root,
+ blockNumber: blockNumber,
accounts: accounts,
accountsOrigin: accountsOrigin,
storages: storages,
diff --git a/eth/api_debug.go b/eth/api_debug.go
index 188dee11aa5..1d476be9892 100644
--- a/eth/api_debug.go
+++ b/eth/api_debug.go
@@ -443,3 +443,39 @@ func (api *DebugAPI) GetTrieFlushInterval() (string, error) {
}
return api.eth.blockchain.GetTrieFlushInterval().String(), nil
}
+
+// StateSize returns the current state size statistics from the state size tracker.
+// Returns an error if the state size tracker is not initialized or if stats are not ready.
+func (api *DebugAPI) StateSize(root *common.Hash) (interface{}, error) {
+ sizer := api.eth.blockchain.StateSizer()
+ if sizer == nil {
+ return nil, errors.New("state size tracker is not enabled")
+ }
+ stats, err := sizer.Query(root)
+ if err != nil {
+ return nil, err
+ }
+ if stats == nil {
+ var s string
+ if root == nil {
+ s = "latest"
+ } else {
+ s = root.Hex()
+ }
+ return nil, fmt.Errorf("state size %s is not available", s)
+ }
+ return map[string]interface{}{
+ "stateRoot": stats.StateRoot,
+ "blockNumber": hexutil.Uint64(stats.BlockNumber),
+ "accounts": hexutil.Uint64(stats.Accounts),
+ "accountBytes": hexutil.Uint64(stats.AccountBytes),
+ "storages": hexutil.Uint64(stats.Storages),
+ "storageBytes": hexutil.Uint64(stats.StorageBytes),
+ "accountTrienodes": hexutil.Uint64(stats.AccountTrienodes),
+ "accountTrienodeBytes": hexutil.Uint64(stats.AccountTrienodeBytes),
+ "storageTrienodes": hexutil.Uint64(stats.StorageTrienodes),
+ "storageTrienodeBytes": hexutil.Uint64(stats.StorageTrienodeBytes),
+ "contractCodes": hexutil.Uint64(stats.ContractCodes),
+ "contractCodeBytes": hexutil.Uint64(stats.ContractCodeBytes),
+ }, nil
+}
diff --git a/eth/backend.go b/eth/backend.go
index 7616ec9d313..43567331891 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -241,6 +241,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// - DATADIR/triedb/merkle.journal
// - DATADIR/triedb/verkle.journal
TrieJournalDirectory: stack.ResolvePath("triedb"),
+ StateSizeTracking: config.EnableStateSizeTracking,
}
)
if config.VMTrace != "" {
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 82c3c500a77..dc771410816 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -144,6 +144,9 @@ type Config struct {
// Enables tracking of SHA3 preimages in the VM
EnablePreimageRecording bool
+ // Enables tracking of state size
+ EnableStateSizeTracking bool
+
// Enables VM tracing
VMTrace string
VMTraceJsonConfig string
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 0a188ba23c4..2fdd219dee8 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -49,6 +49,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
BlobPool blobpool.Config
GPO gasprice.Config
EnablePreimageRecording bool
+ EnableStateSizeTracking bool
VMTrace string
VMTraceJsonConfig string
RPCGasCap uint64
@@ -90,6 +91,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.BlobPool = c.BlobPool
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
+ enc.EnableStateSizeTracking = c.EnableStateSizeTracking
enc.VMTrace = c.VMTrace
enc.VMTraceJsonConfig = c.VMTraceJsonConfig
enc.RPCGasCap = c.RPCGasCap
@@ -135,6 +137,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
BlobPool *blobpool.Config
GPO *gasprice.Config
EnablePreimageRecording *bool
+ EnableStateSizeTracking *bool
VMTrace *string
VMTraceJsonConfig *string
RPCGasCap *uint64
@@ -243,6 +246,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.EnablePreimageRecording != nil {
c.EnablePreimageRecording = *dec.EnablePreimageRecording
}
+ if dec.EnableStateSizeTracking != nil {
+ c.EnableStateSizeTracking = *dec.EnableStateSizeTracking
+ }
if dec.VMTrace != nil {
c.VMTrace = *dec.VMTrace
}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index e81e23ef167..fdbb0a3f38d 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -468,6 +468,12 @@ web3._extend({
call: 'debug_sync',
params: 1
}),
+ new web3._extend.Method({
+ name: 'stateSize',
+ call: 'debug_stateSize',
+ params: 1,
+ inputFormatter: [null],
+ }),
],
properties: []
});
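
Once registered, the endpoint should be callable from the console as debug.stateSize(null) for the head state, or debug.stateSize("0x<root>") for a specific state root, mirroring the optional-root semantics of the Go API.
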
diff --git a/triedb/database.go b/triedb/database.go
index e2f4334d6e0..d2637bd909a 100644
--- a/triedb/database.go
+++ b/triedb/database.go
@@ -375,3 +375,12 @@ func (db *Database) IsVerkle() bool {
func (db *Database) Disk() ethdb.Database {
return db.disk
}
+
+// SnapshotCompleted reports whether snapshot generation has completed. It
+// always returns false for non-path database backends.
+func (db *Database) SnapshotCompleted() bool {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return false
+ }
+ return pdb.SnapshotCompleted()
+}
diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index 423b921d47d..0be875af8f3 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -136,8 +136,7 @@ type Database struct {
stateFreezer ethdb.ResettableAncientStore // Freezer for storing state histories, nil possible in tests
stateIndexer *historyIndexer // History indexer historical state data, nil possible
-
- lock sync.RWMutex // Lock to prevent mutations from happening at the same time
+ lock sync.RWMutex // Lock to prevent mutations from happening at the same time
}
// New attempts to load an already existing layer from a persistent key-value
@@ -681,3 +680,14 @@ func (db *Database) StorageIterator(root common.Hash, account common.Hash, seek
}
return newFastStorageIterator(db, root, account, seek)
}
+
+// SnapshotCompleted reports whether the snapshot generation is completed.
+func (db *Database) SnapshotCompleted() bool {
+ db.lock.RLock()
+ wait := db.waitSync
+ db.lock.RUnlock()
+ if wait {
+ return false
+ }
+ return db.tree.bottom().genComplete()
+}