Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions cmd/geth/chaincmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.NoCompactionFlag,
utils.LogSlowBlockFlag,
utils.MetricsEnabledFlag,
utils.MetricsEnabledExpensiveFlag,
utils.MetricsHTTPFlag,
Expand Down
1 change: 1 addition & 0 deletions cmd/geth/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,7 @@ var (
utils.BeaconGenesisTimeFlag,
utils.BeaconCheckpointFlag,
utils.BeaconCheckpointFileFlag,
utils.LogSlowBlockFlag,
}, utils.NetworkFlags, utils.DatabaseFlags)

rpcFlags = []cli.Flag{
Expand Down
8 changes: 8 additions & 0 deletions cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -651,6 +651,11 @@ var (
Usage: "Disables db compaction after import",
Category: flags.LoggingCategory,
}
LogSlowBlockFlag = &cli.Uint64Flag{
Name: "debug.logslowblock",
Usage: "The block execution speed threshold (Mgas/s) below which detailed statistics are logged",
Category: flags.LoggingCategory,
}

// MISC settings
SyncTargetFlag = &cli.StringFlag{
Expand Down Expand Up @@ -1699,6 +1704,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(LogNoHistoryFlag.Name) {
cfg.LogNoHistory = true
}
if ctx.IsSet(LogSlowBlockFlag.Name) {
cfg.SlowBlockThreshold = ctx.Uint64(LogSlowBlockFlag.Name)
}
if ctx.IsSet(LogExportCheckpointsFlag.Name) {
cfg.LogExportCheckpoints = ctx.String(LogExportCheckpointsFlag.Name)
}
Expand Down
136 changes: 78 additions & 58 deletions core/blockchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,10 @@ type BlockChainConfig struct {

// StateSizeTracking indicates whether the state size tracking is enabled.
StateSizeTracking bool

// SlowBlockThreshold is the block execution speed threshold (Mgas/s)
// below which detailed statistics are logged.
SlowBlockThreshold uint64
}

// DefaultConfig returns the default config.
Expand Down Expand Up @@ -338,6 +342,7 @@ type BlockChain struct {
stateSizer *state.SizeTracker // State size tracking

lastForkReadyAlert time.Time // Last time there was a fork readiness print out
slowBlockThreshold uint64 // Block execution speed threshold (Mgas/s) below which detailed statistics are logged
}

// NewBlockChain returns a fully initialised block chain using information
Expand Down Expand Up @@ -372,19 +377,20 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
log.Info("")

bc := &BlockChain{
chainConfig: chainConfig,
cfg: cfg,
db: db,
triedb: triedb,
triegc: prque.New[int64, common.Hash](nil),
chainmu: syncx.NewClosableMutex(),
bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
engine: engine,
logger: cfg.VmConfig.Tracer,
chainConfig: chainConfig,
cfg: cfg,
db: db,
triedb: triedb,
triegc: prque.New[int64, common.Hash](nil),
chainmu: syncx.NewClosableMutex(),
bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
engine: engine,
logger: cfg.VmConfig.Tracer,
slowBlockThreshold: cfg.SlowBlockThreshold,
}
bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
if err != nil {
Expand Down Expand Up @@ -1842,7 +1848,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
// still need re-execution to generate snapshots that are missing
case err != nil && !errors.Is(err, ErrKnownBlock):
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
bc.reportBadBlock(block, nil, err)
return nil, it.index, err
}
// Track the singleton witness from this chain insertion (if any)
Expand Down Expand Up @@ -1910,6 +1916,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
if err != nil {
return nil, it.index, err
}
res.stats.reportMetrics()
res.stats.logSlow(block, bc.slowBlockThreshold)

// Report the import stats before returning the various results
stats.processed++
stats.usedGas += res.usedGas
Expand Down Expand Up @@ -1970,15 +1979,20 @@ type blockProcessingResult struct {
procTime time.Duration
status WriteStatus
witness *stateless.Witness
stats *ExecuteStats
}

// Witness returns the stateless witness recorded for this block processing
// result. It may be nil if no witness was produced for the block.
func (bpr *blockProcessingResult) Witness() *stateless.Witness {
return bpr.witness
}

// Stats returns the execution statistics gathered while the block was
// processed, validated and written.
func (bpr *blockProcessingResult) Stats() *ExecuteStats {
return bpr.stats
}

// ProcessBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) {
func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (result *blockProcessingResult, blockEndErr error) {
var (
err error
startTime = time.Now()
Expand Down Expand Up @@ -2012,16 +2026,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}
// Upload the statistics of reader at the end
defer func() {
stats := prefetch.GetStats()
accountCacheHitPrefetchMeter.Mark(stats.AccountHit)
accountCacheMissPrefetchMeter.Mark(stats.AccountMiss)
storageCacheHitPrefetchMeter.Mark(stats.StorageHit)
storageCacheMissPrefetchMeter.Mark(stats.StorageMiss)
stats = process.GetStats()
accountCacheHitMeter.Mark(stats.AccountHit)
accountCacheMissMeter.Mark(stats.AccountMiss)
storageCacheHitMeter.Mark(stats.StorageHit)
storageCacheMissMeter.Mark(stats.StorageMiss)
pStat := prefetch.GetStats()
accountCacheHitPrefetchMeter.Mark(pStat.AccountHit)
accountCacheMissPrefetchMeter.Mark(pStat.AccountMiss)
storageCacheHitPrefetchMeter.Mark(pStat.StorageHit)
storageCacheMissPrefetchMeter.Mark(pStat.StorageMiss)

rStat := process.GetStats()
accountCacheHitMeter.Mark(rStat.AccountHit)
accountCacheMissMeter.Mark(rStat.AccountMiss)
storageCacheHitMeter.Mark(rStat.StorageHit)
storageCacheMissMeter.Mark(rStat.StorageMiss)

if result != nil {
result.stats.StatePrefetchCacheStats = pStat
result.stats.StateReadCacheStats = rStat
}
}()

go func(start time.Time, throwaway *state.StateDB, block *types.Block) {
Expand Down Expand Up @@ -2078,14 +2098,14 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
pstart := time.Now()
res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig)
if err != nil {
bc.reportBlock(block, res, err)
bc.reportBadBlock(block, res, err)
return nil, err
}
ptime := time.Since(pstart)

vstart := time.Now()
if err := bc.validator.ValidateState(block, statedb, res, false); err != nil {
bc.reportBlock(block, res, err)
bc.reportBadBlock(block, res, err)
return nil, err
}
vtime := time.Since(vstart)
Expand Down Expand Up @@ -2119,26 +2139,28 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}
}

xvtime := time.Since(xvstart)
proctime := time.Since(startTime) // processing + validation + cross validation

var (
xvtime = time.Since(xvstart)
proctime = time.Since(startTime) // processing + validation + cross validation
stats = &ExecuteStats{}
)
// Update the metrics touched during block processing and validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
if statedb.AccountLoaded != 0 {
accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
}
if statedb.StorageLoaded != 0 {
storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
}
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
triehash := statedb.AccountHashes // The time spent on tries hashing
trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation
stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing)
stats.StorageReads = statedb.StorageReads // Storage reads are complete(in processing)
stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation)
stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation)
stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation)

stats.AccountLoaded = statedb.AccountLoaded
stats.AccountUpdated = statedb.AccountUpdated
stats.AccountDeleted = statedb.AccountDeleted
stats.StorageLoaded = statedb.StorageLoaded
stats.StorageUpdated = int(statedb.StorageUpdated.Load())
stats.StorageDeleted = int(statedb.StorageDeleted.Load())

stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads) // The time spent on EVM processing
stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation
stats.CrossValidation = xvtime // The time spent on stateless cross validation

// Write the block to the chain and get the status.
var (
Expand All @@ -2160,24 +2182,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}

// Update the metrics touched during block commit
accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them
stats.AccountCommits = statedb.AccountCommits // Account commits are complete, we can mark them
stats.StorageCommits = statedb.StorageCommits // Storage commits are complete, we can mark them
stats.SnapshotCommit = statedb.SnapshotCommits // Snapshot commits are complete, we can mark them
stats.TrieDBCommit = statedb.TrieDBCommits // Trie database commits are complete, we can mark them
stats.BlockWrite = time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits

blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits)
elapsed := time.Since(startTime) + 1 // prevent zero division
blockInsertTimer.Update(elapsed)

// TODO(rjl493456442) generalize the ResettingTimer
mgasps := float64(res.GasUsed) * 1000 / float64(elapsed)
chainMgaspsMeter.Update(time.Duration(mgasps))
stats.TotalTime = elapsed
stats.MgasPerSecond = float64(res.GasUsed) * 1000 / float64(elapsed)

return &blockProcessingResult{
usedGas: res.GasUsed,
procTime: proctime,
status: status,
witness: witness,
stats: stats,
}, nil
}

Expand Down Expand Up @@ -2649,8 +2669,8 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
return false
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err error) {
// reportBadBlock logs a bad block error.
func (bc *BlockChain) reportBadBlock(block *types.Block, res *ProcessResult, err error) {
var receipts types.Receipts
if res != nil {
receipts = res.Receipts
Expand Down
Loading