1 change: 1 addition & 0 deletions cmd/geth/chaincmd.go
@@ -96,6 +96,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.NoCompactionFlag,
utils.LogSlowBlockFlag,
utils.MetricsEnabledFlag,
utils.MetricsEnabledExpensiveFlag,
utils.MetricsHTTPFlag,
1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -156,6 +156,7 @@ var (
utils.BeaconGenesisTimeFlag,
utils.BeaconCheckpointFlag,
utils.BeaconCheckpointFileFlag,
utils.LogSlowBlockFlag,
}, utils.NetworkFlags, utils.DatabaseFlags)

rpcFlags = []cli.Flag{
13 changes: 13 additions & 0 deletions cmd/utils/flags.go
@@ -667,6 +667,12 @@ var (
Usage: "Disables db compaction after import",
Category: flags.LoggingCategory,
}
LogSlowBlockFlag = &cli.DurationFlag{
Name: "debug.logslowblock",
Usage: "Block execution time threshold beyond which detailed statistics will be logged (0 means disable)",
Value: ethconfig.Defaults.SlowBlockThreshold,
Category: flags.LoggingCategory,
}

// MISC settings
SyncTargetFlag = &cli.StringFlag{
@@ -1710,6 +1716,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(LogNoHistoryFlag.Name) {
cfg.LogNoHistory = true
}
if ctx.IsSet(LogSlowBlockFlag.Name) {
cfg.SlowBlockThreshold = ctx.Duration(LogSlowBlockFlag.Name)
}
if ctx.IsSet(LogExportCheckpointsFlag.Name) {
cfg.LogExportCheckpoints = ctx.String(LogExportCheckpointsFlag.Name)
}
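Taken together, the hunks above follow geth's usual recipe for a new CLI option: declare the flag, register it with the relevant commands, and copy the value into the config only when the user explicitly set it. Below is a minimal standalone sketch of that pattern with urfave/cli/v2; the flag name, usage string, and the IsSet/Duration calls mirror the diff, while the demo app around them is hypothetical and not part of the PR.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flagdemo", // hypothetical; stands in for geth's app setup
		Flags: []cli.Flag{
			&cli.DurationFlag{
				Name:  "debug.logslowblock",
				Usage: "Block execution time threshold beyond which detailed statistics will be logged (0 means disable)",
				Value: 0, // stands in for ethconfig.Defaults.SlowBlockThreshold
			},
		},
		Action: func(ctx *cli.Context) error {
			// Same guard as SetEthConfig above: only override when set.
			if ctx.IsSet("debug.logslowblock") {
				fmt.Println("slow-block threshold:", ctx.Duration("debug.logslowblock"))
			} else {
				fmt.Println("slow-block logging disabled (default)")
			}
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

Any value accepted by time.ParseDuration works on the command line, so `geth --debug.logslowblock 500ms` or `--debug.logslowblock=2s` both parse; leaving the flag off keeps the zero default, which disables the logger.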
@@ -2281,6 +2290,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
Preimages: ctx.Bool(CachePreimagesFlag.Name),
StateScheme: scheme,
StateHistory: ctx.Uint64(StateHistoryFlag.Name),

// Disable transaction indexing/unindexing.
TxLookupLimit: -1,

@@ -2292,6 +2302,9 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh

// Enable state size tracking if enabled
StateSizeTracking: ctx.Bool(StateSizeTrackingFlag.Name),

// Configure the slow block statistic logger
SlowBlockThreshold: ctx.Duration(LogSlowBlockFlag.Name),
}
if options.ArchiveMode && !options.Preimages {
options.Preimages = true
143 changes: 84 additions & 59 deletions core/blockchain.go
@@ -198,6 +198,10 @@ type BlockChainConfig struct {

// StateSizeTracking indicates whether the state size tracking is enabled.
StateSizeTracking bool

// SlowBlockThreshold is the block execution time threshold beyond which
// detailed statistics will be logged.
SlowBlockThreshold time.Duration
}

// DefaultConfig returns the default config.
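Since MakeChain and SetEthConfig only forward the flag, the threshold can equally be set when constructing a chain programmatically. A small sketch, assuming core.DefaultConfig behaves as its comment above says; only the SlowBlockThreshold field is taken verbatim from the diff.

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Assumption: DefaultConfig returns a usable BlockChainConfig as the
	// comment in the diff states; its exact return type is not shown here.
	cfg := core.DefaultConfig()
	cfg.SlowBlockThreshold = 2 * time.Second // detailed stats for blocks slower than 2s; 0 disables
	fmt.Println("slow-block threshold:", cfg.SlowBlockThreshold)
}
```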
@@ -337,7 +341,8 @@ type BlockChain struct {
logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking

lastForkReadyAlert time.Time // Last time there was a fork readiness print out
lastForkReadyAlert time.Time // Last time there was a fork readiness print out
slowBlockThreshold time.Duration // Block execution time threshold beyond which detailed statistics will be logged
}

// NewBlockChain returns a fully initialised block chain using information
@@ -372,19 +377,20 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
log.Info("")

bc := &BlockChain{
chainConfig: chainConfig,
cfg: cfg,
db: db,
triedb: triedb,
triegc: prque.New[int64, common.Hash](nil),
chainmu: syncx.NewClosableMutex(),
bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
engine: engine,
logger: cfg.VmConfig.Tracer,
chainConfig: chainConfig,
cfg: cfg,
db: db,
triedb: triedb,
triegc: prque.New[int64, common.Hash](nil),
chainmu: syncx.NewClosableMutex(),
bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
engine: engine,
logger: cfg.VmConfig.Tracer,
slowBlockThreshold: cfg.SlowBlockThreshold,
}
bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
if err != nil {
@@ -1847,7 +1853,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
// still need re-execution to generate snapshots that are missing
case err != nil && !errors.Is(err, ErrKnownBlock):
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
bc.reportBadBlock(block, nil, err)
return nil, it.index, err
}
// Track the singleton witness from this chain insertion (if any)
@@ -1915,6 +1921,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
if err != nil {
return nil, it.index, err
}
res.stats.reportMetrics()

// Log slow block only if a single block is inserted (usually after the
// initial sync) to not overwhelm the users.
if len(chain) == 1 {
res.stats.logSlow(block, bc.slowBlockThreshold)
}

// Report the import stats before returning the various results
stats.processed++
stats.usedGas += res.usedGas
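The logSlow body itself is outside this excerpt. From the call site its contract is clear: take the block and the configured threshold, treat a non-positive threshold as disabled, and emit a detailed log line when TotalTime exceeds it. A plausible sketch using only field names visible in this diff; the method body is an assumption, not the PR's implementation (a fuller reconstruction of ExecuteStats follows the ProcessBlock hunks below).

```go
import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// logSlow sketch: emit detailed statistics for blocks whose total import
// time exceeded the configured threshold (0 means the logger is disabled).
func (s *ExecuteStats) logSlow(block *types.Block, threshold time.Duration) {
	if threshold <= 0 || s.TotalTime < threshold {
		return
	}
	log.Warn("Slow block execution",
		"number", block.Number(), "hash", block.Hash(),
		"elapsed", common.PrettyDuration(s.TotalTime),
		"execution", common.PrettyDuration(s.Execution),
		"validation", common.PrettyDuration(s.Validation),
		"accountReads", common.PrettyDuration(s.AccountReads),
		"storageReads", common.PrettyDuration(s.StorageReads),
		"mgasps", s.MgasPerSecond)
}
```

reportMetrics, called unconditionally just before, presumably feeds the same fields into the package-level timers that the old inline code updated directly.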
@@ -1975,15 +1989,20 @@ type blockProcessingResult struct {
procTime time.Duration
status WriteStatus
witness *stateless.Witness
stats *ExecuteStats
}

func (bpr *blockProcessingResult) Witness() *stateless.Witness {
return bpr.witness
}

func (bpr *blockProcessingResult) Stats() *ExecuteStats {
return bpr.stats
}

// ProcessBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) {
func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (result *blockProcessingResult, blockEndErr error) {
var (
err error
startTime = time.Now()
@@ -2017,16 +2036,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}
// Upload the statistics of reader at the end
defer func() {
stats := prefetch.GetStats()
accountCacheHitPrefetchMeter.Mark(stats.AccountHit)
accountCacheMissPrefetchMeter.Mark(stats.AccountMiss)
storageCacheHitPrefetchMeter.Mark(stats.StorageHit)
storageCacheMissPrefetchMeter.Mark(stats.StorageMiss)
stats = process.GetStats()
accountCacheHitMeter.Mark(stats.AccountHit)
accountCacheMissMeter.Mark(stats.AccountMiss)
storageCacheHitMeter.Mark(stats.StorageHit)
storageCacheMissMeter.Mark(stats.StorageMiss)
pStat := prefetch.GetStats()
accountCacheHitPrefetchMeter.Mark(pStat.AccountCacheHit)
accountCacheMissPrefetchMeter.Mark(pStat.AccountCacheMiss)
storageCacheHitPrefetchMeter.Mark(pStat.StorageCacheHit)
storageCacheMissPrefetchMeter.Mark(pStat.StorageCacheMiss)

rStat := process.GetStats()
accountCacheHitMeter.Mark(rStat.AccountCacheHit)
accountCacheMissMeter.Mark(rStat.AccountCacheMiss)
storageCacheHitMeter.Mark(rStat.StorageCacheHit)
storageCacheMissMeter.Mark(rStat.StorageCacheMiss)

if result != nil {
result.stats.StatePrefetchCacheStats = pStat
result.stats.StateReadCacheStats = rStat
}
}()

go func(start time.Time, throwaway *state.StateDB, block *types.Block) {
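Besides renaming the reader-stat fields (AccountHit becomes AccountCacheHit, and so on), the rewritten defer now also snapshots both readers into the result's ExecuteStats, so the slow-block log can include cache behaviour. The stats type returned by GetStats is not shown in this diff; a shape consistent with the call sites would be the following, where the struct name and field types are assumptions and only the field names appear above.

```go
// CacheStats (hypothetical name): counters returned by a state reader's
// GetStats, consumed both by the meters above and by ExecuteStats.
type CacheStats struct {
	AccountCacheHit  int64 // account lookups served from the cache
	AccountCacheMiss int64 // account lookups that fell through to disk
	StorageCacheHit  int64 // storage-slot lookups served from the cache
	StorageCacheMiss int64 // storage-slot lookups that fell through to disk
}
```

int64 fields line up with metrics.Meter.Mark, which the surrounding code calls on each counter.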
@@ -2083,14 +2108,14 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
pstart := time.Now()
res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig)
if err != nil {
bc.reportBlock(block, res, err)
bc.reportBadBlock(block, res, err)
return nil, err
}
ptime := time.Since(pstart)

vstart := time.Now()
if err := bc.validator.ValidateState(block, statedb, res, false); err != nil {
bc.reportBlock(block, res, err)
bc.reportBadBlock(block, res, err)
return nil, err
}
vtime := time.Since(vstart)
@@ -2124,26 +2149,28 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}
}

xvtime := time.Since(xvstart)
proctime := time.Since(startTime) // processing + validation + cross validation

var (
xvtime = time.Since(xvstart)
proctime = time.Since(startTime) // processing + validation + cross validation
stats = &ExecuteStats{}
)
// Update the metrics touched during block processing and validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
if statedb.AccountLoaded != 0 {
accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
}
if statedb.StorageLoaded != 0 {
storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
}
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
triehash := statedb.AccountHashes // The time spent on tries hashing
trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation
stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing)
stats.StorageReads = statedb.StorageReads // Storage reads are complete(in processing)
stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation)
stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation)
stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation)

stats.AccountLoaded = statedb.AccountLoaded
stats.AccountUpdated = statedb.AccountUpdated
stats.AccountDeleted = statedb.AccountDeleted
stats.StorageLoaded = statedb.StorageLoaded
stats.StorageUpdated = int(statedb.StorageUpdated.Load())
stats.StorageDeleted = int(statedb.StorageDeleted.Load())

stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads) // The time spent on EVM processing
stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation
stats.CrossValidation = xvtime // The time spent on stateless cross validation

// Write the block to the chain and get the status.
var (
@@ -2165,24 +2192,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}

// Update the metrics touched during block commit
accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them
stats.AccountCommits = statedb.AccountCommits // Account commits are complete, we can mark them
stats.StorageCommits = statedb.StorageCommits // Storage commits are complete, we can mark them
stats.SnapshotCommit = statedb.SnapshotCommits // Snapshot commits are complete, we can mark them
stats.TrieDBCommit = statedb.TrieDBCommits // Trie database commits are complete, we can mark them
stats.BlockWrite = time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits

blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits)
elapsed := time.Since(startTime) + 1 // prevent zero division
blockInsertTimer.Update(elapsed)

// TODO(rjl493456442) generalize the ResettingTimer
mgasps := float64(res.GasUsed) * 1000 / float64(elapsed)
chainMgaspsMeter.Update(time.Duration(mgasps))
stats.TotalTime = elapsed
stats.MgasPerSecond = float64(res.GasUsed) * 1000 / float64(elapsed)

return &blockProcessingResult{
usedGas: res.GasUsed,
procTime: proctime,
status: status,
witness: witness,
stats: stats,
}, nil
}
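Collecting the assignments across the hunks above gives the full picture of what ExecuteStats carries: every datapoint the old code fed straight into package-level timers, which is what lets reportMetrics and logSlow consume it after the fact. Note the throughput arithmetic: elapsed is in nanoseconds, so gasUsed*1000/elapsed yields millions of gas per second (a 15M-gas block imported in 500ms scores 15e6*1000/5e8 = 30 Mgas/s). Below is a reconstruction of the struct from the fields this diff reads and writes; the grouping, comments, and field types are editorial, and CacheStats is the reader-stats shape sketched earlier.

```go
import "time"

// ExecuteStats reconstructed from its uses in this diff; the actual
// declaration lives outside the excerpt.
type ExecuteStats struct {
	// Wall-clock phases of block import.
	Execution       time.Duration // EVM processing, minus state-read time
	Validation      time.Duration // state validation, minus hashing/update time
	CrossValidation time.Duration // stateless cross validation
	BlockWrite      time.Duration // chain write, minus the commit phases below
	TotalTime       time.Duration // end-to-end import time

	// State access timings and object counts.
	AccountReads, StorageReads                    time.Duration
	AccountUpdates, StorageUpdates                time.Duration
	AccountHashes                                 time.Duration
	AccountLoaded, AccountUpdated, AccountDeleted int
	StorageLoaded, StorageUpdated, StorageDeleted int

	// Commit timings.
	AccountCommits time.Duration
	StorageCommits time.Duration
	SnapshotCommit time.Duration
	TrieDBCommit   time.Duration

	// Throughput in millions of gas per second.
	MgasPerSecond float64

	// Reader cache snapshots from the prefetcher and the live reader.
	StatePrefetchCacheStats CacheStats
	StateReadCacheStats     CacheStats
}
```

Callers that invoke ProcessBlock directly can reach the same numbers through the new Stats() accessor on blockProcessingResult, instead of scraping the global metrics.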

@@ -2667,8 +2692,8 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
return false
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err error) {
// reportBadBlock logs a bad block error.
func (bc *BlockChain) reportBadBlock(block *types.Block, res *ProcessResult, err error) {
var receipts types.Receipts
if res != nil {
receipts = res.Receipts