diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index c5145bbfb73..e535d7d8924 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -96,6 +96,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`, utils.CacheNoPrefetchFlag, utils.CachePreimagesFlag, utils.NoCompactionFlag, + utils.LogSlowBlockFlag, utils.MetricsEnabledFlag, utils.MetricsEnabledExpensiveFlag, utils.MetricsHTTPFlag, diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 2465b52ad1f..bcb868203b5 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -155,6 +155,7 @@ var ( utils.BeaconGenesisTimeFlag, utils.BeaconCheckpointFlag, utils.BeaconCheckpointFileFlag, + utils.LogSlowBlockFlag, }, utils.NetworkFlags, utils.DatabaseFlags) rpcFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c9da08578c9..4f3c2365b93 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -651,6 +651,11 @@ var ( Usage: "Disables db compaction after import", Category: flags.LoggingCategory, } + LogSlowBlockFlag = &cli.Uint64Flag{ + Name: "debug.logslowblock", + Usage: "The block execution speed threshold (Mgas/s) below which detailed statistics are logged", + Category: flags.LoggingCategory, + } // MISC settings SyncTargetFlag = &cli.StringFlag{ @@ -1699,6 +1704,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(LogNoHistoryFlag.Name) { cfg.LogNoHistory = true } + if ctx.IsSet(LogSlowBlockFlag.Name) { + cfg.SlowBlockThreshold = ctx.Uint64(LogSlowBlockFlag.Name) + } if ctx.IsSet(LogExportCheckpointsFlag.Name) { cfg.LogExportCheckpoints = ctx.String(LogExportCheckpointsFlag.Name) } diff --git a/core/blockchain.go b/core/blockchain.go index 30f3da3004a..e9e07fd54d7 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -198,6 +198,10 @@ type BlockChainConfig struct { // StateSizeTracking indicates whether the state size tracking is enabled. 
StateSizeTracking bool + + // SlowBlockThreshold is the block execution speed threshold (Mgas/s) + // below which detailed statistics are logged. + SlowBlockThreshold uint64 } // DefaultConfig returns the default config. @@ -338,6 +342,7 @@ type BlockChain struct { stateSizer *state.SizeTracker // State size tracking lastForkReadyAlert time.Time // Last time there was a fork readiness print out + slowBlockThreshold uint64 // Block execution speed threshold (Mgas/s) below which detailed statistics are logged } // NewBlockChain returns a fully initialised block chain using information @@ -372,19 +377,20 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine, log.Info("") bc := &BlockChain{ - chainConfig: chainConfig, - cfg: cfg, - db: db, - triedb: triedb, - triegc: prque.New[int64, common.Hash](nil), - chainmu: syncx.NewClosableMutex(), - bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), - bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), - receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), - blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), - txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit), - engine: engine, - logger: cfg.VmConfig.Tracer, + chainConfig: chainConfig, + cfg: cfg, + db: db, + triedb: triedb, + triegc: prque.New[int64, common.Hash](nil), + chainmu: syncx.NewClosableMutex(), + bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), + bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), + receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), + blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), + txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit), + engine: engine, + logger: cfg.VmConfig.Tracer, + slowBlockThreshold: cfg.SlowBlockThreshold, } bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped) if err != nil { 
@@ -1842,7 +1848,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness // still need re-execution to generate snapshots that are missing case err != nil && !errors.Is(err, ErrKnownBlock): stats.ignored += len(it.chain) - bc.reportBlock(block, nil, err) + bc.reportBadBlock(block, nil, err) return nil, it.index, err } // Track the singleton witness from this chain insertion (if any) @@ -1910,6 +1916,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness if err != nil { return nil, it.index, err } + res.stats.reportMetrics() + res.stats.logSlow(block, bc.slowBlockThreshold) + // Report the import stats before returning the various results stats.processed++ stats.usedGas += res.usedGas @@ -1970,15 +1979,20 @@ type blockProcessingResult struct { procTime time.Duration status WriteStatus witness *stateless.Witness + stats *ExecuteStats } func (bpr *blockProcessingResult) Witness() *stateless.Witness { return bpr.witness } +func (bpr *blockProcessingResult) Stats() *ExecuteStats { + return bpr.stats +} + // ProcessBlock executes and validates the given block. If there was no error // it writes the block and associated state to database. 
-func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) { +func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (result *blockProcessingResult, blockEndErr error) { var ( err error startTime = time.Now() @@ -2012,16 +2026,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } // Upload the statistics of reader at the end defer func() { - stats := prefetch.GetStats() - accountCacheHitPrefetchMeter.Mark(stats.AccountHit) - accountCacheMissPrefetchMeter.Mark(stats.AccountMiss) - storageCacheHitPrefetchMeter.Mark(stats.StorageHit) - storageCacheMissPrefetchMeter.Mark(stats.StorageMiss) - stats = process.GetStats() - accountCacheHitMeter.Mark(stats.AccountHit) - accountCacheMissMeter.Mark(stats.AccountMiss) - storageCacheHitMeter.Mark(stats.StorageHit) - storageCacheMissMeter.Mark(stats.StorageMiss) + pStat := prefetch.GetStats() + accountCacheHitPrefetchMeter.Mark(pStat.AccountHit) + accountCacheMissPrefetchMeter.Mark(pStat.AccountMiss) + storageCacheHitPrefetchMeter.Mark(pStat.StorageHit) + storageCacheMissPrefetchMeter.Mark(pStat.StorageMiss) + + rStat := process.GetStats() + accountCacheHitMeter.Mark(rStat.AccountHit) + accountCacheMissMeter.Mark(rStat.AccountMiss) + storageCacheHitMeter.Mark(rStat.StorageHit) + storageCacheMissMeter.Mark(rStat.StorageMiss) + + if result != nil { + result.stats.StatePrefetchCacheStats = pStat + result.stats.StateReadCacheStats = rStat + } }() go func(start time.Time, throwaway *state.StateDB, block *types.Block) { @@ -2078,14 +2098,14 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s pstart := time.Now() res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig) if err != nil { - bc.reportBlock(block, res, err) + bc.reportBadBlock(block, res, err) return nil, err } ptime := time.Since(pstart) vstart := 
time.Now() if err := bc.validator.ValidateState(block, statedb, res, false); err != nil { - bc.reportBlock(block, res, err) + bc.reportBadBlock(block, res, err) return nil, err } vtime := time.Since(vstart) @@ -2119,26 +2139,28 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } } - xvtime := time.Since(xvstart) - proctime := time.Since(startTime) // processing + validation + cross validation - + var ( + xvtime = time.Since(xvstart) + proctime = time.Since(startTime) // processing + validation + cross validation + stats = &ExecuteStats{} + ) // Update the metrics touched during block processing and validation - accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) - storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) - if statedb.AccountLoaded != 0 { - accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded)) - } - if statedb.StorageLoaded != 0 { - storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded)) - } - accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation) - storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation) - accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation) - triehash := statedb.AccountHashes // The time spent on tries hashing - trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update - blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing - blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation - blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation + stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing) + stats.StorageReads = 
statedb.StorageReads // Storage reads are complete(in processing) + stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation) + stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation) + stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation) + + stats.AccountLoaded = statedb.AccountLoaded + stats.AccountUpdated = statedb.AccountUpdated + stats.AccountDeleted = statedb.AccountDeleted + stats.StorageLoaded = statedb.StorageLoaded + stats.StorageUpdated = int(statedb.StorageUpdated.Load()) + stats.StorageDeleted = int(statedb.StorageDeleted.Load()) + + stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads) // The time spent on EVM processing + stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation + stats.CrossValidation = xvtime // The time spent on stateless cross validation // Write the block to the chain and get the status. 
var ( @@ -2160,24 +2182,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } // Update the metrics touched during block commit - accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them - storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them - snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them - triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them + stats.AccountCommits = statedb.AccountCommits // Account commits are complete, we can mark them + stats.StorageCommits = statedb.StorageCommits // Storage commits are complete, we can mark them + stats.SnapshotCommit = statedb.SnapshotCommits // Snapshot commits are complete, we can mark them + stats.TrieDBCommit = statedb.TrieDBCommits // Trie database commits are complete, we can mark them + stats.BlockWrite = time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits - blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits) elapsed := time.Since(startTime) + 1 // prevent zero division - blockInsertTimer.Update(elapsed) - - // TODO(rjl493456442) generalize the ResettingTimer - mgasps := float64(res.GasUsed) * 1000 / float64(elapsed) - chainMgaspsMeter.Update(time.Duration(mgasps)) + stats.TotalTime = elapsed + stats.MgasPerSecond = float64(res.GasUsed) * 1000 / float64(elapsed) return &blockProcessingResult{ usedGas: res.GasUsed, procTime: proctime, status: status, witness: witness, + stats: stats, }, nil } @@ -2649,8 +2669,8 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { return false } -// reportBlock logs a bad block error. 
-func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err error) { +// reportBadBlock logs a bad block error. +func (bc *BlockChain) reportBadBlock(block *types.Block, res *ProcessResult, err error) { var receipts types.Receipts if res != nil { receipts = res.Receipts diff --git a/core/blockchain_stats.go b/core/blockchain_stats.go new file mode 100644 index 00000000000..b11793f02d4 --- /dev/null +++ b/core/blockchain_stats.go @@ -0,0 +1,130 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// ExecuteStats includes all the statistics of a block execution in detail.
+type ExecuteStats struct { + // State access times + AccountReads time.Duration // Time spent on the account reads + StorageReads time.Duration // Time spent on the storage reads + AccountHashes time.Duration // Time spent on the account trie hash + AccountUpdates time.Duration // Time spent on the account trie update + AccountCommits time.Duration // Time spent on the account trie commit + StorageUpdates time.Duration // Time spent on the storage trie update + StorageCommits time.Duration // Time spent on the storage trie commit + + AccountLoaded int // Number of accounts loaded + AccountUpdated int // Number of accounts updated + AccountDeleted int // Number of accounts deleted + StorageLoaded int // Number of storage slots loaded + StorageUpdated int // Number of storage slots updated + StorageDeleted int // Number of storage slots deleted + + Execution time.Duration // Time spent on the EVM execution + Validation time.Duration // Time spent on the block validation + CrossValidation time.Duration // Optional, time spent on the block cross validation + SnapshotCommit time.Duration // Time spent on snapshot commit + TrieDBCommit time.Duration // Time spent on database commit + BlockWrite time.Duration // Time spent on block write + TotalTime time.Duration // The total time spent on block execution + MgasPerSecond float64 // The million gas processed per second + + // Cache hit rates + StateReadCacheStats state.ReaderStats + StatePrefetchCacheStats state.ReaderStats +} + +// reportMetrics uploads execution statistics to the metrics system.
+func (s *ExecuteStats) reportMetrics() { + accountReadTimer.Update(s.AccountReads) // Account reads are complete(in processing) + storageReadTimer.Update(s.StorageReads) // Storage reads are complete(in processing) + if s.AccountLoaded != 0 { + accountReadSingleTimer.Update(s.AccountReads / time.Duration(s.AccountLoaded)) + } + if s.StorageLoaded != 0 { + storageReadSingleTimer.Update(s.StorageReads / time.Duration(s.StorageLoaded)) + } + + accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation) + storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation) + accountHashTimer.Update(s.AccountHashes) // Account hashes are complete(in validation) + + accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them + storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them + + blockExecutionTimer.Update(s.Execution) // The time spent on EVM processing + blockValidationTimer.Update(s.Validation) // The time spent on block validation + blockCrossValidationTimer.Update(s.CrossValidation) // The time spent on stateless cross validation + snapshotCommitTimer.Update(s.SnapshotCommit) // Snapshot commits are complete, we can mark them + triedbCommitTimer.Update(s.TrieDBCommit) // Trie database commits are complete, we can mark them + blockWriteTimer.Update(s.BlockWrite) // The time spent on block write + blockInsertTimer.Update(s.TotalTime) // The total time spent on block execution + chainMgaspsMeter.Update(time.Duration(s.MgasPerSecond)) // TODO(rjl493456442) generalize the ResettingTimer + + // Cache hit rates. NOTE(review): ProcessBlock's deferred stats upload already marks all of these cache meters; insertChain then calls reportMetrics, so on the import path every cache meter is marked twice — confirm and de-duplicate (mark in one place only). + accountCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.AccountHit) + accountCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.AccountMiss) + storageCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.StorageHit) + storageCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.StorageMiss) + 
accountCacheHitMeter.Mark(s.StateReadCacheStats.AccountHit) + accountCacheMissMeter.Mark(s.StateReadCacheStats.AccountMiss) + storageCacheHitMeter.Mark(s.StateReadCacheStats.StorageHit) + storageCacheMissMeter.Mark(s.StateReadCacheStats.StorageMiss) +} + +// logSlow prints the detailed execution statistics if the block is regarded as slow. +func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold uint64) { + if slowBlockThreshold == 0 || s.MgasPerSecond == 0 { + return + } + if s.MgasPerSecond > float64(slowBlockThreshold) { + return + } + msg := fmt.Sprintf(` +########## SLOW BLOCK ######### +Block: %v (%#x) txs: %d, mgasps: %.2f + +EVM execution: %v +Validation: %v +DB commit: %v +Block write: %v +Account read: %v +Storage read: %v +State hash: %v +Total: %v + +State read cache: %s + +############################## +`, block.Number(), block.Hash(), len(block.Transactions()), s.MgasPerSecond, + common.PrettyDuration(s.Execution), common.PrettyDuration(s.Validation+s.CrossValidation), common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit), + common.PrettyDuration(s.BlockWrite), common.PrettyDuration(s.AccountReads), common.PrettyDuration(s.StorageReads), + common.PrettyDuration(s.AccountHashes+s.AccountCommits+s.AccountUpdates+s.StorageCommits+s.StorageUpdates), common.PrettyDuration(s.TotalTime), + s.StateReadCacheStats) + log.Info(msg) +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go index b749798f9c3..3e3053d9bf4 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -162,12 +162,12 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { } res, err := blockchain.processor.Process(block, statedb, vm.Config{}) if err != nil { - blockchain.reportBlock(block, res, err) + blockchain.reportBadBlock(block, res, err) return err } err = blockchain.validator.ValidateState(block, statedb, res, false) if err != nil { - blockchain.reportBlock(block, res, err) + blockchain.reportBadBlock(block, res, err) 
return err } diff --git a/core/state/reader.go b/core/state/reader.go index 3e8b31b6be3..c1a97fec362 100644 --- a/core/state/reader.go +++ b/core/state/reader.go @@ -18,6 +18,7 @@ package state import ( "errors" + "fmt" "sync" "sync/atomic" @@ -92,6 +93,11 @@ type ReaderStats struct { StorageMiss int64 } +// String implements fmt.Stringer, returning string format statistics. +func (s ReaderStats) String() string { + return fmt.Sprintf("account (hit: %d, miss: %d), storage (hit: %d, miss: %d)", s.AccountHit, s.AccountMiss, s.StorageHit, s.StorageMiss) +} + // ReaderWithStats wraps the additional method to retrieve the reader statistics from. type ReaderWithStats interface { Reader diff --git a/eth/api_debug.go b/eth/api_debug.go index 892e1032134..db1b842e90f 100644 --- a/eth/api_debug.go +++ b/eth/api_debug.go @@ -499,17 +499,14 @@ func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (*stateless.ExtWitness if err != nil { return &stateless.ExtWitness{}, fmt.Errorf("block number %v not found", bn) } - parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) if parent == nil { return &stateless.ExtWitness{}, fmt.Errorf("block number %v found, but parent missing", bn) } - result, err := bc.ProcessBlock(parent.Root, block, false, true) if err != nil { return nil, err } - return result.Witness().ToExtWitness(), nil } @@ -519,16 +516,13 @@ func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (*stateless.ExtWit if block == nil { return &stateless.ExtWitness{}, fmt.Errorf("block hash %x not found", hash) } - parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) if parent == nil { return &stateless.ExtWitness{}, fmt.Errorf("block number %x found, but parent missing", hash) } - result, err := bc.ProcessBlock(parent.Root, block, false, true) if err != nil { return nil, err } - return result.Witness().ToExtWitness(), nil } diff --git a/eth/backend.go b/eth/backend.go index 85095618222..95ae9d4a41f 100644 --- a/eth/backend.go +++ b/eth/backend.go 
@@ -244,6 +244,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // - DATADIR/triedb/verkle.journal TrieJournalDirectory: stack.ResolvePath("triedb"), StateSizeTracking: config.EnableStateSizeTracking, + SlowBlockThreshold: config.SlowBlockThreshold, } ) if config.VMTrace != "" { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 6020387bcdb..34ed1798c1c 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -116,6 +116,10 @@ type Config struct { // presence of these blocks for every new peer connection. RequiredBlocks map[uint64]common.Hash `toml:"-"` + // SlowBlockThreshold is the block execution speed threshold (Mgas/s) + // below which detailed statistics are logged. + SlowBlockThreshold uint64 `toml:",omitempty"` + // Database options SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 6f6e541368f..179e6b20eb6 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -33,6 +33,7 @@ func (c Config) MarshalTOML() (interface{}, error) { StateHistory uint64 `toml:",omitempty"` StateScheme string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` + SlowBlockThreshold uint64 `toml:",omitempty"` SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` DatabaseCache int @@ -80,6 +81,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.StateHistory = c.StateHistory enc.StateScheme = c.StateScheme enc.RequiredBlocks = c.RequiredBlocks + enc.SlowBlockThreshold = c.SlowBlockThreshold enc.SkipBcVersionCheck = c.SkipBcVersionCheck enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseCache = c.DatabaseCache @@ -131,6 +133,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { StateHistory *uint64 `toml:",omitempty"` StateScheme *string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` + SlowBlockThreshold *uint64 
`toml:",omitempty"` SkipBcVersionCheck *bool `toml:"-"` DatabaseHandles *int `toml:"-"` DatabaseCache *int @@ -213,6 +216,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.RequiredBlocks != nil { c.RequiredBlocks = dec.RequiredBlocks } + if dec.SlowBlockThreshold != nil { + c.SlowBlockThreshold = *dec.SlowBlockThreshold + } if dec.SkipBcVersionCheck != nil { c.SkipBcVersionCheck = *dec.SkipBcVersionCheck }