cmd/devp2p/internal/ethtest/chain.go (1 addition & 1 deletion)
@@ -76,7 +76,7 @@ func (c *Chain) RootAt(height int) common.Hash {

// ForkID gets the fork id of the chain.
func (c *Chain) ForkID() forkid.ID {
- return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
+ return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
}

// Shorten returns a copy chain of a desired height from the imported
core/blockchain.go (38 additions & 14 deletions)
@@ -315,7 +315,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)

- snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
+ snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), 0, diskRoot, true)
if err != nil {
return nil, err
}
@@ -325,7 +325,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
}
} else {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
- if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
+ if _, err := bc.setHeadBeyondRoot(head.NumberU64(), 0, common.Hash{}, true); err != nil {
return nil, err
}
}
@@ -424,7 +424,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// Rewind the chain in case of an incompatible config upgrade.
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
- bc.SetHead(compat.RewindTo)
+ if compat.RewindToTime > 0 {
+ bc.SetHeadWithTimestamp(compat.RewindToTime)
+ } else {
+ bc.SetHead(compat.RewindToBlock)
+ }
rawdb.WriteChainConfig(db, genesisHash, chainConfig)
}
// Start tx indexer/unindexer if required.
@@ -529,7 +533,16 @@ func (bc *BlockChain) loadLastState() error {
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
- _, err := bc.setHeadBeyondRoot(head, common.Hash{}, false)
+ _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false)
return err
}

+ // SetHeadWithTimestamp rewinds the local chain to a new head that has at most
+ // the given timestamp. Depending on whether the node was fast synced or full
+ // synced and in which state, the method will try to delete minimal data from
+ // disk whilst retaining chain consistency.
+ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
+ _, err := bc.setHeadBeyondRoot(0, timestamp, common.Hash{}, false)
+ return err
+ }
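Taken together with the NewBlockChain hunk above, the rewind flow now splits on how the incompatible fork was scheduled. The sketch below is a hypothetical helper (the name rewindForUpgrade is invented; SetHead, SetHeadWithTimestamp and the ConfigCompatError fields RewindToBlock/RewindToTime are the ones appearing in this diff):

```go
package rewind

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/params"
)

// rewindForUpgrade dispatches between the two rewind modes: forks scheduled
// by timestamp (e.g. Shanghai) rewind by time, all others by block number.
// Hypothetical helper, not part of this PR.
func rewindForUpgrade(bc *core.BlockChain, compat *params.ConfigCompatError) error {
	if compat.RewindToTime > 0 {
		// Roll blocks back until the head's timestamp is <= RewindToTime.
		return bc.SetHeadWithTimestamp(compat.RewindToTime)
	}
	// Roll back to the given block number.
	return bc.SetHead(compat.RewindToBlock)
}
```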

@@ -562,8 +575,12 @@ func (bc *BlockChain) SetSafe(block *types.Block) {
// in which state, the method will try to delete minimal data from disk whilst
// retaining chain consistency.
//
+ // The method also works in timestamp mode if `head == 0` but `time != 0`. In
+ // that case blocks are rolled back until the new head becomes older than or
+ // equal to the requested time. If both `head` and `time` are 0, the chain is
+ // rewound to genesis.
//
// The method returns the block number where the requested root cap was found.
- func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
+ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Hash, repair bool) (uint64, error) {
if !bc.chainmu.TryLock() {
return 0, errChainStopped
}
@@ -577,7 +594,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
pivot := rawdb.ReadLastPivotNumber(bc.db)
frozen, _ := bc.db.Ancients()

- updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
+ updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
// Rewind the blockchain, ensuring we don't end up with a stateless head
// block. Note, depth equality is permitted to allow using SetHead as a
// chain reparation mechanism without deleting any data!
@@ -658,16 +675,18 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
bc.currentFastBlock.Store(newHeadFastBlock)
headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
}
- head := bc.CurrentBlock().NumberU64()
+ var (
+ headHeader = bc.CurrentBlock().Header()
+ headNumber = headHeader.Number.Uint64()
+ )
// If setHead underflowed the freezer threshold and the block processing
// intent afterwards is full block importing, delete the chain segment
// between the stateful-block and the sethead target.
var wipe bool
- if head+1 < frozen {
- wipe = pivot == nil || head >= *pivot
+ if headNumber+1 < frozen {
+ wipe = pivot == nil || headNumber >= *pivot
}
- return head, wipe // Only force wipe if full synced
+ return headHeader, wipe // Only force wipe if full synced
}
// Rewind the header chain, deleting all block bodies until then
delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
@@ -694,13 +713,18 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
// touching the header chain altogether, unless the freezer is broken
if repair {
if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force {
- bc.hc.SetHead(target, updateFn, delFn)
+ bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn)
}
} else {
// Rewind the chain to the requested head and keep going backwards until a
// block with a state is found or fast sync pivot is passed
- log.Warn("Rewinding blockchain", "target", head)
- bc.hc.SetHead(head, updateFn, delFn)
+ if head != 0 || time == 0 {
+ log.Warn("Rewinding blockchain to block", "target", head)
+ bc.hc.SetHead(head, updateFn, delFn)
+ } else {
+ log.Warn("Rewinding blockchain to timestamp", "target", time)
+ bc.hc.SetHeadWithTimestamp(time, updateFn, delFn)
+ }
}
// Clear out any stale content from the caches
bc.bodyCache.Purge()
core/blockchain_test.go (1 addition & 1 deletion)
@@ -4238,7 +4238,7 @@ func TestEIP3651(t *testing.T) {

gspec.Config.BerlinBlock = common.Big0
gspec.Config.LondonBlock = common.Big0
- gspec.Config.ShanghaiBlock = common.Big0
+ gspec.Config.ShanghaiTime = common.Big0
signer := types.LatestSigner(gspec.Config)

_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
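Shanghai is the first fork go-ethereum schedules by timestamp instead of block number, hence the switch from ShanghaiBlock to ShanghaiTime above. At this stage of the PR the field is still a *big.Int (the common.Big0 assignment shows this; later geth releases changed it to *uint64). A minimal sketch of scheduling it in a custom config, with a made-up activation time:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Copy a base config and schedule Shanghai by wall-clock time; the
	// concrete timestamp below is illustrative only.
	config := *params.AllEthashProtocolChanges
	config.ShanghaiTime = big.NewInt(1_681_338_455)
	fmt.Println("Shanghai active once header.Time >=", config.ShanghaiTime)
}
```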
core/forkid/forkid.go (81 additions & 35 deletions)
@@ -24,6 +24,7 @@ import (
"math"
"math/big"
"reflect"
"sort"
"strings"

"github.com/ethereum/go-ethereum/common"
@@ -65,19 +66,28 @@ type ID struct {
// Filter is a fork id filter to validate a remotely advertised ID.
type Filter func(id ID) error

- // NewID calculates the Ethereum fork ID from the chain config, genesis hash, and head.
- func NewID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
+ // NewID calculates the Ethereum fork ID from the chain config, genesis hash, head and time.
+ func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) ID {
// Calculate the starting checksum from the genesis hash
hash := crc32.ChecksumIEEE(genesis[:])

// Calculate the current fork checksum and the next fork block
- var next uint64
- for _, fork := range gatherForks(config) {
+ forks, forksByTime := gatherForks(config)
+ for _, fork := range forks {
if fork <= head {
// Fork already passed, checksum the previous hash and the fork number
hash = checksumUpdate(hash, fork)
continue
}
+ return ID{Hash: checksumToBytes(hash), Next: fork}
+ }
+ var next uint64
+ for _, fork := range forksByTime {
+ if time >= fork {
+ // Fork passed, checksum previous hash and fork time
+ hash = checksumUpdate(hash, fork)
+ continue
+ }
next = fork
break
}
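The ordering matters: NewID folds every passed block-number fork into the CRC32 checksum first, then every passed timestamp fork, each as a big-endian uint64 (assuming the standard checksumUpdate definition in this file). A self-contained sketch with made-up fork numbers:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// update mirrors forkid's checksumUpdate: fold a fork scalar (block number
// or timestamp) into the running CRC32 as a big-endian uint64.
func update(sum uint32, fork uint64) uint32 {
	var blob [8]byte
	binary.BigEndian.PutUint64(blob[:], fork)
	return crc32.Update(sum, crc32.IEEETable, blob[:])
}

func main() {
	// All numbers below are made up for illustration.
	var genesis [32]byte // stand-in for a real genesis hash
	sum := crc32.ChecksumIEEE(genesis[:])

	head, time := uint64(3_000_000), uint64(1_700_000_000)

	// Block-number forks are folded in first, in ascending order...
	for _, fork := range []uint64{1_150_000, 2_463_000} {
		if fork <= head {
			sum = update(sum, fork)
		}
	}
	// ...then timestamp forks, exactly as the two loops in NewID iterate.
	for _, fork := range []uint64{1_681_338_455} {
		if time >= fork {
			sum = update(sum, fork)
		}
	}
	fmt.Printf("fork ID hash: %#08x\n", sum)
}
```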
@@ -90,6 +100,7 @@ func NewIDWithChain(chain Blockchain) ID {
chain.Config(),
chain.Genesis().Hash(),
chain.CurrentHeader().Number.Uint64(),
+ chain.CurrentHeader().Time,
)
}

@@ -99,36 +110,40 @@ func NewFilter(chain Blockchain) Filter {
return newFilter(
chain.Config(),
chain.Genesis().Hash(),
- func() uint64 {
- return chain.CurrentHeader().Number.Uint64()
+ func() (uint64, uint64) {
+ return chain.CurrentHeader().Number.Uint64(), chain.CurrentHeader().Time
},
)
}

// NewStaticFilter creates a filter at block zero.
func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
- head := func() uint64 { return 0 }
+ head := func() (uint64, uint64) { return 0, 0 }
return newFilter(config, genesis, head)
}

// newFilter is the internal version of NewFilter, taking closures as its arguments
// instead of a chain. The reason is to allow testing it without having to simulate
// an entire blockchain.
- func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
+ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() (uint64, uint64)) Filter {
// Calculate all the valid fork hash and fork next combos
var (
- forks = gatherForks(config)
- sums = make([][4]byte, len(forks)+1) // 0th is the genesis
+ forks, forksByTime = gatherForks(config)
+ sums = make([][4]byte, len(forks)+len(forksByTime)+1) // 0th is the genesis
)
+ allForks := append(forks, forksByTime...)
hash := crc32.ChecksumIEEE(genesis[:])
sums[0] = checksumToBytes(hash)
- for i, fork := range forks {
+ for i, fork := range allForks {
hash = checksumUpdate(hash, fork)
sums[i+1] = checksumToBytes(hash)
}
// Add two sentries to simplify the fork checks and avoid special-casing
// the last one.
- forks = append(forks, math.MaxUint64) // Last fork will never be passed
+ if len(forksByTime) == 0 {
+ forks = append(forks, math.MaxUint64)
+ }
+ forksByTime = append(forksByTime, math.MaxUint64) // Last fork will never be passed

// Create a validator that will filter out incompatible chains
return func(id ID) error {
Expand All @@ -151,38 +166,33 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
// the remote, but at this current point in time we don't have enough
// information.
// 4. Reject in all other cases.
- head := headfn()
- for i, fork := range forks {
- // If our head is beyond this fork, continue to the next (we have a dummy
- // fork of maxuint64 as the last item to always fail this check eventually).
- if head >= fork {
- continue
- }

+ verify := func(index int, headOrTime uint64) error {
// Found the first unpassed fork block, check if our current state matches
// the remote checksum (rule #1).
if sums[i] == id.Hash {
if sums[index] == id.Hash {
// Fork checksum matched, check if a remote future fork block already passed
// locally without the local node being aware of it (rule #1a).
- if id.Next > 0 && head >= id.Next {
+ if id.Next > 0 && headOrTime >= id.Next {
return ErrLocalIncompatibleOrStale
}
// Haven't passed locally a remote-only fork, accept the connection (rule #1b).
return nil
}
// The local and remote nodes are in different forks currently, check if the
// remote checksum is a subset of our local forks (rule #2).
- for j := 0; j < i; j++ {
+ for j := 0; j < index; j++ {
if sums[j] == id.Hash {
// Remote checksum is a subset, validate based on the announced next fork
- if forks[j] != id.Next {
+ if allForks[j] != id.Next {
return ErrRemoteStale
}
return nil
}
}
// Remote chain is not a subset of our local one, check if it's a superset by
// any chance, signalling that we're simply out of sync (rule #3).
- for j := i + 1; j < len(sums); j++ {
+ for j := index + 1; j < len(sums); j++ {
if sums[j] == id.Hash {
// Yay, remote checksum is a superset, ignore upcoming forks
return nil
Expand All @@ -191,6 +201,28 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
// No exact, subset or superset match. We are on differing chains, reject.
return ErrLocalIncompatibleOrStale
}

+ head, time := headfn()
+ // Verify forks by block
+ for i, fork := range forks {
+ // If our head is beyond this fork, continue to the next (we have a dummy
+ // fork of maxuint64 as the last item to always fail this check eventually).
+ if head >= fork {
+ continue
+ }
+ return verify(i, head)
+ }
+ // Verify forks by time
+ for i := len(forks); i < len(forks)+len(forksByTime); i++ {
+ fork := forksByTime[i-len(forks)]
+ // If our head timestamp is beyond this fork, continue to the next (we have
+ // a dummy fork of maxuint64 as the last item to always fail this check
+ // eventually).
+ if time >= fork {
+ continue
+ }
+ return verify(i, time)
+ }

log.Error("Impossible fork ID validation", "id", id)
return nil // Something's very wrong, accept rather than reject
}
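As a usage sketch, the exported constructors can be exercised against a remote ID like this; 0xfc64ec04 is commonly cited as mainnet's genesis (Frontier) fork hash and 1150000 is Homestead, but treat the concrete values as illustrative:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// A static filter validates remote IDs as if our head were block 0, time 0.
	filter := forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash)

	// A remote peer still on genesis rules, announcing Homestead as its next
	// fork: accepted under rule #1b since we haven't passed that fork locally.
	err := filter(forkid.ID{Hash: [4]byte{0xfc, 0x64, 0xec, 0x04}, Next: 1_150_000})
	fmt.Println(err) // <nil> if compatible
}
```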
Expand All @@ -212,45 +244,59 @@ func checksumToBytes(hash uint32) [4]byte {
}

// gatherForks gathers all the known forks and creates two sorted lists out of
// them, one for the block-based forks and one for the time-based ones.
- func gatherForks(config *params.ChainConfig) []uint64 {
+ func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) {
// Gather all the fork block numbers via reflection
kind := reflect.TypeOf(params.ChainConfig{})
conf := reflect.ValueOf(config).Elem()

var forks []uint64
+ var forksByTime []uint64
for i := 0; i < kind.NumField(); i++ {
// Fetch the next field and skip non-fork rules
field := kind.Field(i)
+ time := false
if !strings.HasSuffix(field.Name, "Block") {
- continue
+ if !strings.HasSuffix(field.Name, "Time") {
+ continue
+ }
+ time = true
}
if field.Type != reflect.TypeOf(new(big.Int)) {
continue
}
// Extract the fork rule block number or timestamp and aggregate it
rule := conf.Field(i).Interface().(*big.Int)
if rule != nil {
- forks = append(forks, rule.Uint64())
}
}
- // Sort the fork block numbers to permit chronological XOR
- for i := 0; i < len(forks); i++ {
- for j := i + 1; j < len(forks); j++ {
- if forks[i] > forks[j] {
- forks[i], forks[j] = forks[j], forks[i]
+ if time {
+ forksByTime = append(forksByTime, rule.Uint64())
+ } else {
+ forks = append(forks, rule.Uint64())
+ }
}
}

+ sort.Slice(forks, func(i, j int) bool { return forks[i] < forks[j] })
+ sort.Slice(forksByTime, func(i, j int) bool { return forksByTime[i] < forksByTime[j] })

// Deduplicate block numbers applying multiple forks
for i := 1; i < len(forks); i++ {
if forks[i] == forks[i-1] {
forks = append(forks[:i], forks[i+1:]...)
i--
}
}
+ for i := 1; i < len(forksByTime); i++ {
+ if forksByTime[i] == forksByTime[i-1] {
+ forksByTime = append(forksByTime[:i], forksByTime[i+1:]...)
+ i--
+ }
+ }
// Skip any forks in block 0, that's the genesis ruleset
if len(forks) > 0 && forks[0] == 0 {
forks = forks[1:]
}
- return forks
+ if len(forksByTime) > 0 && forksByTime[0] == 0 {
+ forksByTime = forksByTime[1:]
+ }
+ return forks, forksByTime
}
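To picture the reflection walk, here is a standalone sketch that mirrors the Block/Time suffix dispatch and the sorting over a toy struct; the field names are invented and only the suffix convention is taken from the real params.ChainConfig:

```go
package main

import (
	"fmt"
	"math/big"
	"reflect"
	"sort"
	"strings"
)

// toyConfig stands in for params.ChainConfig; only the Block/Time field
// name suffix convention matters for the gathering logic.
type toyConfig struct {
	HomesteadBlock *big.Int
	LondonBlock    *big.Int
	ShanghaiTime   *big.Int
}

func gather(cfg *toyConfig) (forks, forksByTime []uint64) {
	kind := reflect.TypeOf(*cfg)
	conf := reflect.ValueOf(cfg).Elem()
	for i := 0; i < kind.NumField(); i++ {
		field := kind.Field(i)
		byTime := false
		if !strings.HasSuffix(field.Name, "Block") {
			if !strings.HasSuffix(field.Name, "Time") {
				continue // neither a block-based nor a time-based fork rule
			}
			byTime = true
		}
		rule, ok := conf.Field(i).Interface().(*big.Int)
		if !ok || rule == nil {
			continue // unscheduled fork
		}
		if byTime {
			forksByTime = append(forksByTime, rule.Uint64())
		} else {
			forks = append(forks, rule.Uint64())
		}
	}
	// Sort both lists to permit the chronological checksum folding.
	sort.Slice(forks, func(i, j int) bool { return forks[i] < forks[j] })
	sort.Slice(forksByTime, func(i, j int) bool { return forksByTime[i] < forksByTime[j] })
	return forks, forksByTime
}

func main() {
	forks, times := gather(&toyConfig{
		HomesteadBlock: big.NewInt(1_150_000),
		LondonBlock:    big.NewInt(12_965_000),
		ShanghaiTime:   big.NewInt(1_681_338_455),
	})
	fmt.Println(forks, times) // [1150000 12965000] [1681338455]
}
```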