Skip to content

Commit 71817f3

Browse files
authored
eth/catalyst, eth/downloader: expose more sync information (#28584)
This change exposes more information from sync module internally
1 parent 5b57727 commit 71817f3

File tree

4 files changed

+37
-36
lines changed

4 files changed

+37
-36
lines changed

eth/catalyst/api.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -611,7 +611,8 @@ func (api *ConsensusAPI) delayPayloadImport(block *types.Block) (engine.PayloadS
611611
// Although we don't want to trigger a sync, if there is one already in
612612
// progress, try to extend it with the current payload request to relieve
613613
// some strain from the forkchoice update.
614-
if err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header()); err == nil {
614+
err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header())
615+
if err == nil {
615616
log.Debug("Payload accepted for sync extension", "number", block.NumberU64(), "hash", block.Hash())
616617
return engine.PayloadStatusV1{Status: engine.SYNCING}, nil
617618
}
@@ -623,12 +624,12 @@ func (api *ConsensusAPI) delayPayloadImport(block *types.Block) (engine.PayloadS
623624
// In full sync mode, failure to import a well-formed block can only mean
624625
// that the parent state is missing and the syncer rejected extending the
625626
// current cycle with the new payload.
626-
log.Warn("Ignoring payload with missing parent", "number", block.NumberU64(), "hash", block.Hash(), "parent", block.ParentHash())
627+
log.Warn("Ignoring payload with missing parent", "number", block.NumberU64(), "hash", block.Hash(), "parent", block.ParentHash(), "reason", err)
627628
} else {
628629
// In non-full sync mode (i.e. snap sync) all payloads are rejected until
629630
// snap sync terminates as snap sync relies on direct database injections
630631
// and cannot afford concurrent out-of-band modifications via imports.
631-
log.Warn("Ignoring payload while snap syncing", "number", block.NumberU64(), "hash", block.Hash())
632+
log.Warn("Ignoring payload while snap syncing", "number", block.NumberU64(), "hash", block.Hash(), "reason", err)
632633
}
633634
return engine.PayloadStatusV1{Status: engine.SYNCING}, nil
634635
}

eth/downloader/skeleton.go

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -69,9 +69,17 @@ var errSyncReorged = errors.New("sync reorged")
6969
// might still be propagating.
7070
var errTerminated = errors.New("terminated")
7171

72-
// errReorgDenied is returned if an attempt is made to extend the beacon chain
73-
// with a new header, but it does not link up to the existing sync.
74-
var errReorgDenied = errors.New("non-forced head reorg denied")
72+
// errChainReorged is an internal helper error to signal that the header chain
73+
// of the current sync cycle was (partially) reorged.
74+
var errChainReorged = errors.New("chain reorged")
75+
76+
// errChainGapped is an internal helper error to signal that the header chain
77+
// of the current sync cycle is gapped with the one advertised by the consensus client.
78+
var errChainGapped = errors.New("chain gapped")
79+
80+
// errChainForked is an internal helper error to signal that the header chain
81+
// of the current sync cycle is forked with the one advertised by consensus client.
82+
var errChainForked = errors.New("chain forked")
7583

7684
func init() {
7785
// Tuning parameters is nice, but the scratch space must be assignable in
@@ -271,9 +279,9 @@ func (s *skeleton) startup() {
271279
newhead, err := s.sync(head)
272280
switch {
273281
case err == errSyncLinked:
274-
// Sync cycle linked up to the genesis block. Tear down the loop
275-
// and restart it so, it can properly notify the backfiller. Don't
276-
// account a new head.
282+
// Sync cycle linked up to the genesis block, or the existing chain
283+
// segment. Tear down the loop and restart it so it can properly
284+
// notify the backfiller. Don't account a new head.
277285
head = nil
278286

279287
case err == errSyncMerged:
@@ -457,15 +465,16 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
457465
// we don't seamlessly integrate reorgs to keep things simple. If the
458466
// network starts doing many mini reorgs, it might be worthwhile handling
459467
// a limited depth without an error.
460-
if reorged := s.processNewHead(event.header, event.final, event.force); reorged {
468+
if err := s.processNewHead(event.header, event.final); err != nil {
461469
// If a reorg is needed, and we're forcing the new head, signal
462470
// the syncer to tear down and start over. Otherwise, drop the
463471
// non-force reorg.
464472
if event.force {
465473
event.errc <- nil // forced head reorg accepted
474+
log.Info("Restarting sync cycle", "reason", err)
466475
return event.header, errSyncReorged
467476
}
468-
event.errc <- errReorgDenied
477+
event.errc <- err
469478
continue
470479
}
471480
event.errc <- nil // head extension accepted
@@ -610,7 +619,7 @@ func (s *skeleton) saveSyncStatus(db ethdb.KeyValueWriter) {
610619
// accepts and integrates it into the skeleton or requests a reorg. Upon reorg,
611620
// the syncer will tear itself down and restart with a fresh head. It is simpler
612621
// to reconstruct the sync state than to mutate it and hope for the best.
613-
func (s *skeleton) processNewHead(head *types.Header, final *types.Header, force bool) bool {
622+
func (s *skeleton) processNewHead(head *types.Header, final *types.Header) error {
614623
// If a new finalized block was announced, update the sync process independent
615624
// of what happens with the sync head below
616625
if final != nil {
@@ -631,26 +640,17 @@ func (s *skeleton) processNewHead(head *types.Header, final *types.Header, force
631640
// once more, ignore it instead of tearing down sync for a noop.
632641
if lastchain.Head == lastchain.Tail {
633642
if current := rawdb.ReadSkeletonHeader(s.db, number); current.Hash() == head.Hash() {
634-
return false
643+
return nil
635644
}
636645
}
637646
// Not a noop / double head announce, abort with a reorg
638-
if force {
639-
log.Warn("Beacon chain reorged", "tail", lastchain.Tail, "head", lastchain.Head, "newHead", number)
640-
}
641-
return true
647+
return fmt.Errorf("%w, tail: %d, head: %d, newHead: %d", errChainReorged, lastchain.Tail, lastchain.Head, number)
642648
}
643649
if lastchain.Head+1 < number {
644-
if force {
645-
log.Warn("Beacon chain gapped", "head", lastchain.Head, "newHead", number)
646-
}
647-
return true
650+
return fmt.Errorf("%w, head: %d, newHead: %d", errChainGapped, lastchain.Head, number)
648651
}
649652
if parent := rawdb.ReadSkeletonHeader(s.db, number-1); parent.Hash() != head.ParentHash {
650-
if force {
651-
log.Warn("Beacon chain forked", "ancestor", number-1, "hash", parent.Hash(), "want", head.ParentHash)
652-
}
653-
return true
653+
return fmt.Errorf("%w, ancestor: %d, hash: %s, want: %s", errChainForked, number-1, parent.Hash(), head.ParentHash)
654654
}
655655
// New header seems to be in the last subchain range. Unwind any extra headers
656656
// from the chain tip and insert the new head. We won't delete any trimmed
@@ -666,7 +666,7 @@ func (s *skeleton) processNewHead(head *types.Header, final *types.Header, force
666666
if err := batch.Write(); err != nil {
667667
log.Crit("Failed to write skeleton sync status", "err", err)
668668
}
669-
return false
669+
return nil
670670
}
671671

672672
// assignTasks attempts to match idle peers to pending header retrievals.

eth/downloader/skeleton_test.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -434,7 +434,7 @@ func TestSkeletonSyncExtend(t *testing.T) {
434434
newstate: []*subchain{
435435
{Head: 49, Tail: 49},
436436
},
437-
err: errReorgDenied,
437+
err: errChainReorged,
438438
},
439439
// Initialize a sync and try to extend it with a number-wise sequential
440440
// header, but a hash wise non-linking one.
@@ -444,7 +444,7 @@ func TestSkeletonSyncExtend(t *testing.T) {
444444
newstate: []*subchain{
445445
{Head: 49, Tail: 49},
446446
},
447-
err: errReorgDenied,
447+
err: errChainForked,
448448
},
449449
// Initialize a sync and try to extend it with a non-linking future block.
450450
{
@@ -453,7 +453,7 @@ func TestSkeletonSyncExtend(t *testing.T) {
453453
newstate: []*subchain{
454454
{Head: 49, Tail: 49},
455455
},
456-
err: errReorgDenied,
456+
err: errChainGapped,
457457
},
458458
// Initialize a sync and try to extend it with a past canonical block.
459459
{
@@ -462,7 +462,7 @@ func TestSkeletonSyncExtend(t *testing.T) {
462462
newstate: []*subchain{
463463
{Head: 50, Tail: 50},
464464
},
465-
err: errReorgDenied,
465+
err: errChainReorged,
466466
},
467467
// Initialize a sync and try to extend it with a past sidechain block.
468468
{
@@ -471,7 +471,7 @@ func TestSkeletonSyncExtend(t *testing.T) {
471471
newstate: []*subchain{
472472
{Head: 50, Tail: 50},
473473
},
474-
err: errReorgDenied,
474+
err: errChainReorged,
475475
},
476476
}
477477
for i, tt := range tests {
@@ -487,7 +487,7 @@ func TestSkeletonSyncExtend(t *testing.T) {
487487
skeleton.Sync(tt.head, nil, true)
488488

489489
<-wait
490-
if err := skeleton.Sync(tt.extend, nil, false); err != tt.err {
490+
if err := skeleton.Sync(tt.extend, nil, false); !errors.Is(err, tt.err) {
491491
t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
492492
}
493493
skeleton.Terminate()

trie/verkle_test.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,24 +31,24 @@ import (
3131

3232
var (
3333
accounts = map[common.Address]*types.StateAccount{
34-
common.Address{1}: {
34+
{1}: {
3535
Nonce: 100,
3636
Balance: big.NewInt(100),
3737
CodeHash: common.Hash{0x1}.Bytes(),
3838
},
39-
common.Address{2}: {
39+
{2}: {
4040
Nonce: 200,
4141
Balance: big.NewInt(200),
4242
CodeHash: common.Hash{0x2}.Bytes(),
4343
},
4444
}
4545
storages = map[common.Address]map[common.Hash][]byte{
46-
common.Address{1}: {
46+
{1}: {
4747
common.Hash{10}: []byte{10},
4848
common.Hash{11}: []byte{11},
4949
common.MaxHash: []byte{0xff},
5050
},
51-
common.Address{2}: {
51+
{2}: {
5252
common.Hash{20}: []byte{20},
5353
common.Hash{21}: []byte{21},
5454
common.MaxHash: []byte{0xff},

0 commit comments

Comments
 (0)