Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions gossip/c_block_callbacks.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,11 @@ var (
blockExecutionTimer = metrics.GetOrRegisterTimer("chain/execution", nil)
blockWriteTimer = metrics.GetOrRegisterTimer("chain/write", nil)
blockAgeGauge = metrics.GetOrRegisterGauge("chain/block/age", nil)

_ = metrics.GetOrRegisterMeter("chain/reorg/executes", nil)
_ = metrics.GetOrRegisterMeter("chain/reorg/add", nil)
_ = metrics.GetOrRegisterMeter("chain/reorg/drop", nil)
_ = metrics.GetOrRegisterMeter("chain/reorg/invalidTx", nil)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@rus-alex what is the point of adding metrics that will never be updated?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The point is to expose the same set of metrics as geth. As I remember, that was one of the goals of #290.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree we should add as many of geth's metrics as we can. However, if we cannot fill a metric with any data and it is created unused, it seems useless to me — is there some use case that needs these metrics to be present even though they will never hold a meaningful value?

Copy link
Contributor Author

@rus-alex rus-alex Sep 13, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It was just a way of saying: we didn't forget to implement such metrics, and ours are better than geth's.
Agreed, it is overkill.

)

type ExtendedTxPosition struct {
Expand Down Expand Up @@ -394,6 +399,8 @@ func consensusCallbackBeginBlockFn(
storageUpdateTimer.Update(statedb.StorageUpdates)
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads)
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads)

// Update the metrics touched during block validation
accountHashTimer.Update(statedb.AccountHashes)
storageHashTimer.Update(statedb.StorageHashes)
triehash := statedb.AccountHashes + statedb.StorageHashes
Expand Down
20 changes: 16 additions & 4 deletions gossip/c_event_callbacks.go
Original file line number Diff line number Diff line change
@@ -1,15 +1,18 @@
package gossip

import (
"context"
"errors"
"math/big"
"sync/atomic"
"time"

"github.com/Fantom-foundation/lachesis-base/gossip/dagprocessor"
"github.com/Fantom-foundation/lachesis-base/hash"
"github.com/Fantom-foundation/lachesis-base/inter/dag"
"github.com/Fantom-foundation/lachesis-base/inter/idx"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/metrics"

"github.com/Fantom-foundation/go-opera/eventcheck"
"github.com/Fantom-foundation/go-opera/eventcheck/epochcheck"
Expand All @@ -28,6 +31,10 @@ var (
errDirtyEvmSnap = errors.New("EVM snapshot is dirty")
)

// blockValidationTimer measures the time spent validating an event
// before it is passed on to the consensus engine
// (registered under "chain/validation" to mirror geth's metric set).
var blockValidationTimer = metrics.GetOrRegisterTimer("chain/validation", nil)

func (s *Service) buildEvent(e *inter.MutableEventPayload, onIndexed func()) error {
// set some unique ID
e.SetID(s.uniqueEventIDs.sample())
Expand Down Expand Up @@ -69,7 +76,7 @@ func (s *Service) buildEvent(e *inter.MutableEventPayload, onIndexed func()) err
}

// processSavedEvent performs processing which depends on event being saved in DB
func (s *Service) processSavedEvent(e *inter.EventPayload, es *iblockproc.EpochState) error {
func (s *Service) processSavedEvent(ctx context.Context, e *inter.EventPayload, es *iblockproc.EpochState) error {
err := s.dagIndexer.Add(e)
if err != nil {
return err
Expand All @@ -80,18 +87,21 @@ func (s *Service) processSavedEvent(e *inter.EventPayload, es *iblockproc.EpochS
return errWrongMedianTime
}

begin := ctx.Value("startOfValidation").(time.Time)
blockValidationTimer.Update(time.Since(begin))

// aBFT processing
return s.engine.Process(e)
}

// saveAndProcessEvent deletes event in a case if it fails validation during event processing
func (s *Service) saveAndProcessEvent(e *inter.EventPayload, es *iblockproc.EpochState) error {
func (s *Service) saveAndProcessEvent(ctx context.Context, e *inter.EventPayload, es *iblockproc.EpochState) error {
fixEventTxHashes(e)
// indexing event
s.store.SetEvent(e)
defer s.dagIndexer.DropNotFlushed()

err := s.processSavedEvent(e, es)
err := s.processSavedEvent(ctx, e, es)
if err != nil {
s.store.DelEvent(e.ID())
return err
Expand Down Expand Up @@ -191,6 +201,8 @@ func (s *Service) processEvent(e *inter.EventPayload) error {
atomic.StoreUint32(&s.eventBusyFlag, 1)
defer atomic.StoreUint32(&s.eventBusyFlag, 0)

ctx := context.WithValue(context.Background(), "startOfValidation", time.Now())

// repeat the checks under the mutex which may depend on volatile data
if s.store.HasEvent(e.ID()) {
return eventcheck.ErrAlreadyConnectedEvent
Expand Down Expand Up @@ -220,7 +232,7 @@ func (s *Service) processEvent(e *inter.EventPayload) error {
return err
}

err = s.saveAndProcessEvent(e, &es)
err = s.saveAndProcessEvent(ctx, e, &es)
if err != nil {
return err
}
Expand Down