From 01053b3fe892f8c471fc3e9e0d20f95df8840deb Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Thu, 7 Aug 2025 16:08:25 -0400 Subject: [PATCH 01/26] first pass --- consensus/dummy/consensus.go | 26 ++ plugin/evm/block.go | 6 + plugin/evm/config/config.go | 2 +- plugin/evm/config/config_test.go | 8 +- plugin/evm/extension/config.go | 175 ++++++++++++ plugin/evm/message/codec.go | 4 +- plugin/evm/message/handler.go | 5 +- plugin/evm/message/leafs_request.go | 28 +- plugin/evm/message/leafs_request_test.go | 63 ----- plugin/evm/message/syncable.go | 83 +----- plugin/evm/network_handler.go | 33 ++- .../{syncervm_client.go => sync/client.go} | 248 +++++++++++------- .../{syncervm_server.go => sync/server.go} | 65 +++-- plugin/evm/syncervm_test.go | 26 +- plugin/evm/vm.go | 180 +++++++------ sync/client/client_test.go | 14 +- sync/client/stats/stats.go | 29 +- .../client/{mock_client.go => test_client.go} | 47 ++-- sync/handlers/leafs_request.go | 34 +-- sync/handlers/leafs_request_test.go | 2 +- sync/handlers/stats/stats.go | 5 +- sync/statesync/code_syncer_test.go | 2 +- sync/statesync/state_syncer.go | 16 +- sync/statesync/sync_test.go | 154 +++++++---- 24 files changed, 729 insertions(+), 526 deletions(-) create mode 100644 plugin/evm/extension/config.go rename plugin/evm/{syncervm_client.go => sync/client.go} (56%) rename plugin/evm/{syncervm_server.go => sync/server.go} (61%) rename sync/client/{mock_client.go => test_client.go} (77%) diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index 1fa100fb93..21c67bb877 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -41,6 +41,32 @@ type Mode struct { } type ( + OnFinalizeAndAssembleCallbackType = func( + header *types.Header, + parent *types.Header, + state *state.StateDB, + txs []*types.Transaction, + ) ( + extraData []byte, + blockFeeContribution *big.Int, + extDataGasUsed *big.Int, + err error, + ) + + OnExtraStateChangeType = func( + block *types.Block, + parent *types.Header, + statedb *state.StateDB, + ) ( + blockFeeContribution *big.Int, + extDataGasUsed *big.Int, + err error, + ) + + ConsensusCallbacks struct { + OnFinalizeAndAssemble OnFinalizeAndAssembleCallbackType + OnExtraStateChange OnExtraStateChangeType + } DummyEngine struct { clock *mockable.Clock consensusMode Mode diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 1a8dbb2d49..f3d9f636a0 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" "github.com/ava-labs/subnet-evm/plugin/evm/header" + "github.com/ava-labs/subnet-evm/plugin/evm/sync" "github.com/ava-labs/subnet-evm/precompile/precompileconfig" "github.com/ava-labs/subnet-evm/predicate" @@ -30,6 +31,7 @@ import ( var ( _ snowman.Block = (*Block)(nil) _ block.WithVerifyContext = (*Block)(nil) + _ sync.EthBlockWrapper = (*Block)(nil) ) // Block implements the snowman.Block interface @@ -273,3 +275,7 @@ func (b *Block) Bytes() []byte { } func (b *Block) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID()) } + +func (b *Block) GetEthBlock() *types.Block { + return b.ethBlock +} diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go index 5a42032f7b..89ad5aa5b6 100644 --- a/plugin/evm/config/config.go +++ b/plugin/evm/config/config.go @@ -191,7 +191,7 @@ type Config struct { MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` // Sync settings - StateSyncEnabled bool 
`json:"state-sync-enabled"` + StateSyncEnabled *bool `json:"state-sync-enabled"` // Pointer distinguishes false (no state sync) and not set (state sync only at genesis). StateSyncSkipResume bool `json:"state-sync-skip-resume"` // Forces state sync to use the highest available summary block StateSyncServerTrieCache int `json:"state-sync-server-trie-cache"` StateSyncIDs string `json:"state-sync-ids"` diff --git a/plugin/evm/config/config_test.go b/plugin/evm/config/config_test.go index 239e2d2cfa..87d23641cf 100644 --- a/plugin/evm/config/config_test.go +++ b/plugin/evm/config/config_test.go @@ -13,6 +13,12 @@ import ( "github.com/stretchr/testify/assert" ) +// newTrue returns a pointer to a bool that is true +func newTrue() *bool { + b := true + return &b +} + func TestUnmarshalConfig(t *testing.T) { tests := []struct { name string @@ -62,7 +68,7 @@ func TestUnmarshalConfig(t *testing.T) { { "state sync enabled", []byte(`{"state-sync-enabled":true}`), - Config{StateSyncEnabled: true}, + Config{StateSyncEnabled: newTrue()}, false, }, { diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go new file mode 100644 index 0000000000..bf1bf3660a --- /dev/null +++ b/plugin/evm/extension/config.go @@ -0,0 +1,175 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package extension + +import ( + "context" + "errors" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/subnet-evm/consensus/dummy" + "github.com/ava-labs/subnet-evm/core" + "github.com/ava-labs/subnet-evm/params" + "github.com/ava-labs/subnet-evm/params/extras" + "github.com/ava-labs/subnet-evm/plugin/evm/config" + "github.com/ava-labs/subnet-evm/plugin/evm/message" + "github.com/ava-labs/subnet-evm/plugin/evm/sync" + "github.com/ava-labs/subnet-evm/sync/handlers" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/types" +) + +var ( + errNilConfig = errors.New("nil extension config") + errNilSyncSummaryProvider = errors.New("nil sync summary provider") + errNilSyncableParser = errors.New("nil syncable parser") + errNilClock = errors.New("nil clock") +) + +type ExtensibleVM interface { + // SetExtensionConfig sets the configuration for the VM extension + // Should be called before any other method and only once + SetExtensionConfig(config *Config) error + // NewClient returns a client to send messages with for the given protocol + NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client + // AddHandler registers a server handler for an application protocol + AddHandler(protocol uint64, handler p2p.Handler) error + // GetExtendedBlock returns the VMBlock for the given ID or an error if the block is not found + GetExtendedBlock(context.Context, ids.ID) (ExtendedBlock, error) + // LastAcceptedExtendedBlock returns the last accepted VM block + LastAcceptedExtendedBlock() ExtendedBlock + // ChainConfig returns the chain config for the VM + ChainConfig() *params.ChainConfig + // P2PValidators returns the validators for the network + P2PValidators() *p2p.Validators + // 
Blockchain returns the blockchain client + Blockchain() *core.BlockChain + // Config returns the configuration for the VM + Config() config.Config + // MetricRegistry returns the metric registry for the VM + MetricRegistry() *prometheus.Registry + // ReadLastAccepted returns the last accepted block hash and height + ReadLastAccepted() (common.Hash, uint64, error) + // VersionDB returns the versioned database for the VM + VersionDB() *versiondb.Database +} + +// InnerVM is the interface that must be implemented by the VM +// that's being wrapped by the extension +type InnerVM interface { + ExtensibleVM + avalanchecommon.VM + block.ChainVM + block.BuildBlockWithContextChainVM + block.StateSyncableVM +} + +// ExtendedBlock is a block that can be used by the extension +type ExtendedBlock interface { + snowman.Block + GetEthBlock() *types.Block + GetBlockExtension() BlockExtension +} + +type BlockExtender interface { + // NewBlockExtension is called when a new block is created + NewBlockExtension(b ExtendedBlock) (BlockExtension, error) +} + +// BlockExtension allows the VM extension to handle block processing events. +type BlockExtension interface { + // SyntacticVerify verifies the block syntactically + // it can be implemented to extend inner block verification + SyntacticVerify(rules extras.Rules) error + // SemanticVerify verifies the block semantically + // it can be implemented to extend inner block verification + SemanticVerify() error + // CleanupVerified is called when a block has passed SemanticVerify and SynctacticVerify, + // and should be cleaned up due to error or verification runs under non-write mode. This + // does not return an error because the block has already been verified. + CleanupVerified() + // Accept is called when a block is accepted by the block manager. Accept takes a + // database.Batch that contains the changes that were made to the database as a result + // of accepting the block. The changes in the batch should be flushed to the database in this method. + Accept(acceptedBatch database.Batch) error + // Reject is called when a block is rejected by the block manager + Reject() error +} + +// BuilderMempool is a mempool that's used in the block builder +type BuilderMempool interface { + // PendingLen returns the number of pending transactions + // that are waiting to be included in a block + PendingLen() int + // SubscribePendingTxs returns a channel that's signaled when there are pending transactions + SubscribePendingTxs() <-chan struct{} +} + +// LeafRequestConfig is the configuration to handle leaf requests +// in the network and syncer +type LeafRequestConfig struct { + // LeafType is the type of the leaf node + LeafType message.NodeType + // MetricName is the name of the metric to use for the leaf request + MetricName string + // Handler is the handler to use for the leaf request + Handler handlers.LeafRequestHandler +} + +// Config is the configuration for the VM extension +type Config struct { + // ConsensusCallbacks is the consensus callbacks to use + // for the VM to be used in consensus engine. + // Callback functions can be nil. + ConsensusCallbacks dummy.ConsensusCallbacks + // SyncSummaryProvider is the sync summary provider to use + // for the VM to be used in syncer. + // It's required and should be non-nil + SyncSummaryProvider sync.SummaryProvider + // SyncExtender can extend the syncer to handle custom sync logic. + // It's optional and can be nil + SyncExtender sync.Extender + // SyncableParser is to parse summary messages from the network. 
+ // It's required and should be non-nil + SyncableParser message.SyncableParser + // BlockExtender allows the VM extension to create an extension to handle block processing events. + // It's optional and can be nil + BlockExtender BlockExtender + // ExtraSyncLeafHandlerConfig is the extra configuration to handle leaf requests + // in the network and syncer. It's optional and can be nil + ExtraSyncLeafHandlerConfig *LeafRequestConfig + // ExtraMempool is the mempool to be used in the block builder. + // It's optional and can be nil + ExtraMempool BuilderMempool + // Clock is the clock to use for time related operations. + // It's optional and can be nil + Clock *mockable.Clock +} + +func (c *Config) Validate() error { + if c == nil { + return errNilConfig + } + if c.SyncSummaryProvider == nil { + return errNilSyncSummaryProvider + } + if c.SyncableParser == nil { + return errNilSyncableParser + } + if c.Clock == nil { + return errNilClock + } + return nil +} diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go index 22fc9d6310..3ccddc250f 100644 --- a/plugin/evm/message/codec.go +++ b/plugin/evm/message/codec.go @@ -27,9 +27,9 @@ func init() { c.SkipRegistrations(1) errs := wrappers.Errs{} + // Gossip types and sync summary type removed from codec + c.SkipRegistrations(3) errs.Add( - // Types for state sync frontier consensus - c.RegisterType(SyncSummary{}), // state sync types c.RegisterType(BlockRequest{}), diff --git a/plugin/evm/message/handler.go b/plugin/evm/message/handler.go index d5ba9b817c..d58ce2e2cd 100644 --- a/plugin/evm/message/handler.go +++ b/plugin/evm/message/handler.go @@ -17,9 +17,8 @@ var ( // Must have methods in format of handleType(context.Context, ids.NodeID, uint32, request Type) error // so that the Request object of relevant Type can invoke its respective handle method // on this struct. -// Also see GossipHandler for implementation style. 
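The new `extension.Config` above is the single entry point for VM extensions, and `Validate` enforces its three required fields. A minimal wiring sketch, assuming only the names in this patch (`sync.SummaryProvider`, `message.SyncableParser`, `mockable.Clock`); the concrete `provider` and `parser` are whatever the extension supplies:

```go
// newMinimalConfig assembles the smallest extension.Config that Validate
// accepts; the optional fields (SyncExtender, BlockExtender, ExtraMempool,
// ExtraSyncLeafHandlerConfig) are left nil.
func newMinimalConfig(
	provider sync.SummaryProvider, // required: serves state summaries
	parser message.SyncableParser, // required: parses summaries off the wire
) (*extension.Config, error) {
	cfg := &extension.Config{
		SyncSummaryProvider: provider,
		SyncableParser:      parser,
		Clock:               &mockable.Clock{}, // required; the zero value reads real time
	}
	if err := cfg.Validate(); err != nil {
		// one of errNilSyncSummaryProvider, errNilSyncableParser, errNilClock
		return nil, err
	}
	return cfg, nil
}
```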
type RequestHandler interface { - HandleStateTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) + HandleLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) HandleBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request BlockRequest) ([]byte, error) HandleCodeRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, codeRequest CodeRequest) ([]byte, error) } @@ -35,7 +34,7 @@ type ResponseHandler interface { type NoopRequestHandler struct{} -func (NoopRequestHandler) HandleStateTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) { +func (NoopRequestHandler) HandleLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) { return nil, nil } diff --git a/plugin/evm/message/leafs_request.go b/plugin/evm/message/leafs_request.go index 2e345949e7..e21e94988a 100644 --- a/plugin/evm/message/leafs_request.go +++ b/plugin/evm/message/leafs_request.go @@ -15,25 +15,37 @@ const MaxCodeHashesPerRequest = 5 var _ Request = LeafsRequest{} +// NodeType outlines the trie that a leaf node belongs to +// handlers.LeafsRequestHandler uses this information to determine +// which trie type to fetch the information from +type NodeType uint8 + +const ( + StateTrieNode = NodeType(1) + StateTrieKeyLength = common.HashLength +) + // LeafsRequest is a request to receive trie leaves at specified Root within Start and End byte range // Limit outlines maximum number of leaves to returns starting at Start +// NodeType outlines which trie to read from state/atomic. type LeafsRequest struct { - Root common.Hash `serialize:"true"` - Account common.Hash `serialize:"true"` - Start []byte `serialize:"true"` - End []byte `serialize:"true"` - Limit uint16 `serialize:"true"` + Root common.Hash `serialize:"true"` + Account common.Hash `serialize:"true"` + Start []byte `serialize:"true"` + End []byte `serialize:"true"` + Limit uint16 `serialize:"true"` + NodeType NodeType `serialize:"true"` } func (l LeafsRequest) String() string { return fmt.Sprintf( - "LeafsRequest(Root=%s, Account=%s, Start=%s, End %s, Limit=%d)", - l.Root, l.Account, common.Bytes2Hex(l.Start), common.Bytes2Hex(l.End), l.Limit, + "LeafsRequest(Root=%s, Account=%s, Start=%s, End=%s, Limit=%d, NodeType=%d)", + l.Root, l.Account, common.Bytes2Hex(l.Start), common.Bytes2Hex(l.End), l.Limit, l.NodeType, ) } func (l LeafsRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, handler RequestHandler) ([]byte, error) { - return handler.HandleStateTrieLeafsRequest(ctx, nodeID, requestID, l) + return handler.HandleLeafsRequest(ctx, nodeID, requestID, l) } // LeafsResponse is a response to a LeafsRequest diff --git a/plugin/evm/message/leafs_request_test.go b/plugin/evm/message/leafs_request_test.go index b4754402b5..4b28cdb53f 100644 --- a/plugin/evm/message/leafs_request_test.go +++ b/plugin/evm/message/leafs_request_test.go @@ -4,14 +4,10 @@ package message import ( - "bytes" - "context" "encoding/base64" "math/rand" "testing" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/libevm/common" "github.com/stretchr/testify/assert" ) @@ -106,62 +102,3 @@ func TestMarshalLeafsResponse(t *testing.T) { assert.False(t, l.More) // make sure it is not serialized assert.Equal(t, leafsResponse.ProofVals, l.ProofVals) } - -func TestLeafsRequestValidation(t *testing.T) { - 
mockRequestHandler := &mockHandler{} - - tests := map[string]struct { - request LeafsRequest - assertResponse func(t *testing.T) - }{ - "node type StateTrieNode": { - request: LeafsRequest{ - Root: common.BytesToHash([]byte("some hash goes here")), - Start: bytes.Repeat([]byte{0x00}, common.HashLength), - End: bytes.Repeat([]byte{0xff}, common.HashLength), - Limit: 10, - }, - assertResponse: func(t *testing.T) { - assert.True(t, mockRequestHandler.handleStateTrieCalled) - assert.False(t, mockRequestHandler.handleBlockRequestCalled) - assert.False(t, mockRequestHandler.handleCodeRequestCalled) - }, - }, - } - for name, test := range tests { - t.Run(name, func(t *testing.T) { - _, _ = test.request.Handle(context.Background(), ids.GenerateTestNodeID(), 1, mockRequestHandler) - test.assertResponse(t) - mockRequestHandler.reset() - }) - } -} - -var _ RequestHandler = (*mockHandler)(nil) - -type mockHandler struct { - handleStateTrieCalled, - handleBlockRequestCalled, - handleCodeRequestCalled bool -} - -func (m *mockHandler) HandleStateTrieLeafsRequest(context.Context, ids.NodeID, uint32, LeafsRequest) ([]byte, error) { - m.handleStateTrieCalled = true - return nil, nil -} - -func (m *mockHandler) HandleBlockRequest(context.Context, ids.NodeID, uint32, BlockRequest) ([]byte, error) { - m.handleBlockRequestCalled = true - return nil, nil -} - -func (m *mockHandler) HandleCodeRequest(context.Context, ids.NodeID, uint32, CodeRequest) ([]byte, error) { - m.handleCodeRequestCalled = true - return nil, nil -} - -func (m *mockHandler) reset() { - m.handleStateTrieCalled = false - m.handleBlockRequestCalled = false - m.handleCodeRequestCalled = false -} diff --git a/plugin/evm/message/syncable.go b/plugin/evm/message/syncable.go index a2b77ad1e1..3a3690b8f9 100644 --- a/plugin/evm/message/syncable.go +++ b/plugin/evm/message/syncable.go @@ -4,88 +4,19 @@ package message import ( - "context" - "fmt" - - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) -var _ block.StateSummary = (*SyncSummary)(nil) - -// SyncSummary provides the information necessary to sync a node starting -// at the given block. 
-type SyncSummary struct { - BlockNumber uint64 `serialize:"true"` - BlockHash common.Hash `serialize:"true"` - BlockRoot common.Hash `serialize:"true"` - - summaryID ids.ID - bytes []byte - acceptImpl func(SyncSummary) (block.StateSyncMode, error) +type Syncable interface { + block.StateSummary + GetBlockHash() common.Hash + GetBlockRoot() common.Hash } -func NewSyncSummaryFromBytes(summaryBytes []byte, acceptImpl func(SyncSummary) (block.StateSyncMode, error)) (SyncSummary, error) { - summary := SyncSummary{} - if codecVersion, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { - return SyncSummary{}, err - } else if codecVersion != Version { - return SyncSummary{}, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (%d != %d)", codecVersion, Version) - } - - summary.bytes = summaryBytes - summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) - if err != nil { - return SyncSummary{}, err - } - summary.summaryID = summaryID - summary.acceptImpl = acceptImpl - return summary, nil +type SyncableParser interface { + Parse(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) } -func NewSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash) (SyncSummary, error) { - summary := SyncSummary{ - BlockNumber: blockNumber, - BlockHash: blockHash, - BlockRoot: blockRoot, - } - bytes, err := Codec.Marshal(Version, &summary) - if err != nil { - return SyncSummary{}, err - } - - summary.bytes = bytes - summaryID, err := ids.ToID(crypto.Keccak256(bytes)) - if err != nil { - return SyncSummary{}, err - } - summary.summaryID = summaryID - - return summary, nil -} - -func (s SyncSummary) Bytes() []byte { - return s.bytes -} - -func (s SyncSummary) Height() uint64 { - return s.BlockNumber -} - -func (s SyncSummary) ID() ids.ID { - return s.summaryID -} - -func (s SyncSummary) String() string { - return fmt.Sprintf("SyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot) -} - -func (s SyncSummary) Accept(context.Context) (block.StateSyncMode, error) { - if s.acceptImpl == nil { - return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", s) - } - return s.acceptImpl(s) -} +type AcceptImplFn func(Syncable) (block.StateSyncMode, error) diff --git a/plugin/evm/network_handler.go b/plugin/evm/network_handler.go index c7ff5e0bac..f4fa081d44 100644 --- a/plugin/evm/network_handler.go +++ b/plugin/evm/network_handler.go @@ -9,40 +9,55 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/libevm/ethdb" - "github.com/ava-labs/libevm/metrics" + "github.com/ava-labs/libevm/log" "github.com/ava-labs/libevm/triedb" "github.com/ava-labs/subnet-evm/plugin/evm/message" syncHandlers "github.com/ava-labs/subnet-evm/sync/handlers" - syncStats "github.com/ava-labs/subnet-evm/sync/handlers/stats" + "github.com/ava-labs/subnet-evm/sync/handlers/stats" "github.com/ava-labs/subnet-evm/warp" ) var _ message.RequestHandler = (*networkHandler)(nil) +type LeafHandlers map[message.NodeType]syncHandlers.LeafRequestHandler + type networkHandler struct { - leafRequestHandler *syncHandlers.LeafsRequestHandler + leafRequestHandlers LeafHandlers blockRequestHandler *syncHandlers.BlockRequestHandler codeRequestHandler *syncHandlers.CodeRequestHandler } +type LeafRequestTypeConfig struct { + NodeType message.NodeType + NodeKeyLen int + TrieDB *triedb.Database + UseSnapshots bool + MetricName string +} + // newNetworkHandler constructs 
the handler for serving network requests. func newNetworkHandler( provider syncHandlers.SyncDataProvider, diskDB ethdb.KeyValueReader, - evmTrieDB *triedb.Database, warpBackend warp.Backend, networkCodec codec.Manager, -) message.RequestHandler { - syncStats := syncStats.NewHandlerStats(metrics.Enabled) + leafRequestHandlers LeafHandlers, + syncStats stats.HandlerStats, +) *networkHandler { return &networkHandler{ - leafRequestHandler: syncHandlers.NewLeafsRequestHandler(evmTrieDB, nil, networkCodec, syncStats), + leafRequestHandlers: leafRequestHandlers, blockRequestHandler: syncHandlers.NewBlockRequestHandler(provider, networkCodec, syncStats), codeRequestHandler: syncHandlers.NewCodeRequestHandler(diskDB, networkCodec, syncStats), } } -func (n networkHandler) HandleStateTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { - return n.leafRequestHandler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) +func (n networkHandler) HandleLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { + handler, ok := n.leafRequestHandlers[leafsRequest.NodeType] + if !ok { + log.Debug("node type is not recognised, dropping request", "nodeID", nodeID, "requestID", requestID, "nodeType", leafsRequest.NodeType) + return nil, nil + } + return handler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) } func (n networkHandler) HandleBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, blockRequest message.BlockRequest) ([]byte, error) { diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/sync/client.go similarity index 56% rename from plugin/evm/syncervm_client.go rename to plugin/evm/sync/client.go index a8a320e368..dc1e70c56b 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/sync/client.go @@ -1,83 +1,109 @@ // Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package sync import ( "context" "fmt" "sync" + syncclient "github.com/ava-labs/subnet-evm/sync/client" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/core/types" "github.com/ava-labs/libevm/ethdb" "github.com/ava-labs/libevm/log" "github.com/ava-labs/subnet-evm/core/state/snapshot" "github.com/ava-labs/subnet-evm/eth" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/plugin/evm/message" - syncclient "github.com/ava-labs/subnet-evm/sync/client" "github.com/ava-labs/subnet-evm/sync/statesync" ) -const ( - // State sync fetches [parentsToGet] parents of the block it syncs to. - // The last 256 block hashes are necessary to support the BLOCKHASH opcode. - parentsToGet = 256 -) +// ParentsToFetch is the number of the block parents the state syncs to. +// The last 256 block hashes are necessary to support the BLOCKHASH opcode. 
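`HandleLeafsRequest` above dispatches on `LeafsRequest.NodeType` through the `LeafHandlers` map and drops requests for unrecognized types. A sketch of how that map is populated, mirroring what `initializeStateSync` does later in this patch; `buildLeafHandlers` is an illustrative helper, not part of the change:

```go
// buildLeafHandlers registers the default state-trie handler and, when the
// VM extension supplies one, an extra leaf type (e.g. an additional trie).
func buildLeafHandlers(
	stateTrieHandler syncHandlers.LeafRequestHandler,
	extra *extension.LeafRequestConfig, // optional; may be nil
) LeafHandlers {
	leafHandlers := make(LeafHandlers)
	leafHandlers[message.StateTrieNode] = stateTrieHandler
	if extra != nil {
		leafHandlers[extra.LeafType] = extra.Handler
	}
	return leafHandlers
}
```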
+const ParentsToFetch = 256 var stateSyncSummaryKey = []byte("stateSyncSummary") -// stateSyncClientConfig defines the options and dependencies needed to construct a StateSyncerClient -type stateSyncClientConfig struct { - enabled bool - skipResume bool +type BlockAcceptor interface { + PutLastAcceptedID(ids.ID) error +} + +// EthBlockWrapper can be implemented by a concrete block wrapper type to +// return *types.Block, which is needed to update chain pointers at the +// end of the sync operation. +type EthBlockWrapper interface { + GetEthBlock() *types.Block +} + +// Extender is an interface that allows for extending the state sync process. +type Extender interface { + // Sync is called to perform any extension-specific state sync logic. + Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error + // OnFinishBeforeCommit is called after the state sync process has completed but before the state sync summary is committed. + OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error + // OnFinishAfterCommit is called after the state sync process has completed and the state sync summary is committed. + OnFinishAfterCommit(summaryHeight uint64) error +} + +// ClientConfig defines the options and dependencies needed to construct a Client +type ClientConfig struct { + Enabled bool + SkipResume bool // Specifies the number of blocks behind the latest state summary that the chain must be // in order to prefer performing state sync over falling back to the normal bootstrapping // algorithm. - stateSyncMinBlocks uint64 - stateSyncRequestSize uint16 // number of key/value pairs to ask peers for per request + MinBlocks uint64 + RequestSize uint16 // number of key/value pairs to ask peers for per request + + LastAcceptedHeight uint64 + + Chain *eth.Ethereum + State *chain.State + ChaindDB ethdb.Database + Acceptor BlockAcceptor + VerDB *versiondb.Database + MetadataDB database.Database - lastAcceptedHeight uint64 + // Extension points + Parser message.SyncableParser + // Extender is an optional extension point for the state sync process, and can be nil. + Extender Extender - chain *eth.Ethereum - state *chain.State - chaindb ethdb.Database - metadataDB database.Database - acceptedBlockDB database.Database - db *versiondb.Database + Client syncclient.Client - client syncclient.Client - stateSyncDone chan struct{} + ToEngine chan<- commonEng.Message } -type stateSyncerClient struct { - *stateSyncClientConfig +type client struct { + *ClientConfig - resumableSummary message.SyncSummary + resumableSummary message.Syncable cancel context.CancelFunc wg sync.WaitGroup // State Sync results - syncSummary message.SyncSummary - stateSyncErr error - stateSyncDone chan struct{} + summary message.Syncable + err error } -func NewStateSyncClient(config *stateSyncClientConfig) StateSyncClient { - return &stateSyncerClient{ - stateSyncDone: config.stateSyncDone, - stateSyncClientConfig: config, +func NewClient(config *ClientConfig) Client { + return &client{ + ClientConfig: config, } } -type StateSyncClient interface { +type Client interface { // methods that implement the client side of [block.StateSyncableVM] StateSyncEnabled(context.Context) (bool, error) GetOngoingSyncStateSummary(context.Context) (block.StateSummary, error) @@ -95,28 +121,28 @@ type StateSyncClient interface { // Error returns an error if any was encountered. 
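`Extender` above is the optional hook surface for extension-owned sync work. A no-op implementation sketch, assuming only the interface as declared, with comments marking when each hook fires in the flow below:

```go
// noopExtender satisfies sync.Extender without doing any extra work.
type noopExtender struct{}

// Sync runs after block download and EVM trie sync, with the leaf client and
// versiondb available for syncing any extension-owned state.
func (noopExtender) Sync(context.Context, syncclient.LeafClient, *versiondb.Database, message.Syncable) error {
	return nil
}

// OnFinishBeforeCommit runs once sync completes, before the VM markers are committed.
func (noopExtender) OnFinishBeforeCommit(uint64, message.Syncable) error { return nil }

// OnFinishAfterCommit runs after the summary has been committed.
func (noopExtender) OnFinishAfterCommit(uint64) error { return nil }
```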
type Syncer interface { Start(ctx context.Context) error - Wait(ctx context.Context) error + Done() <-chan error } // StateSyncEnabled returns [client.enabled], which is set in the chain's config file. -func (client *stateSyncerClient) StateSyncEnabled(context.Context) (bool, error) { - return client.enabled, nil +func (client *client) StateSyncEnabled(context.Context) (bool, error) { + return client.Enabled, nil } // GetOngoingSyncStateSummary returns a state summary that was previously started // and not finished, and sets [resumableSummary] if one was found. // Returns [database.ErrNotFound] if no ongoing summary is found or if [client.skipResume] is true. -func (client *stateSyncerClient) GetOngoingSyncStateSummary(context.Context) (block.StateSummary, error) { - if client.skipResume { +func (client *client) GetOngoingSyncStateSummary(context.Context) (block.StateSummary, error) { + if client.SkipResume { return nil, database.ErrNotFound } - summaryBytes, err := client.metadataDB.Get(stateSyncSummaryKey) + summaryBytes, err := client.MetadataDB.Get(stateSyncSummaryKey) if err != nil { return nil, err // includes the [database.ErrNotFound] case } - summary, err := message.NewSyncSummaryFromBytes(summaryBytes, client.acceptSyncSummary) + summary, err := client.Parser.Parse(summaryBytes, client.acceptSyncSummary) if err != nil { return nil, fmt.Errorf("failed to parse saved state sync summary to SyncSummary: %w", err) } @@ -125,11 +151,11 @@ func (client *stateSyncerClient) GetOngoingSyncStateSummary(context.Context) (bl } // ClearOngoingSummary clears any marker of an ongoing state sync summary -func (client *stateSyncerClient) ClearOngoingSummary() error { - if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { +func (client *client) ClearOngoingSummary() error { + if err := client.MetadataDB.Delete(stateSyncSummaryKey); err != nil { return fmt.Errorf("failed to clear ongoing summary: %w", err) } - if err := client.db.Commit(); err != nil { + if err := client.VerDB.Commit(); err != nil { return fmt.Errorf("failed to commit db while clearing ongoing summary: %w", err) } @@ -137,64 +163,70 @@ func (client *stateSyncerClient) ClearOngoingSummary() error { } // ParseStateSummary parses [summaryBytes] to [commonEng.Summary] -func (client *stateSyncerClient) ParseStateSummary(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { - return message.NewSyncSummaryFromBytes(summaryBytes, client.acceptSyncSummary) +func (client *client) ParseStateSummary(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { + return client.Parser.Parse(summaryBytes, client.acceptSyncSummary) } // stateSync blockingly performs the state sync for the EVM state and the atomic state // to [client.syncSummary]. returns an error if one occurred. -func (client *stateSyncerClient) stateSync(ctx context.Context) error { - if err := client.syncBlocks(ctx, client.syncSummary.BlockHash, client.syncSummary.BlockNumber, parentsToGet); err != nil { +func (client *client) stateSync(ctx context.Context) error { + if err := client.syncBlocks(ctx, client.summary.GetBlockHash(), client.summary.Height(), ParentsToFetch); err != nil { return err } // Sync the EVM trie. 
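The `Syncer` contract above replaces the old blocking `Wait(ctx)` with a `Done() <-chan error` that yields the terminal error. A minimal driver under that contract, mirroring the `<-evmSyncer.Done()` call later in this file:

```go
// runSyncer starts a Syncer and blocks until it reports completion;
// Done yields exactly one error, nil on success.
func runSyncer(ctx context.Context, s Syncer) error {
	if err := s.Start(ctx); err != nil {
		return err
	}
	return <-s.Done()
}
```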
- return client.syncStateTrie(ctx) + if err := client.syncStateTrie(ctx); err != nil { + return err + } + + if client.Extender != nil { + return client.Extender.Sync(ctx, client.Client, client.VerDB, client.summary) + } + return nil } // acceptSyncSummary returns true if sync will be performed and launches the state sync process // in a goroutine. -func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncSummary) (block.StateSyncMode, error) { - isResume := proposedSummary.BlockHash == client.resumableSummary.BlockHash +func (client *client) acceptSyncSummary(proposedSummary message.Syncable) (block.StateSyncMode, error) { + isResume := client.resumableSummary != nil && + proposedSummary.GetBlockHash() == client.resumableSummary.GetBlockHash() if !isResume { // Skip syncing if the blockchain is not significantly ahead of local state, // since bootstrapping would be faster. // (Also ensures we don't sync to a height prior to local state.) - if client.lastAcceptedHeight+client.stateSyncMinBlocks > proposedSummary.Height() { + if client.LastAcceptedHeight+client.MinBlocks > proposedSummary.Height() { log.Info( "last accepted too close to most recent syncable block, skipping state sync", - "lastAccepted", client.lastAcceptedHeight, + "lastAccepted", client.LastAcceptedHeight, "syncableHeight", proposedSummary.Height(), ) return block.StateSyncSkipped, nil } - // Wipe the snapshot completely if we are not resuming from an existing sync, so that we do not // use a corrupted snapshot. // Note: this assumes that when the node is started with state sync disabled, the in-progress state // sync marker will be wiped, so we do not accidentally resume progress from an incorrect version // of the snapshot. (if switching between versions that come before this change and back this could // lead to the snapshot not being cleaned up correctly) - <-snapshot.WipeSnapshot(client.chaindb, true) + <-snapshot.WipeSnapshot(client.ChaindDB, true) // Reset the snapshot generator here so that when state sync completes, snapshots will not attempt to read an // invalid generator. // Note: this must be called after WipeSnapshot is called so that we do not invalidate a partially generated snapshot. - snapshot.ResetSnapshotGeneration(client.chaindb) + snapshot.ResetSnapshotGeneration(client.ChaindDB) } - client.syncSummary = proposedSummary + client.summary = proposedSummary // Update the current state sync summary key in the database // Note: this must be performed after WipeSnapshot finishes so that we do not start a state sync // session from a partially wiped snapshot. 
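A worked instance of the sync-skip condition in `acceptSyncSummary` above, using the values from `TestSkipStateSync` later in this patch (a fresh node with `MinBlocks` of 300 offered a summary at height 256):

```go
lastAcceptedHeight, minBlocks, summaryHeight := uint64(0), uint64(300), uint64(256)
skip := lastAcceptedHeight+minBlocks > summaryHeight // true -> block.StateSyncSkipped
_ = skip
```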
- if err := client.metadataDB.Put(stateSyncSummaryKey, proposedSummary.Bytes()); err != nil { + if err := client.MetadataDB.Put(stateSyncSummaryKey, proposedSummary.Bytes()); err != nil { return block.StateSyncSkipped, fmt.Errorf("failed to write state sync summary key to disk: %w", err) } - if err := client.db.Commit(); err != nil { + if err := client.VerDB.Commit(); err != nil { return block.StateSyncSkipped, fmt.Errorf("failed to commit db: %w", err) } log.Info("Starting state sync", "summary", proposedSummary) - // create a cancellable ctx for the state sync goroutine ctx, cancel := context.WithCancel(context.Background()) client.cancel = cancel @@ -204,15 +236,15 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS defer cancel() if err := client.stateSync(ctx); err != nil { - client.stateSyncErr = err + client.err = err } else { - client.stateSyncErr = client.finishSync() + client.err = client.finishSync() } // notify engine regardless of whether err == nil, // this error will be propagated to the engine when it calls // vm.SetState(snow.Bootstrapping) - log.Info("stateSync completed, notifying engine", "err", client.stateSyncErr) - close(client.stateSyncDone) + log.Info("stateSync completed, notifying engine", "err", client.err) + client.ToEngine <- commonEng.StateSyncDone }() return block.StateSyncStatic, nil } @@ -221,7 +253,7 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // using [client] and writes them to disk. // the process begins with [fromHash] and it fetches parents recursively. // fetching starts from the first ancestor not found on disk -func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common.Hash, fromHeight uint64, parentsToGet int) error { +func (client *client) syncBlocks(ctx context.Context, fromHash common.Hash, fromHeight uint64, parentsToGet int) error { nextHash := fromHash nextHeight := fromHeight parentsPerRequest := uint16(32) @@ -229,7 +261,7 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common // first, check for blocks already available on disk so we don't // request them from peers. for parentsToGet >= 0 { - blk := rawdb.ReadBlock(client.chaindb, nextHash, nextHeight) + blk := rawdb.ReadBlock(client.ChaindDB, nextHash, nextHeight) if blk != nil { // block exists nextHash = blk.ParentHash() @@ -237,19 +269,18 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common parentsToGet-- continue } - // block was not found break } // get any blocks we couldn't find on disk from peers and write // them to disk. 
-	batch := client.chaindb.NewBatch()
+	batch := client.ChaindDB.NewBatch()
 	for i := parentsToGet - 1; i >= 0 && (nextHash != common.Hash{}); {
 		if err := ctx.Err(); err != nil {
 			return err
 		}
-		blocks, err := client.client.GetBlocks(ctx, nextHash, nextHeight, parentsPerRequest)
+		blocks, err := client.Client.GetBlocks(ctx, nextHash, nextHeight, parentsPerRequest)
 		if err != nil {
 			log.Error("could not get blocks from peer", "err", err, "nextHash", nextHash, "remaining", i+1)
 			return err
@@ -257,7 +288,6 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common
 		for _, block := range blocks {
 			rawdb.WriteBlock(batch, block)
 			rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
-
 			i--
 			nextHash = block.ParentHash()
 			nextHeight--
@@ -268,16 +298,16 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common
 	return batch.Write()
 }
 
-func (client *stateSyncerClient) syncStateTrie(ctx context.Context) error {
-	log.Info("state sync: sync starting", "root", client.syncSummary.BlockRoot)
+func (client *client) syncStateTrie(ctx context.Context) error {
+	log.Info("state sync: sync starting", "root", client.summary.GetBlockRoot())
 	evmSyncer, err := statesync.NewStateSyncer(&statesync.StateSyncerConfig{
-		Client:                   client.client,
-		Root:                     client.syncSummary.BlockRoot,
+		Client:                   client.Client,
+		Root:                     client.summary.GetBlockRoot(),
 		BatchSize:                ethdb.IdealBatchSize,
-		DB:                       client.chaindb,
+		DB:                       client.ChaindDB,
 		MaxOutstandingCodeHashes: statesync.DefaultMaxOutstandingCodeHashes,
 		NumCodeFetchingWorkers:   statesync.DefaultNumCodeFetchingWorkers,
-		RequestSize:              client.stateSyncRequestSize,
+		RequestSize:              client.RequestSize,
 	})
 	if err != nil {
 		return err
@@ -285,12 +315,12 @@ func (client *stateSyncerClient) syncStateTrie(ctx context.Context) error {
 	if err := evmSyncer.Start(ctx); err != nil {
 		return err
 	}
-	err = evmSyncer.Wait(ctx)
-	log.Info("state sync: sync finished", "root", client.syncSummary.BlockRoot, "err", err)
+	err = <-evmSyncer.Done()
+	log.Info("state sync: sync finished", "root", client.summary.GetBlockRoot(), "err", err)
	return err
 }
 
-func (client *stateSyncerClient) Shutdown() error {
+func (client *client) Shutdown() error {
 	if client.cancel != nil {
 		client.cancel()
 	}
@@ -299,29 +329,31 @@ func (client *stateSyncerClient) Shutdown() error {
 // finishSync is responsible for updating disk and memory pointers so the VM is prepared
-// for bootstrapping.
-func (client *stateSyncerClient) finishSync() error {
-	stateBlock, err := client.state.GetBlock(context.TODO(), ids.ID(client.syncSummary.BlockHash))
+// for bootstrapping. Runs any registered sync Extender hooks before and after committing the VM markers.
+func (client *client) finishSync() error { + stateBlock, err := client.State.GetBlock(context.TODO(), ids.ID(client.summary.GetBlockHash())) if err != nil { - return fmt.Errorf("could not get block by hash from client state: %s", client.syncSummary.BlockHash) + return fmt.Errorf("could not get block by hash from client state: %s", client.summary.GetBlockHash()) } wrapper, ok := stateBlock.(*chain.BlockWrapper) if !ok { return fmt.Errorf("could not convert block(%T) to *chain.BlockWrapper", wrapper) } - evmBlock, ok := wrapper.Block.(*Block) + wrappedBlock := wrapper.Block + + evmBlockGetter, ok := wrappedBlock.(EthBlockWrapper) if !ok { - return fmt.Errorf("could not convert block(%T) to evm.Block", stateBlock) + return fmt.Errorf("could not convert block(%T) to evm.EthBlockWrapper", stateBlock) } - block := evmBlock.ethBlock + block := evmBlockGetter.GetEthBlock() - if block.Hash() != client.syncSummary.BlockHash { - return fmt.Errorf("attempted to set last summary block to unexpected block hash: (%s != %s)", block.Hash(), client.syncSummary.BlockHash) + if block.Hash() != client.summary.GetBlockHash() { + return fmt.Errorf("attempted to set last summary block to unexpected block hash: (%s != %s)", block.Hash(), client.summary.GetBlockHash()) } - if block.NumberU64() != client.syncSummary.BlockNumber { - return fmt.Errorf("attempted to set last summary block to unexpected block number: (%d != %d)", block.NumberU64(), client.syncSummary.BlockNumber) + if block.NumberU64() != client.summary.Height() { + return fmt.Errorf("attempted to set last summary block to unexpected block number: (%d != %d)", block.NumberU64(), client.summary.Height()) } // BloomIndexer needs to know that some parts of the chain are not available @@ -334,32 +366,50 @@ func (client *stateSyncerClient) finishSync() error { // by [params.BloomBitsBlocks]. 
parentHeight := block.NumberU64() - 1 parentHash := block.ParentHash() - client.chain.BloomIndexer().AddCheckpoint(parentHeight/params.BloomBitsBlocks, parentHash) + client.Chain.BloomIndexer().AddCheckpoint(parentHeight/params.BloomBitsBlocks, parentHash) - if err := client.chain.BlockChain().ResetToStateSyncedBlock(block); err != nil { + if err := client.Chain.BlockChain().ResetToStateSyncedBlock(block); err != nil { return err } - if err := client.updateVMMarkers(); err != nil { + if client.Extender != nil { + if err := client.Extender.OnFinishBeforeCommit(client.LastAcceptedHeight, client.summary); err != nil { + return err + } + } + + if err := client.commitVMMarkers(); err != nil { return fmt.Errorf("error updating vm markers, height=%d, hash=%s, err=%w", block.NumberU64(), block.Hash(), err) } - return client.state.SetLastAcceptedBlock(evmBlock) + if err := client.State.SetLastAcceptedBlock(wrappedBlock); err != nil { + return err + } + + if client.Extender != nil { + return client.Extender.OnFinishAfterCommit(block.NumberU64()) + } + + return nil } -// updateVMMarkers updates the following markers in the VM's database +// commitVMMarkers updates the following markers in the VM's database // and commits them atomically: // - updates lastAcceptedKey // - removes state sync progress markers -func (client *stateSyncerClient) updateVMMarkers() error { - if err := client.acceptedBlockDB.Put(lastAcceptedKey, client.syncSummary.BlockHash[:]); err != nil { +func (client *client) commitVMMarkers() error { + // Mark the previously last accepted block for the shared memory cursor, so that we will execute shared + // memory operations from the previously last accepted block to [vm.syncSummary] when ApplyToSharedMemory + // is called. + id := ids.ID(client.summary.GetBlockHash()) + if err := client.Acceptor.PutLastAcceptedID(id); err != nil { return err } - if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { + if err := client.MetadataDB.Delete(stateSyncSummaryKey); err != nil { return err } - return client.db.Commit() + return client.VerDB.Commit() } // Error returns a non-nil error if one occurred during the sync. -func (client *stateSyncerClient) Error() error { return client.stateSyncErr } +func (client *client) Error() error { return client.err } diff --git a/plugin/evm/syncervm_server.go b/plugin/evm/sync/server.go similarity index 61% rename from plugin/evm/syncervm_server.go rename to plugin/evm/sync/server.go index 6146c15c97..2275011ced 100644 --- a/plugin/evm/syncervm_server.go +++ b/plugin/evm/sync/server.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package sync import ( "context" @@ -12,57 +12,41 @@ import ( "github.com/ava-labs/libevm/log" "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/plugin/evm/message" + + "github.com/ava-labs/libevm/core/types" ) -type stateSyncServerConfig struct { - Chain *core.BlockChain +var errProviderNotSet = fmt.Errorf("provider not set") - // SyncableInterval is the interval at which blocks are eligible to provide syncable block summaries. 
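The server no longer builds summaries itself; it delegates to the pluggable `SummaryProvider` above. A provider sketch where `newSummary` is a hypothetical constructor standing in for the removed `message.NewSyncSummary`, which serialized the same hash/number/root triple:

```go
// blockSummaryProvider builds a block.StateSummary for a synced block.
// newSummary is hypothetical: a concrete message.Syncable constructor
// supplied by the VM extension.
type blockSummaryProvider struct{}

func (blockSummaryProvider) StateSummaryAtBlock(ethBlock *types.Block) (block.StateSummary, error) {
	return newSummary(ethBlock.Hash(), ethBlock.NumberU64(), ethBlock.Root())
}
```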
- SyncableInterval uint64 +type SummaryProvider interface { + StateSummaryAtBlock(ethBlock *types.Block) (block.StateSummary, error) } -type stateSyncServer struct { +type server struct { chain *core.BlockChain + provider SummaryProvider syncableInterval uint64 } -type StateSyncServer interface { +type Server interface { GetLastStateSummary(context.Context) (block.StateSummary, error) GetStateSummary(context.Context, uint64) (block.StateSummary, error) } -func NewStateSyncServer(config *stateSyncServerConfig) StateSyncServer { - return &stateSyncServer{ - chain: config.Chain, - syncableInterval: config.SyncableInterval, - } -} - -// stateSummaryAtHeight returns the SyncSummary at [height] if valid and available. -func (server *stateSyncServer) stateSummaryAtHeight(height uint64) (message.SyncSummary, error) { - blk := server.chain.GetBlockByNumber(height) - if blk == nil { - return message.SyncSummary{}, fmt.Errorf("block not found for height (%d)", height) - } - - if !server.chain.HasState(blk.Root()) { - return message.SyncSummary{}, fmt.Errorf("block root does not exist for height (%d), root (%s)", height, blk.Root()) - } - - summary, err := message.NewSyncSummary(blk.Hash(), height, blk.Root()) - if err != nil { - return message.SyncSummary{}, fmt.Errorf("failed to construct syncable block at height %d: %w", height, err) +func NewServer(chain *core.BlockChain, provider SummaryProvider, syncableInterval uint64) Server { + return &server{ + chain: chain, + syncableInterval: syncableInterval, + provider: provider, } - return summary, nil } // GetLastStateSummary returns the latest state summary. // State summary is calculated by the block nearest to last accepted // that is divisible by [syncableInterval] // If no summary is available, [database.ErrNotFound] must be returned. -func (server *stateSyncServer) GetLastStateSummary(context.Context) (block.StateSummary, error) { +func (server *server) GetLastStateSummary(context.Context) (block.StateSummary, error) { lastHeight := server.chain.LastAcceptedBlock().NumberU64() lastSyncSummaryNumber := lastHeight - lastHeight%server.syncableInterval @@ -78,20 +62,33 @@ func (server *stateSyncServer) GetLastStateSummary(context.Context) (block.State // GetStateSummary implements StateSyncableVM and returns a summary corresponding // to the provided [height] if the node can serve state sync data for that key. // If not, [database.ErrNotFound] must be returned. 
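`GetLastStateSummary` above rounds the last accepted height down to a multiple of `syncableInterval`; with illustrative numbers:

```go
lastHeight, syncableInterval := uint64(10_000), uint64(4_096)
lastSyncSummaryNumber := lastHeight - lastHeight%syncableInterval // 8_192
_ = lastSyncSummaryNumber
```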
-func (server *stateSyncServer) GetStateSummary(_ context.Context, height uint64) (block.StateSummary, error) { +func (server *server) GetStateSummary(_ context.Context, height uint64) (block.StateSummary, error) { summaryBlock := server.chain.GetBlockByNumber(height) if summaryBlock == nil || summaryBlock.NumberU64() > server.chain.LastAcceptedBlock().NumberU64() || summaryBlock.NumberU64()%server.syncableInterval != 0 { return nil, database.ErrNotFound } - summary, err := server.stateSummaryAtHeight(summaryBlock.NumberU64()) if err != nil { log.Debug("could not get state summary", "height", height, "err", err) return nil, database.ErrNotFound } - log.Debug("Serving syncable block at requested height", "height", height, "summary", summary) return summary, nil } + +func (server *server) stateSummaryAtHeight(height uint64) (block.StateSummary, error) { + blk := server.chain.GetBlockByNumber(height) + if blk == nil { + return nil, fmt.Errorf("block not found for height (%d)", height) + } + + if !server.chain.HasState(blk.Root()) { + return nil, fmt.Errorf("block root does not exist for height (%d), root (%s)", height, blk.Root()) + } + if server.provider == nil { + return nil, errProviderNotSet + } + return server.provider.StateSummaryAtBlock(blk) +} diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 63d9228f78..18c45ee3eb 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -45,6 +45,8 @@ import ( statesyncclient "github.com/ava-labs/subnet-evm/sync/client" "github.com/ava-labs/subnet-evm/sync/statesync/statesynctest" "github.com/ava-labs/subnet-evm/utils/utilstest" + + syncervm "github.com/ava-labs/subnet-evm/plugin/evm/sync" ) func TestSkipStateSync(t *testing.T) { @@ -54,7 +56,7 @@ func TestSkipStateSync(t *testing.T) { stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync syncMode: block.StateSyncSkipped, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, syncervm.ParentsToFetch) testSyncerVM(t, vmSetup, test) } @@ -66,14 +68,14 @@ func TestStateSyncFromScratch(t *testing.T) { stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, syncervm.ParentsToFetch) testSyncerVM(t, vmSetup, test) } func TestStateSyncFromScratchExceedParent(t *testing.T) { rand.New(rand.NewSource(1)) - numToGen := parentsToGet + uint64(32) + numToGen := syncervm.ParentsToFetch + uint64(32) test := syncTest{ syncableInterval: numToGen, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync @@ -103,11 +105,8 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { panic(err) } - cancel := syncerVM.StateSyncClient.(*stateSyncerClient).cancel - if cancel != nil { - cancel() - } else { - t.Fatal("state sync client not populated correctly") + if err := syncerVM.Client.Shutdown(); err != nil { + panic(err) } } else { syncerVM.AppResponse(context.Background(), nodeID, requestID, response) @@ -115,7 +114,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, syncervm.ParentsToFetch) // Perform sync resulting in 
early termination. testSyncerVM(t, vmSetup, test) @@ -161,7 +160,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { t.Fatalf("Unexpected last accepted height: %d", height) } - enabled, err := syncDisabledVM.StateSyncEnabled(context.Background()) + enabled, err := syncDisabledVM.Client.StateSyncEnabled(context.Background()) assert.NoError(t, err) assert.False(t, enabled, "sync should be disabled") @@ -266,7 +265,7 @@ func TestVMShutdownWhileSyncing(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup = createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup = createSyncServerAndClientVMs(t, test, syncervm.ParentsToFetch) // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) } @@ -310,9 +309,6 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(err) require.NoError(serverVM.vm.State.SetLastAcceptedBlock(internalBlock)) - // patch syncableInterval for test - serverVM.vm.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval - // initialise [syncerVM] with blank genesis state stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d}`, test.stateSyncMinBlocks, 4) syncerVM := newVM(t, testVMConfig{ @@ -430,7 +426,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { require.Equal(msg, commonEng.StateSyncDone) // If the test is expected to error, assert the correct error is returned and finish the test. - err = syncerVM.StateSyncClient.Error() + err = syncerVM.Client.Error() if test.expectedErr != nil { require.ErrorIs(err, test.expectedErr) // Note we re-open the database here to avoid a closed error when the test is for a shutdown VM. diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 5e7fe7a821..ef2dd30855 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -22,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/firewood-go-ethhash/ffi" + "github.com/ava-labs/subnet-evm/plugin/evm/extension" "github.com/prometheus/client_golang/prometheus" avalanchegoprometheus "github.com/ava-labs/avalanchego/vms/evm/metrics/prometheus" @@ -46,6 +47,7 @@ import ( "github.com/ava-labs/subnet-evm/plugin/evm/customrawdb" subnetevmlog "github.com/ava-labs/subnet-evm/plugin/evm/log" "github.com/ava-labs/subnet-evm/plugin/evm/message" + vmsync "github.com/ava-labs/subnet-evm/plugin/evm/sync" "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" "github.com/ava-labs/subnet-evm/triedb/hashdb" @@ -54,6 +56,8 @@ import ( "github.com/ava-labs/subnet-evm/rpc" statesyncclient "github.com/ava-labs/subnet-evm/sync/client" "github.com/ava-labs/subnet-evm/sync/client/stats" + "github.com/ava-labs/subnet-evm/sync/handlers" + handlerstats "github.com/ava-labs/subnet-evm/sync/handlers/stats" "github.com/ava-labs/subnet-evm/warp" // Force-load tracer engine to trigger registration @@ -85,7 +89,6 @@ import ( "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/chain" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" @@ -187,6 +190,9 @@ type VM struct { chainConfig *params.ChainConfig ethConfig ethconfig.Config + // Extension Points + extensionConfig *extension.Config 
+ // pointers to eth constructs eth *eth.Ethereum txPool *txpool.TxPool @@ -243,8 +249,8 @@ type VM struct { logger subnetevmlog.Logger // State sync server and client - StateSyncServer - StateSyncClient + vmsync.Server + vmsync.Client // Avalanche Warp Messaging backend // Used to serve BLS signatures of warp messages over RPC @@ -350,6 +356,17 @@ func (vm *VM) Initialize( // The NetworkId here is kept same as ChainID to be compatible with // Ethereum tooling. vm.ethConfig.NetworkId = g.Config.ChainID.Uint64() + // create genesisHash after applying upgradeBytes in case + // upgradeBytes modifies genesis. + vm.genesisHash = vm.ethConfig.Genesis.ToBlock().Hash() // must create genesis hash before [vm.readLastAccepted] + lastAcceptedHash, lastAcceptedHeight, err := vm.readLastAccepted() + if err != nil { + return err + } + log.Info("read last accepted", + "hash", lastAcceptedHash, + "height", lastAcceptedHeight, + ) // Set minimum price for mining and default gas price oracle value to the min // gas price to prevent so transactions and blocks all use the correct fees @@ -381,7 +398,7 @@ func (vm *VM) Initialize( vm.ethConfig.PopulateMissingTries = vm.config.PopulateMissingTries vm.ethConfig.PopulateMissingTriesParallelism = vm.config.PopulateMissingTriesParallelism vm.ethConfig.AllowMissingTries = vm.config.AllowMissingTries - vm.ethConfig.SnapshotDelayInit = vm.config.StateSyncEnabled + vm.ethConfig.SnapshotDelayInit = vm.stateSyncEnabled(lastAcceptedHeight) vm.ethConfig.SnapshotWait = vm.config.SnapshotWait vm.ethConfig.SnapshotVerify = vm.config.SnapshotVerify vm.ethConfig.HistoricalProofQueryWindow = vm.config.HistoricalProofQueryWindow @@ -410,7 +427,7 @@ func (vm *VM) Initialize( if vm.config.OfflinePruning { return errors.New("Offline pruning is not supported for Firewood") } - if vm.config.StateSyncEnabled { + if vm.config.StateSyncEnabled == nil || *vm.config.StateSyncEnabled { return errors.New("State sync is not yet supported for Firewood") } } @@ -439,18 +456,6 @@ func (vm *VM) Initialize( vm.chainConfig = g.Config - // create genesisHash after applying upgradeBytes in case - // upgradeBytes modifies genesis. - vm.genesisHash = vm.ethConfig.Genesis.ToBlock().Hash() // must create genesis hash before [vm.readLastAccepted] - lastAcceptedHash, lastAcceptedHeight, err := vm.readLastAccepted() - if err != nil { - return err - } - log.Info("read last accepted", - "hash", lastAcceptedHash, - "height", lastAcceptedHeight, - ) - vm.networkCodec = message.Codec vm.Network, err = network.NewNetwork(vm.ctx, appSender, vm.networkCodec, vm.config.MaxOutboundActiveRequests, vm.sdkMetrics) if err != nil { @@ -505,13 +510,7 @@ func (vm *VM) Initialize( warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner) vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler) - vm.setAppRequestHandlers() - - vm.StateSyncServer = NewStateSyncServer(&stateSyncServerConfig{ - Chain: vm.blockChain, - SyncableInterval: vm.config.StateSyncCommitInterval, - }) - return vm.initializeStateSyncClient(lastAcceptedHeight) + return vm.initializeStateSync(lastAcceptedHeight) } func parseGenesis(ctx *snow.Context, genesisBytes []byte, upgradeBytes []byte, airdropFile string) (*core.Genesis, error) { @@ -645,10 +644,49 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash, ethConfig ethconfig. // initializeStateSyncClient initializes the client for performing state sync. 
// If state sync is disabled, this function will wipe any ongoing summary from // disk to ensure that we do not continue syncing from an invalid snapshot. -func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { +func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { + // Create standalone EVM TrieDB (read only) for serving leafs requests. + // We create a standalone TrieDB here, so that it has a standalone cache from the one + // used by the node when processing blocks. + evmTrieDB := triedb.NewDatabase( + vm.chaindb, + &triedb.Config{ + DBOverride: hashdb.Config{ + CleanCacheSize: vm.config.StateSyncServerTrieCache * units.MiB, + }.BackendConstructor, + }, + ) + + // register default leaf request handler for state trie + syncStats := handlerstats.GetOrRegisterHandlerStats(metrics.Enabled) + stateLeafRequestConfig := &extension.LeafRequestConfig{ + LeafType: message.StateTrieNode, + MetricName: "sync_state_trie_leaves", + Handler: handlers.NewLeafsRequestHandler(evmTrieDB, + message.StateTrieKeyLength, + vm.blockChain, vm.networkCodec, + syncStats, + ), + } + + leafHandlers := make(LeafHandlers) + leafHandlers[stateLeafRequestConfig.LeafType] = stateLeafRequestConfig.Handler + + networkHandler := newNetworkHandler( + vm.blockChain, + vm.chaindb, + vm.warpBackend, + vm.networkCodec, + leafHandlers, + syncStats, + ) + vm.Network.SetRequestHandler(networkHandler) + + vm.Server = vmsync.NewServer(vm.blockChain, vm.extensionConfig.SyncSummaryProvider, vm.config.StateSyncCommitInterval) + stateSyncEnabled := vm.stateSyncEnabled(lastAcceptedHeight) // parse nodeIDs from state sync IDs in vm config var stateSyncIDs []ids.NodeID - if vm.config.StateSyncEnabled && len(vm.config.StateSyncIDs) > 0 { + if stateSyncEnabled && len(vm.config.StateSyncIDs) > 0 { nodeIDs := strings.Split(vm.config.StateSyncIDs, ",") stateSyncIDs = make([]ids.NodeID, len(nodeIDs)) for i, nodeIDString := range nodeIDs { @@ -660,34 +698,39 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } - vm.StateSyncClient = NewStateSyncClient(&stateSyncClientConfig{ - chain: vm.eth, - state: vm.State, - stateSyncDone: vm.stateSyncDone, - client: statesyncclient.NewClient( + // Initialize the state sync client + leafMetricsNames := make(map[message.NodeType]string) + leafMetricsNames[stateLeafRequestConfig.LeafType] = stateLeafRequestConfig.MetricName + + vm.Client = vmsync.NewClient(&vmsync.ClientConfig{ + Chain: vm.eth, + State: vm.State, + Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.Network, Codec: vm.networkCodec, - Stats: stats.NewClientSyncerStats(), + Stats: stats.NewClientSyncerStats(leafMetricsNames), StateSyncNodeIDs: stateSyncIDs, BlockParser: vm, }, ), - enabled: vm.config.StateSyncEnabled, - skipResume: vm.config.StateSyncSkipResume, - stateSyncMinBlocks: vm.config.StateSyncMinBlocks, - stateSyncRequestSize: vm.config.StateSyncRequestSize, - lastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around - chaindb: vm.chaindb, - metadataDB: vm.metadataDB, - acceptedBlockDB: vm.acceptedBlockDB, - db: vm.versiondb, + Enabled: stateSyncEnabled, + SkipResume: vm.config.StateSyncSkipResume, + MinBlocks: vm.config.StateSyncMinBlocks, + RequestSize: vm.config.StateSyncRequestSize, + LastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around + ChaindDB: vm.chaindb, + VerDB: vm.versiondb, + MetadataDB: vm.metadataDB, + Acceptor: vm, + Parser: vm.extensionConfig.SyncableParser, + Extender: 
nil, }) // If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume // sync using a snapshot that has been modified by the node running normal operations. - if !vm.config.StateSyncEnabled { - return vm.StateSyncClient.ClearOngoingSummary() + if !stateSyncEnabled { + return vm.Client.ClearOngoingSummary() } return nil @@ -742,17 +785,16 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { // onBootstrapStarted marks this VM as bootstrapping func (vm *VM) onBootstrapStarted() error { vm.bootstrapped.Set(false) - if err := vm.StateSyncClient.Error(); err != nil { + if err := vm.Client.Error(); err != nil { return err } // After starting bootstrapping, do not attempt to resume a previous state sync. - if err := vm.StateSyncClient.ClearOngoingSummary(); err != nil { + if err := vm.Client.ClearOngoingSummary(); err != nil { return err } // Ensure snapshots are initialized before bootstrapping (i.e., if state sync is skipped). // Note calling this function has no effect if snapshots are already initialized. vm.blockChain.InitializeSnapshots() - return nil } @@ -880,25 +922,6 @@ func (vm *VM) onNormalOperationsStarted() error { return nil } -// setAppRequestHandlers sets the request handlers for the VM to serve state sync -// requests. -func (vm *VM) setAppRequestHandlers() { - // Create standalone EVM TrieDB (read only) for serving leafs requests. - // We create a standalone TrieDB here, so that it has a standalone cache from the one - // used by the node when processing blocks. - evmTrieDB := triedb.NewDatabase( - vm.chaindb, - &triedb.Config{ - DBOverride: hashdb.Config{ - CleanCacheSize: vm.config.StateSyncServerTrieCache * units.MiB, - }.BackendConstructor, - }, - ) - - networkHandler := newNetworkHandler(vm.blockChain, vm.chaindb, evmTrieDB, vm.warpBackend, vm.networkCodec) - vm.Network.SetRequestHandler(networkHandler) -} - func (vm *VM) WaitForEvent(ctx context.Context) (commonEng.Message, error) { vm.builderLock.Lock() builder := vm.builder @@ -935,7 +958,7 @@ func (vm *VM) Shutdown(context.Context) error { } } vm.Network.Shutdown() - if err := vm.StateSyncClient.Shutdown(); err != nil { + if err := vm.Client.Shutdown(); err != nil { log.Error("error stopping state syncer", "err", err) } close(vm.shutdownChan) @@ -1322,23 +1345,16 @@ func attachEthService(handler *rpc.Server, apis []rpc.API, names []string) error return nil } -func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { - vm.vmLock.Lock() - defer vm.vmLock.Unlock() - - if err := vm.validatorsManager.Connect(nodeID); err != nil { - return fmt.Errorf("uptime manager failed to connect node %s: %w", nodeID, err) +func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { + if vm.config.StateSyncEnabled != nil { + // if the config is set, use that + return *vm.config.StateSyncEnabled } - return vm.Network.Connected(ctx, nodeID, version) -} -func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { - vm.vmLock.Lock() - defer vm.vmLock.Unlock() - - if err := vm.validatorsManager.Disconnect(nodeID); err != nil { - return fmt.Errorf("uptime manager failed to disconnect node %s: %w", nodeID, err) - } + // enable state sync by default if the chain is empty. 
+ return lastAcceptedHeight == 0 +} - return vm.Network.Disconnected(ctx, nodeID) +func (vm *VM) PutLastAcceptedID(ID ids.ID) error { + return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) } diff --git a/sync/client/client_test.go b/sync/client/client_test.go index 20ec22527a..5524f420ba 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -90,7 +90,7 @@ func TestGetCode(t *testing.T) { Codec: message.Codec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: nil, - BlockParser: mockBlockParser, + BlockParser: newTestBlockParser(), }) for name, test := range tests { @@ -161,7 +161,7 @@ func TestGetBlocks(t *testing.T) { Codec: message.Codec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: nil, - BlockParser: mockBlockParser, + BlockParser: newTestBlockParser(), }) blocksRequestHandler := handlers.NewBlockRequestHandler(buildGetter(blocks), message.Codec, handlerstats.NewNoopHandlerStats()) @@ -416,13 +416,13 @@ func TestGetLeafs(t *testing.T) { largeTrieRoot, largeTrieKeys, _ := statesynctest.GenerateTrie(t, trieDB, 100_000, common.HashLength) smallTrieRoot, _, _ := statesynctest.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) - handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) + handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) client := NewClient(&ClientConfig{ NetworkClient: &mockNetwork{}, Codec: message.Codec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: nil, - BlockParser: mockBlockParser, + BlockParser: newTestBlockParser(), }) tests := map[string]struct { @@ -786,7 +786,7 @@ func TestGetLeafsRetries(t *testing.T) { trieDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil) root, _, _ := statesynctest.GenerateTrie(t, trieDB, 100_000, common.HashLength) - handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) + handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) mockNetClient := &mockNetwork{} const maxAttempts = 8 @@ -795,7 +795,7 @@ func TestGetLeafsRetries(t *testing.T) { Codec: message.Codec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: nil, - BlockParser: mockBlockParser, + BlockParser: newTestBlockParser(), }) request := message.LeafsRequest{ @@ -856,7 +856,7 @@ func TestStateSyncNodes(t *testing.T) { Codec: message.Codec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: stateSyncNodes, - BlockParser: mockBlockParser, + BlockParser: newTestBlockParser(), }) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/sync/client/stats/stats.go b/sync/client/stats/stats.go index bd1d77cbc6..2d727a7a0b 100644 --- a/sync/client/stats/stats.go +++ b/sync/client/stats/stats.go @@ -75,17 +75,21 @@ func (m *messageMetric) UpdateRequestLatency(duration time.Duration) { } type clientSyncerStats struct { - stateTrieLeavesMetric, + leafMetrics map[message.NodeType]MessageMetric codeRequestMetric, blockRequestMetric MessageMetric } // NewClientSyncerStats returns stats for the client syncer -func NewClientSyncerStats() ClientSyncerStats { +func NewClientSyncerStats(leafMetricNames map[message.NodeType]string) *clientSyncerStats { + leafMetrics := make(map[message.NodeType]MessageMetric, len(leafMetricNames)) + for nodeType, name := range leafMetricNames { + leafMetrics[nodeType] = NewMessageMetric(name) + } return &clientSyncerStats{ - 
stateTrieLeavesMetric: NewMessageMetric("sync_state_trie_leaves"), - codeRequestMetric: NewMessageMetric("sync_code"), - blockRequestMetric: NewMessageMetric("sync_blocks"), + leafMetrics: leafMetrics, + codeRequestMetric: NewMessageMetric("sync_code"), + blockRequestMetric: NewMessageMetric("sync_blocks"), } } @@ -97,7 +101,11 @@ func (c *clientSyncerStats) GetMetric(msgIntf message.Request) (MessageMetric, e case message.CodeRequest: return c.codeRequestMetric, nil case message.LeafsRequest: - return c.stateTrieLeavesMetric, nil + metric, ok := c.leafMetrics[msg.NodeType] + if !ok { + return nil, fmt.Errorf("invalid leafs request for node type: %T", msg.NodeType) + } + return metric, nil default: return nil, fmt.Errorf("attempted to get metric for invalid request with type %T", msg) } @@ -124,12 +132,3 @@ func NewNoOpStats() ClientSyncerStats { func (n noopStats) GetMetric(_ message.Request) (MessageMetric, error) { return n.noop, nil } - -// NewStats returns syncer stats if enabled or a no-op version if disabled. -func NewStats(enabled bool) ClientSyncerStats { - if enabled { - return NewClientSyncerStats() - } else { - return NewNoOpStats() - } -} diff --git a/sync/client/mock_client.go b/sync/client/test_client.go similarity index 77% rename from sync/client/mock_client.go rename to sync/client/test_client.go index 0d30b85e20..1edaf4136c 100644 --- a/sync/client/mock_client.go +++ b/sync/client/test_client.go @@ -18,37 +18,36 @@ import ( ) var ( - _ Client = &MockClient{} - mockBlockParser EthBlockParser = &testBlockParser{} + _ Client = (*TestClient)(nil) + _ EthBlockParser = (*testBlockParser)(nil) ) -// TODO replace with gomock library -type MockClient struct { +type TestClient struct { codec codec.Manager - leafsHandler *handlers.LeafsRequestHandler + leafsHandler handlers.LeafRequestHandler leavesReceived int32 codesHandler *handlers.CodeRequestHandler codeReceived int32 blocksHandler *handlers.BlockRequestHandler blocksReceived int32 // GetLeafsIntercept is called on every GetLeafs request if set to a non-nil callback. - // The returned response will be returned by MockClient to the caller. + // The returned response will be returned by TestClient to the caller. GetLeafsIntercept func(req message.LeafsRequest, res message.LeafsResponse) (message.LeafsResponse, error) // GetCodesIntercept is called on every GetCode request if set to a non-nil callback. - // The returned response will be returned by MockClient to the caller. + // The returned response will be returned by TestClient to the caller. GetCodeIntercept func(hashes []common.Hash, codeBytes [][]byte) ([][]byte, error) // GetBlocksIntercept is called on every GetBlocks request if set to a non-nil callback. - // The returned response will be returned by MockClient to the caller. + // The returned response will be returned by TestClient to the caller. 
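These intercept hooks are what make the rename from MockClient to TestClient apt: tests inject failures into real handler responses rather than mocking calls. A hedged usage sketch, with the handlers constructed as in the surrounding tests:

tc := statesyncclient.NewTestClient(message.Codec, leafsHandler, codeHandler, blocksHandler)
tc.GetLeafsIntercept = func(_ message.LeafsRequest, res message.LeafsResponse) (message.LeafsResponse, error) {
	// Drop the first leaf so the client's range-proof verification fails
	// and its retry path is exercised.
	if len(res.Keys) > 0 {
		res.Keys = res.Keys[1:]
		res.Vals = res.Vals[1:]
	}
	return res, nil
}
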
GetBlocksIntercept func(blockReq message.BlockRequest, blocks types.Blocks) (types.Blocks, error) } -func NewMockClient( +func NewTestClient( codec codec.Manager, - leafHandler *handlers.LeafsRequestHandler, + leafHandler handlers.LeafRequestHandler, codesHandler *handlers.CodeRequestHandler, blocksHandler *handlers.BlockRequestHandler, -) *MockClient { - return &MockClient{ +) *TestClient { + return &TestClient{ codec: codec, leafsHandler: leafHandler, codesHandler: codesHandler, @@ -56,7 +55,7 @@ func NewMockClient( } } -func (ml *MockClient) GetLeafs(ctx context.Context, request message.LeafsRequest) (message.LeafsResponse, error) { +func (ml *TestClient) GetLeafs(ctx context.Context, request message.LeafsRequest) (message.LeafsResponse, error) { response, err := ml.leafsHandler.OnLeafsRequest(ctx, ids.GenerateTestNodeID(), 1, request) if err != nil { return message.LeafsResponse{}, err @@ -70,18 +69,18 @@ func (ml *MockClient) GetLeafs(ctx context.Context, request message.LeafsRequest if ml.GetLeafsIntercept != nil { leafsResponse, err = ml.GetLeafsIntercept(request, leafsResponse) } - // Increment the number of leaves received by the mock client + // Increment the number of leaves received by the test client atomic.AddInt32(&ml.leavesReceived, int32(numLeaves)) return leafsResponse, err } -func (ml *MockClient) LeavesReceived() int32 { +func (ml *TestClient) LeavesReceived() int32 { return atomic.LoadInt32(&ml.leavesReceived) } -func (ml *MockClient) GetCode(ctx context.Context, hashes []common.Hash) ([][]byte, error) { +func (ml *TestClient) GetCode(ctx context.Context, hashes []common.Hash) ([][]byte, error) { if ml.codesHandler == nil { - panic("no code handler for mock client") + panic("no code handler for test client") } request := message.CodeRequest{Hashes: hashes} response, err := ml.codesHandler.OnCodeRequest(ctx, ids.GenerateTestNodeID(), 1, request) @@ -103,13 +102,13 @@ func (ml *MockClient) GetCode(ctx context.Context, hashes []common.Hash) ([][]by return code, err } -func (ml *MockClient) CodeReceived() int32 { +func (ml *TestClient) CodeReceived() int32 { return atomic.LoadInt32(&ml.codeReceived) } -func (ml *MockClient) GetBlocks(ctx context.Context, blockHash common.Hash, height uint64, numParents uint16) ([]*types.Block, error) { +func (ml *TestClient) GetBlocks(ctx context.Context, blockHash common.Hash, height uint64, numParents uint16) ([]*types.Block, error) { if ml.blocksHandler == nil { - panic("no blocks handler for mock client") + panic("no blocks handler for test client") } request := message.BlockRequest{ Hash: blockHash, @@ -121,7 +120,7 @@ func (ml *MockClient) GetBlocks(ctx context.Context, blockHash common.Hash, heig return nil, err } - client := &client{blockParser: mockBlockParser} // Hack to avoid duplicate code + client := &client{blockParser: newTestBlockParser()} // Hack to avoid duplicate code blocksRes, numBlocks, err := client.parseBlocks(ml.codec, request, response) if err != nil { return nil, err @@ -134,12 +133,16 @@ func (ml *MockClient) GetBlocks(ctx context.Context, blockHash common.Hash, heig return blocks, err } -func (ml *MockClient) BlocksReceived() int32 { +func (ml *TestClient) BlocksReceived() int32 { return atomic.LoadInt32(&ml.blocksReceived) } type testBlockParser struct{} +func newTestBlockParser() *testBlockParser { + return &testBlockParser{} +} + func (t *testBlockParser) ParseEthBlock(b []byte) (*types.Block, error) { block := new(types.Block) if err := rlp.DecodeBytes(b, block); err != nil { diff --git 
a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go index cf428560c3..41b40ed655 100644 --- a/sync/handlers/leafs_request.go +++ b/sync/handlers/leafs_request.go @@ -25,6 +25,8 @@ import ( "github.com/ava-labs/subnet-evm/utils" ) +var _ LeafRequestHandler = (*leafsRequestHandler)(nil) + const ( // Maximum number of leaves to return in a message.LeafsResponse // This parameter overrides any other Limit specified @@ -39,22 +41,28 @@ const ( keyLength = common.HashLength // length of the keys of the trie to sync ) -// LeafsRequestHandler is a peer.RequestHandler for types.LeafsRequest +type LeafRequestHandler interface { + OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) +} + +// leafsRequestHandler is a peer.RequestHandler for types.LeafsRequest // serving requested trie data -type LeafsRequestHandler struct { +type leafsRequestHandler struct { trieDB *triedb.Database snapshotProvider SnapshotProvider codec codec.Manager stats stats.LeafsRequestHandlerStats pool sync.Pool + trieKeyLength int } -func NewLeafsRequestHandler(trieDB *triedb.Database, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats stats.LeafsRequestHandlerStats) *LeafsRequestHandler { - return &LeafsRequestHandler{ +func NewLeafsRequestHandler(trieDB *triedb.Database, trieKeyLength int, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats stats.LeafsRequestHandlerStats) *leafsRequestHandler { + return &leafsRequestHandler{ trieDB: trieDB, snapshotProvider: snapshotProvider, codec: codec, stats: syncerStats, + trieKeyLength: trieKeyLength, pool: sync.Pool{ New: func() interface{} { return make([][]byte, 0, maxLeavesLimit) }, }, @@ -69,9 +77,9 @@ func NewLeafsRequestHandler(trieDB *triedb.Database, snapshotProvider SnapshotPr // Specified Limit in message.LeafsRequest is overridden to maxLeavesLimit if it is greater than maxLeavesLimit // Expects returned errors to be treated as FATAL // Never returns errors -// Returns nothing if the requested trie root is not found +// Returns nothing if NodeType is invalid or requested trie root is not found // Assumes ctx is active -func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { +func (lrh *leafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { startTime := time.Now() lrh.stats.IncLeafsRequest() @@ -83,13 +91,12 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N lrh.stats.IncInvalidLeafsRequest() return nil, nil } - if len(leafsRequest.Start) != 0 && len(leafsRequest.Start) != keyLength || - len(leafsRequest.End) != 0 && len(leafsRequest.End) != keyLength { - log.Debug("invalid length for leafs request range, dropping request", "startLen", len(leafsRequest.Start), "endLen", len(leafsRequest.End), "expected", keyLength) + if (len(leafsRequest.Start) != 0 && len(leafsRequest.Start) != lrh.trieKeyLength) || + (len(leafsRequest.End) != 0 && len(leafsRequest.End) != lrh.trieKeyLength) { + log.Debug("invalid length for leafs request range, dropping request", "startLen", len(leafsRequest.Start), "endLen", len(leafsRequest.End), "expected", lrh.trieKeyLength) lrh.stats.IncInvalidLeafsRequest() return nil, nil } - // TODO: We should know the state root that accounts correspond to, // as this information will be necessary to access storage tries when 
 // the trie is path based.
@@ -105,7 +112,6 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N
 	if limit > maxLeavesLimit {
 		limit = maxLeavesLimit
 	}
-
 	var leafsResponse message.LeafsResponse
 	// pool response's key/val allocations
 	leafsResponse.Keys = lrh.pool.Get().([][]byte)
@@ -120,12 +126,11 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N
 		lrh.pool.Put(leafsResponse.Keys[:0])
 		lrh.pool.Put(leafsResponse.Vals[:0])
 	}()
-
 	responseBuilder := &responseBuilder{
 		request:   &leafsRequest,
 		response:  &leafsResponse,
 		t:         t,
-		keyLength: keyLength,
+		keyLength: lrh.trieKeyLength,
 		limit:     limit,
 		stats:     lrh.stats,
 	}
@@ -134,7 +139,6 @@
 		responseBuilder.snap = lrh.snapshotProvider.Snapshots()
 	}
 	err = responseBuilder.handleRequest(ctx)
-
 	// ensure metrics are captured properly on all return paths
 	defer func() {
 		lrh.stats.UpdateLeafsRequestProcessingTime(time.Since(startTime))
@@ -151,13 +155,11 @@
 		log.Debug("context err set before any leafs were iterated", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "ctxErr", ctx.Err())
 		return nil, nil
 	}
-
 	responseBytes, err := lrh.codec.Marshal(message.Version, leafsResponse)
 	if err != nil {
 		log.Debug("failed to marshal LeafsResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "err", err)
 		return nil, nil
 	}
-
 	log.Debug("handled leafsRequest", "time", time.Since(startTime), "leafs", len(leafsResponse.Keys), "proofLen", len(leafsResponse.ProofVals))
 	return responseBytes, nil
 }
diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go
index 2ec0132375..f451468567 100644
--- a/sync/handlers/leafs_request_test.go
+++ b/sync/handlers/leafs_request_test.go
@@ -74,7 +74,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) {
 		}
 	}
 	snapshotProvider := &TestSnapshotProvider{}
-	leafsHandler := NewLeafsRequestHandler(trieDB, snapshotProvider, message.Codec, mockHandlerStats)
+	leafsHandler := NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, snapshotProvider, message.Codec, mockHandlerStats)
 	snapConfig := snapshot.Config{
 		CacheSize:  64,
 		AsyncBuild: false,
diff --git a/sync/handlers/stats/stats.go b/sync/handlers/stats/stats.go
index 9acf088d8b..277320e4ed 100644
--- a/sync/handlers/stats/stats.go
+++ b/sync/handlers/stats/stats.go
@@ -166,7 +166,10 @@ func (h *handlerStats) IncSnapshotReadSuccess() { h.snapshotReadSuccess.Inc(1
 func (h *handlerStats) IncSnapshotSegmentValid()   { h.snapshotSegmentValid.Inc(1) }
 func (h *handlerStats) IncSnapshotSegmentInvalid() { h.snapshotSegmentInvalid.Inc(1) }
 
-func NewHandlerStats(enabled bool) HandlerStats {
+// GetOrRegisterHandlerStats returns a [HandlerStats] to track state sync handler metrics.
+// If `enabled` is false, a no-op implementation is returned.
+// If `enabled` is true, calling this multiple times will return the same registered metrics.
+func GetOrRegisterHandlerStats(enabled bool) HandlerStats { if !enabled { return NewNoopHandlerStats() } diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 5fe51b5851..02909b1e4b 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -42,7 +42,7 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { // Set up mockClient codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) - mockClient := statesyncclient.NewMockClient(message.Codec, nil, codeRequestHandler, nil) + mockClient := statesyncclient.NewTestClient(message.Codec, nil, codeRequestHandler, nil) mockClient.GetCodeIntercept = test.getCodeIntercept clientDB := rawdb.NewMemoryDatabase() diff --git a/sync/statesync/state_syncer.go b/sync/statesync/state_syncer.go index 35e03fb79c..0b3bbebf87 100644 --- a/sync/statesync/state_syncer.go +++ b/sync/statesync/state_syncer.go @@ -254,21 +254,7 @@ func (t *stateSync) Start(ctx context.Context) error { return nil } -func (t *stateSync) Wait(ctx context.Context) error { - // This should only be called after Start, so we can assume cancelFunc is set. - if t.cancelFunc == nil { - return errWaitBeforeStart - } - - select { - case err := <-t.done: - return err - case <-ctx.Done(): - t.cancelFunc() // cancel the sync operations if the context is done - <-t.done // wait for the sync operations to finish - return ctx.Err() - } -} +func (t *stateSync) Done() <-chan error { return t.done } // addTrieInProgress tracks the root as being currently synced. func (t *stateSync) addTrieInProgress(root common.Hash, trie *trieToSync) { diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index d71d0a3812..23a98d09bf 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -22,11 +22,13 @@ import ( "github.com/ava-labs/libevm/trie" "github.com/ava-labs/libevm/triedb" "github.com/ava-labs/subnet-evm/core/state/snapshot" + "github.com/ava-labs/subnet-evm/plugin/evm/customrawdb" "github.com/ava-labs/subnet-evm/plugin/evm/message" statesyncclient "github.com/ava-labs/subnet-evm/sync/client" "github.com/ava-labs/subnet-evm/sync/handlers" handlerstats "github.com/ava-labs/subnet-evm/sync/handlers/stats" "github.com/ava-labs/subnet-evm/sync/statesync/statesynctest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -49,9 +51,9 @@ func testSync(t *testing.T, test syncTest) { ctx = test.ctx } clientDB, serverDB, serverTrieDB, root := test.prepareForTest(t) - leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) + leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) - mockClient := statesyncclient.NewMockClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil) + mockClient := statesyncclient.NewTestClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil) // Set intercept functions for the mock client mockClient.GetLeafsIntercept = test.GetLeafsIntercept mockClient.GetCodeIntercept = test.GetCodeIntercept @@ -65,10 +67,12 @@ func testSync(t *testing.T, test syncTest) { MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, RequestSize: 1024, }) - require.NoError(t, err, "failed to create state syncer") + if err 
!= nil { + t.Fatal(err) + } // begin sync s.Start(ctx) - waitFor(t, context.Background(), s.Wait, test.expectedError, testSyncTimeout) + waitFor(t, s.Done(), test.expectedError, testSyncTimeout) if test.expectedError != nil { return } @@ -86,21 +90,27 @@ func testSyncResumes(t *testing.T, steps []syncTest, stepCallback func()) { } // waitFor waits for a result on the [result] channel to match [expected], or a timeout. -func waitFor(t *testing.T, ctx context.Context, resultFunc func(context.Context) error, expected error, timeout time.Duration) { +func waitFor(t *testing.T, result <-chan error, expected error, timeout time.Duration) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - err := resultFunc(ctx) - if ctx.Err() != nil { + select { + case err := <-result: + if expected != nil { + if err == nil { + t.Fatalf("Expected error %s, but got nil", expected) + } + assert.Contains(t, err.Error(), expected.Error()) + } else if err != nil { + t.Fatal("unexpected error waiting for sync result", err) + } + case <-time.After(timeout): // print a stack trace to assist with debugging + // if the test times out. var stackBuf bytes.Buffer pprof.Lookup("goroutine").WriteTo(&stackBuf, 2) t.Log(stackBuf.String()) // fail the test t.Fatal("unexpected timeout waiting for sync result") } - - require.ErrorIs(t, err, expected, "result of sync did not match expected error") } func TestSimpleSyncCases(t *testing.T) { @@ -507,56 +517,90 @@ func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, }) } -func TestDifferentWaitContext(t *testing.T) { - serverDB := rawdb.NewMemoryDatabase() - serverTrieDB := triedb.NewDatabase(serverDB, nil) - // Create trie with many accounts to ensure sync takes time - root := statesynctest.FillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, 2000) - clientDB := rawdb.NewMemoryDatabase() +// assertDBConsistency checks [serverTrieDB] and [clientTrieDB] have the same EVM state trie at [root], +// and that [clientTrieDB.DiskDB] has corresponding account & snapshot values. +// Also verifies any code referenced by the EVM state is present in [clientTrieDB] and the hash is correct. 
+func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database, serverTrieDB, clientTrieDB *triedb.Database) { + numSnapshotAccounts := 0 + accountIt := customrawdb.IterateAccountSnapshots(clientDB) + defer accountIt.Release() + for accountIt.Next() { + if !bytes.HasPrefix(accountIt.Key(), rawdb.SnapshotAccountPrefix) || len(accountIt.Key()) != len(rawdb.SnapshotAccountPrefix)+common.HashLength { + continue + } + numSnapshotAccounts++ + } + if err := accountIt.Error(); err != nil { + t.Fatal(err) + } + trieAccountLeaves := 0 + + statesynctest.AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { + trieAccountLeaves++ + accHash := common.BytesToHash(key) + var acc types.StateAccount + if err := rlp.DecodeBytes(val, &acc); err != nil { + return err + } + // check snapshot consistency + snapshotVal := rawdb.ReadAccountSnapshot(clientDB, accHash) + expectedSnapshotVal := types.SlimAccountRLP(acc) + assert.Equal(t, expectedSnapshotVal, snapshotVal) + + // check code consistency + if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { + codeHash := common.BytesToHash(acc.CodeHash) + code := rawdb.ReadCode(clientDB, codeHash) + actualHash := crypto.Keccak256Hash(code) + assert.NotZero(t, len(code)) + assert.Equal(t, codeHash, actualHash) + } + if acc.Root == types.EmptyRootHash { + return nil + } - // Track requests to show sync continues after Wait returns - var requestCount int64 + storageIt := rawdb.IterateStorageSnapshots(clientDB, accHash) + defer storageIt.Release() - leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) - codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) - mockClient := statesyncclient.NewMockClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil) - - // Intercept to track ongoing requests and add delay - mockClient.GetLeafsIntercept = func(req message.LeafsRequest, resp message.LeafsResponse) (message.LeafsResponse, error) { - atomic.AddInt64(&requestCount, 1) - // Add small delay to ensure sync is ongoing - time.Sleep(10 * time.Millisecond) - return resp, nil - } + snapshotStorageKeysCount := 0 + for storageIt.Next() { + snapshotStorageKeysCount++ + } - s, err := NewStateSyncer(&StateSyncerConfig{ - Client: mockClient, - Root: root, - DB: clientDB, - BatchSize: 1000, - NumCodeFetchingWorkers: DefaultNumCodeFetchingWorkers, - MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, - RequestSize: 1024, + storageTrieLeavesCount := 0 + + // check storage trie and storage snapshot consistency + statesynctest.AssertTrieConsistency(t, acc.Root, serverTrieDB, clientTrieDB, func(key, val []byte) error { + storageTrieLeavesCount++ + snapshotVal := rawdb.ReadStorageSnapshot(clientDB, accHash, common.BytesToHash(key)) + assert.Equal(t, val, snapshotVal) + return nil + }) + + assert.Equal(t, storageTrieLeavesCount, snapshotStorageKeysCount) + return nil }) - if err != nil { - t.Fatal(err) - } - // Create two different contexts - startCtx := context.Background() // Never cancelled - waitCtx, waitCancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer waitCancel() + // Check that the number of accounts in the snapshot matches the number of leaves in the accounts trie + assert.Equal(t, trieAccountLeaves, numSnapshotAccounts) +} - // Start with one context - require.NoError(t, s.Start(startCtx), "failed to start state syncer") +func fillAccountsWithStorage(t 
*testing.T, serverDB ethdb.Database, serverTrieDB *triedb.Database, root common.Hash, numAccounts int) common.Hash { + newRoot, _ := statesynctest.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + codeBytes := make([]byte, 256) + _, err := rand.Read(codeBytes) + if err != nil { + t.Fatalf("error reading random code bytes: %v", err) + } - // Wait with different context that will timeout - err = s.Wait(waitCtx) - require.ErrorIs(t, err, context.DeadlineExceeded, "Wait should return DeadlineExceeded error") + codeHash := crypto.Keccak256Hash(codeBytes) + rawdb.WriteCode(serverDB, codeHash, codeBytes) + account.CodeHash = codeHash[:] - // Check if more requests were made after Wait returned - requestsWhenWaitReturned := atomic.LoadInt64(&requestCount) - time.Sleep(100 * time.Millisecond) - requestsAfterWait := atomic.LoadInt64(&requestCount) - require.Equal(t, requestsWhenWaitReturned, requestsAfterWait, "Sync should not continue after Wait returned with different context") + // now create state trie + numKeys := 16 + account.Root, _, _ = statesynctest.GenerateTrie(t, serverTrieDB, numKeys, common.HashLength) + return account + }) + return newRoot } From f3fd07333316d5008e8d10758342e9519c534501 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Thu, 7 Aug 2025 16:13:58 -0400 Subject: [PATCH 02/26] reapply wait changes --- plugin/evm/sync/client.go | 4 ++-- sync/statesync/state_syncer.go | 19 ++++++++++++++++++- sync/statesync/sync_test.go | 23 +++++++++-------------- 3 files changed, 29 insertions(+), 17 deletions(-) diff --git a/plugin/evm/sync/client.go b/plugin/evm/sync/client.go index dc1e70c56b..103cc9c84d 100644 --- a/plugin/evm/sync/client.go +++ b/plugin/evm/sync/client.go @@ -121,7 +121,7 @@ type Client interface { // Error returns an error if any was encountered. type Syncer interface { Start(ctx context.Context) error - Done() <-chan error + Wait(ctx context.Context) error } // StateSyncEnabled returns [client.enabled], which is set in the chain's config file. @@ -315,7 +315,7 @@ func (client *client) syncStateTrie(ctx context.Context) error { if err := evmSyncer.Start(ctx); err != nil { return err } - err = <-evmSyncer.Done() + err = evmSyncer.Wait(ctx) log.Info("state sync: sync finished", "root", client.summary.GetBlockRoot(), "err", err) return err } diff --git a/sync/statesync/state_syncer.go b/sync/statesync/state_syncer.go index 0b3bbebf87..03672e7f4b 100644 --- a/sync/statesync/state_syncer.go +++ b/sync/statesync/state_syncer.go @@ -34,6 +34,9 @@ type StateSyncerConfig struct { MaxOutstandingCodeHashes int // Maximum number of code hashes in the code syncer queue NumCodeFetchingWorkers int // Number of code syncing threads RequestSize uint16 // Number of leafs to request from a peer at a time + + // context cancellation management + cancelFunc context.CancelFunc } // stateSync keeps the state of the entire state sync operation. @@ -254,7 +257,21 @@ func (t *stateSync) Start(ctx context.Context) error { return nil } -func (t *stateSync) Done() <-chan error { return t.done } +func (t *stateSync) Wait(ctx context.Context) error { + // This should only be called after Start, so we can assume cancelFunc is set. 
+ if t.cancelFunc == nil { + return errWaitBeforeStart + } + + select { + case err := <-t.done: + return err + case <-ctx.Done(): + t.cancelFunc() // cancel the sync operations if the context is done + <-t.done // wait for the sync operations to finish + return ctx.Err() + } +} // addTrieInProgress tracks the root as being currently synced. func (t *stateSync) addTrieInProgress(root common.Hash, trie *trieToSync) { diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index 23a98d09bf..e6c4f0a5cb 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -72,7 +72,7 @@ func testSync(t *testing.T, test syncTest) { } // begin sync s.Start(ctx) - waitFor(t, s.Done(), test.expectedError, testSyncTimeout) + waitFor(t, context.Background(), s.Wait, test.expectedError, testSyncTimeout) if test.expectedError != nil { return } @@ -90,27 +90,22 @@ func testSyncResumes(t *testing.T, steps []syncTest, stepCallback func()) { } // waitFor waits for a result on the [result] channel to match [expected], or a timeout. -func waitFor(t *testing.T, result <-chan error, expected error, timeout time.Duration) { +func waitFor(t *testing.T, ctx context.Context, resultFunc func(context.Context) error, expected error, timeout time.Duration) { t.Helper() - select { - case err := <-result: - if expected != nil { - if err == nil { - t.Fatalf("Expected error %s, but got nil", expected) - } - assert.Contains(t, err.Error(), expected.Error()) - } else if err != nil { - t.Fatal("unexpected error waiting for sync result", err) - } - case <-time.After(timeout): + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := resultFunc(ctx) + if ctx.Err() != nil { // print a stack trace to assist with debugging - // if the test times out. 
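The restored `Wait` lets callers bound the wait independently of the context used to start the sync, and cancelling the wait context also tears the sync down. A minimal caller sketch, assuming a `Syncer` as defined in plugin/evm/sync/client.go:

startCtx := context.Background() // governs the sync operations themselves
waitCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()

if err := syncer.Start(startCtx); err != nil {
	return err
}
// On waitCtx expiry, Wait cancels the sync, drains the done channel,
// and returns waitCtx.Err().
return syncer.Wait(waitCtx)
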
+ var stackBuf bytes.Buffer pprof.Lookup("goroutine").WriteTo(&stackBuf, 2) t.Log(stackBuf.String()) // fail the test t.Fatal("unexpected timeout waiting for sync result") } + + require.ErrorIs(t, err, expected, "result of sync did not match expected error") } func TestSimpleSyncCases(t *testing.T) { From d04a7fce731b672800fd71019b40e25fa9e55ef9 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Thu, 7 Aug 2025 16:24:28 -0400 Subject: [PATCH 03/26] add test back --- sync/statesync/sync_test.go | 54 +++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index e6c4f0a5cb..3e10b53bb8 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -599,3 +599,57 @@ func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB }) return newRoot } + +func TestDifferentWaitContext(t *testing.T) { + serverDB := rawdb.NewMemoryDatabase() + serverTrieDB := triedb.NewDatabase(serverDB, nil) + // Create trie with many accounts to ensure sync takes time + root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, 2000) + clientDB := rawdb.NewMemoryDatabase() + + // Track requests to show sync continues after Wait returns + var requestCount int64 + + leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) + codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) + mockClient := statesyncclient.NewTestClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil) + + // Intercept to track ongoing requests and add delay + mockClient.GetLeafsIntercept = func(req message.LeafsRequest, resp message.LeafsResponse) (message.LeafsResponse, error) { + atomic.AddInt64(&requestCount, 1) + // Add small delay to ensure sync is ongoing + time.Sleep(10 * time.Millisecond) + return resp, nil + } + + s, err := NewStateSyncer(&StateSyncerConfig{ + Client: mockClient, + Root: root, + DB: clientDB, + BatchSize: 1000, + NumCodeFetchingWorkers: DefaultNumCodeFetchingWorkers, + MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, + RequestSize: 1024, + }) + if err != nil { + t.Fatal(err) + } + + // Create two different contexts + startCtx := context.Background() // Never cancelled + waitCtx, waitCancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer waitCancel() + + // Start with one context + require.NoError(t, s.Start(startCtx), "failed to start state syncer") + + // Wait with different context that will timeout + err = s.Wait(waitCtx) + require.ErrorIs(t, err, context.DeadlineExceeded, "Wait should return DeadlineExceeded error") + + // Check if more requests were made after Wait returned + requestsWhenWaitReturned := atomic.LoadInt64(&requestCount) + time.Sleep(100 * time.Millisecond) + requestsAfterWait := atomic.LoadInt64(&requestCount) + require.Equal(t, requestsWhenWaitReturned, requestsAfterWait, "Sync should not continue after Wait returned with different context") +} From 239a3b5080c310f472256fbee0bcc573347600d4 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Fri, 8 Aug 2025 14:54:28 -0400 Subject: [PATCH 04/26] attempt 2 --- plugin/evm/block.go | 163 +++++++++++++----- plugin/evm/block_test.go | 3 +- plugin/evm/block_verification.go | 156 ----------------- plugin/evm/message/block_sync_summary.go | 82 +++++++++ .../evm/message/block_sync_summary_parser.go | 32 ++++ 
 .../message/block_sync_summary_provider.go    |  16 ++
 plugin/evm/message/block_sync_summary_test.go |  44 +++++
 plugin/evm/vm.go                              |  20 ++-
 plugin/evm/vm_test.go                         |  97 ++++++-----
 plugin/evm/vm_upgrade_bytes_test.go           |   4 +-
 plugin/evm/vm_warp_test.go                    |   7 +-
 sync/README.md                                |   4 +-
 sync/statesync/sync_test.go                   |  14 +-
 13 files changed, 379 insertions(+), 263 deletions(-)
 delete mode 100644 plugin/evm/block_verification.go
 create mode 100644 plugin/evm/message/block_sync_summary.go
 create mode 100644 plugin/evm/message/block_sync_summary_parser.go
 create mode 100644 plugin/evm/message/block_sync_summary_provider.go
 create mode 100644 plugin/evm/message/block_sync_summary_test.go

diff --git a/plugin/evm/block.go b/plugin/evm/block.go
index f3d9f636a0..07c504459d 100644
--- a/plugin/evm/block.go
+++ b/plugin/evm/block.go
@@ -10,16 +10,19 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/ava-labs/libevm/common"
 	"github.com/ava-labs/libevm/log"
 	"github.com/ava-labs/libevm/rlp"
+	"github.com/ava-labs/libevm/trie"
 
 	"github.com/ava-labs/libevm/core/rawdb"
 	"github.com/ava-labs/libevm/core/types"
+	"github.com/ava-labs/subnet-evm/constants"
 	"github.com/ava-labs/subnet-evm/core"
 	"github.com/ava-labs/subnet-evm/params"
 	"github.com/ava-labs/subnet-evm/params/extras"
+	"github.com/ava-labs/subnet-evm/plugin/evm/extension"
 	"github.com/ava-labs/subnet-evm/plugin/evm/header"
-	"github.com/ava-labs/subnet-evm/plugin/evm/sync"
 	"github.com/ava-labs/subnet-evm/precompile/precompileconfig"
 	"github.com/ava-labs/subnet-evm/predicate"
@@ -29,32 +32,41 @@ import (
 )
 
 var (
-	_ snowman.Block           = (*Block)(nil)
-	_ block.WithVerifyContext = (*Block)(nil)
-	_ sync.EthBlockWrapper    = (*Block)(nil)
+	_ snowman.Block           = (*wrappedBlock)(nil)
+	_ block.WithVerifyContext = (*wrappedBlock)(nil)
+	_ extension.ExtendedBlock = (*wrappedBlock)(nil)
 )
 
-// Block implements the snowman.Block interface
-type Block struct {
-	id       ids.ID
-	ethBlock *types.Block
-	vm       *VM
+// wrappedBlock implements the snowman.Block interface
+type wrappedBlock struct {
+	id        ids.ID
+	ethBlock  *types.Block
+	extension extension.BlockExtension
+	vm        *VM
 }
 
-// newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface
-func (vm *VM) newBlock(ethBlock *types.Block) *Block {
-	return &Block{
+// wrapBlock returns a new wrappedBlock around the given ethBlock, implementing the snowman.Block interface
+func wrapBlock(ethBlock *types.Block, vm *VM) (*wrappedBlock, error) {
+	b := &wrappedBlock{
 		id:       ids.ID(ethBlock.Hash()),
 		ethBlock: ethBlock,
 		vm:       vm,
 	}
+	if vm.extensionConfig.BlockExtender != nil {
+		extension, err := vm.extensionConfig.BlockExtender.NewBlockExtension(b)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create block extension: %w", err)
+		}
+		b.extension = extension
+	}
+	return b, nil
 }
 
 // ID implements the snowman.Block interface
-func (b *Block) ID() ids.ID { return b.id }
+func (b *wrappedBlock) ID() ids.ID { return b.id }
 
 // Accept implements the snowman.Block interface
-func (b *Block) Accept(context.Context) error {
+func (b *wrappedBlock) Accept(context.Context) error {
 	vm := b.vm
 
 	// Although returning an error from Accept is considered fatal, it is good
@@ -79,7 +91,7 @@ func (b *Block) Accept(context.Context) error {
 		return fmt.Errorf("chain could not accept %s: %w", blkID, err)
 	}
 
-	if err := vm.acceptedBlockDB.Put(lastAcceptedKey, blkID[:]); err != nil {
+	if err := vm.PutLastAcceptedID(blkID); err != nil {
 		return fmt.Errorf("failed to put %s as the last accepted block: %w", blkID, err)
 	}
 
@@ -88,7 +100,7 @@ func (b *Block) Accept(context.Context) error {
 
 // handlePrecompileAccept calls Accept on any logs generated with an active precompile address that implements
 // contract.Accepter
-func (b *Block) handlePrecompileAccept(rules extras.Rules) error {
+func (b *wrappedBlock) handlePrecompileAccept(rules extras.Rules) error {
 	// Short circuit early if there are no precompile accepters to execute
 	if len(rules.AccepterPrecompiles) == 0 {
 		return nil
@@ -121,7 +133,7 @@ func (b *Block) handlePrecompileAccept(rules extras.Rules) error {
 }
 
 // Reject implements the snowman.Block interface
-func (b *Block) Reject(context.Context) error {
+func (b *wrappedBlock) Reject(context.Context) error {
 	blkID := b.ID()
 	log.Debug("rejecting block",
 		"hash", blkID.Hex(),
@@ -132,33 +144,22 @@ func (b *Block) Reject(context.Context) error {
 }
 
 // Parent implements the snowman.Block interface
-func (b *Block) Parent() ids.ID {
+func (b *wrappedBlock) Parent() ids.ID {
 	return ids.ID(b.ethBlock.ParentHash())
 }
 
 // Height implements the snowman.Block interface
-func (b *Block) Height() uint64 {
+func (b *wrappedBlock) Height() uint64 {
 	return b.ethBlock.NumberU64()
 }
 
 // Timestamp implements the snowman.Block interface
-func (b *Block) Timestamp() time.Time {
+func (b *wrappedBlock) Timestamp() time.Time {
 	return time.Unix(int64(b.ethBlock.Time()), 0)
 }
 
-// syntacticVerify verifies that a *Block is well-formed.
-func (b *Block) syntacticVerify() error {
-	if b == nil || b.ethBlock == nil {
-		return errInvalidBlock
-	}
-
-	header := b.ethBlock.Header()
-	rules := b.vm.chainConfig.Rules(header.Number, params.IsMergeTODO, header.Time)
-	return b.vm.syntacticBlockValidator.SyntacticVerify(b, rules)
-}
-
 // Verify implements the snowman.Block interface
-func (b *Block) Verify(context.Context) error {
+func (b *wrappedBlock) Verify(context.Context) error {
 	return b.verify(&precompileconfig.PredicateContext{
 		SnowCtx:            b.vm.ctx,
 		ProposerVMBlockCtx: nil,
@@ -166,7 +167,7 @@ func (b *Block) Verify(context.Context) error {
 }
 
 // ShouldVerifyWithContext implements the block.WithVerifyContext interface
-func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) {
+func (b *wrappedBlock) ShouldVerifyWithContext(context.Context) (bool, error) {
 	rules := b.vm.rules(b.ethBlock.Number(), b.ethBlock.Time())
 	predicates := rules.Predicaters
 	// Short circuit early if there are no predicates to verify
@@ -190,7 +191,7 @@ func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) {
 }
 
 // VerifyWithContext implements the block.WithVerifyContext interface
-func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) error {
+func (b *wrappedBlock) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) error {
 	return b.verify(&precompileconfig.PredicateContext{
 		SnowCtx:            b.vm.ctx,
 		ProposerVMBlockCtx: proposerVMBlockCtx,
@@ -200,7 +201,7 @@ func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block
 
 // Verify the block is valid.
 // Enforces that the predicates are valid within [predicateContext].
 // Writes the block details to disk and the state to the trie manager iff writes=true.
-func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writes bool) error {
+func (b *wrappedBlock) verify(predicateContext *precompileconfig.PredicateContext, writes bool) error {
 	if predicateContext.ProposerVMBlockCtx != nil {
 		log.Debug("Verifying block with context", "block", b.ID(), "height", b.Height())
 	} else {
@@ -232,8 +233,88 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ
 	return b.vm.blockChain.InsertBlockManual(b.ethBlock, writes)
 }
 
+// semanticVerify verifies that a *wrappedBlock is internally consistent.
+func (b *wrappedBlock) semanticVerify() error {
+	// Make sure the block isn't too far in the future
+	blockTimestamp := b.ethBlock.Time()
+	if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime {
+		return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime)
+	}
+
+	if b.extension != nil {
+		if err := b.extension.SemanticVerify(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// syntacticVerify verifies that a *wrappedBlock is well-formed.
+func (b *wrappedBlock) syntacticVerify() error {
+	if b == nil || b.ethBlock == nil {
+		return errInvalidBlock
+	}
+
+	// Skip verification of the genesis block since it should already be marked as accepted.
+	if b.ethBlock.Hash() == b.vm.genesisHash {
+		return nil
+	}
+
+	ethHeader := b.ethBlock.Header()
+	rules := b.vm.chainConfig.Rules(ethHeader.Number, params.IsMergeTODO, ethHeader.Time)
+	rulesExtra := params.GetRulesExtra(rules)
+	// Perform block and header sanity checks
+	if !ethHeader.Number.IsUint64() {
+		return fmt.Errorf("invalid block number: %v", ethHeader.Number)
+	}
+	if !ethHeader.Difficulty.IsUint64() || ethHeader.Difficulty.Cmp(common.Big1) != 0 {
+		return fmt.Errorf("invalid difficulty: %d", ethHeader.Difficulty)
+	}
+	if ethHeader.Nonce.Uint64() != 0 {
+		return fmt.Errorf(
+			"expected nonce to be 0 but got %d: %w",
+			ethHeader.Nonce.Uint64(), errInvalidNonce,
+		)
+	}
+
+	if ethHeader.MixDigest != (common.Hash{}) {
+		return fmt.Errorf("invalid mix digest: %v", ethHeader.MixDigest)
+	}
+
+	// Verify the extra data is well-formed.
+	if err := header.VerifyExtra(rulesExtra.AvalancheRules, ethHeader.Extra); err != nil {
+		return err
+	}
+
+	// Check that the tx hash in the header matches the body
+	txsHash := types.DeriveSha(b.ethBlock.Transactions(), trie.NewStackTrie(nil))
+	if txsHash != ethHeader.TxHash {
+		return fmt.Errorf("invalid txs hash %v does not match calculated txs hash %v", ethHeader.TxHash, txsHash)
+	}
+	// Check that the uncle hash in the header matches the body
+	uncleHash := types.CalcUncleHash(b.ethBlock.Uncles())
+	if uncleHash != ethHeader.UncleHash {
+		return fmt.Errorf("invalid uncle hash %v does not match calculated uncle hash %v", ethHeader.UncleHash, uncleHash)
+	}
+	// Coinbase must match the required BlackholeAddr
+	if ethHeader.Coinbase != constants.BlackholeAddr {
+		return fmt.Errorf("invalid coinbase %v does not match required blackhole address %v", ethHeader.Coinbase, constants.BlackholeAddr)
+	}
+	// Block must not have any uncles
+	if len(b.ethBlock.Uncles()) > 0 {
+		return errUnclesUnsupported
+	}
+
+	if b.extension != nil {
+		if err := b.extension.SyntacticVerify(*rulesExtra); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // verifyPredicates verifies the predicates in the block are valid according to predicateContext.
-func (b *Block) verifyPredicates(predicateContext *precompileconfig.PredicateContext) error { +func (b *wrappedBlock) verifyPredicates(predicateContext *precompileconfig.PredicateContext) error { rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), params.IsMergeTODO, b.ethBlock.Time()) rulesExtra := params.GetRulesExtra(rules) @@ -266,7 +347,7 @@ func (b *Block) verifyPredicates(predicateContext *precompileconfig.PredicateCon } // Bytes implements the snowman.Block interface -func (b *Block) Bytes() []byte { +func (b *wrappedBlock) Bytes() []byte { res, err := rlp.EncodeToBytes(b.ethBlock) if err != nil { panic(err) @@ -274,8 +355,12 @@ func (b *Block) Bytes() []byte { return res } -func (b *Block) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID()) } +func (b *wrappedBlock) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID()) } -func (b *Block) GetEthBlock() *types.Block { +func (b *wrappedBlock) GetEthBlock() *types.Block { return b.ethBlock } + +func (b *wrappedBlock) GetBlockExtension() extension.BlockExtension { + return b.extension +} diff --git a/plugin/evm/block_test.go b/plugin/evm/block_test.go index 4b138c66b8..7152bd7daf 100644 --- a/plugin/evm/block_test.go +++ b/plugin/evm/block_test.go @@ -88,7 +88,8 @@ func TestHandlePrecompileAccept(t *testing.T) { ) // Call handlePrecompileAccept - blk := vm.newBlock(ethBlock) + blk, err := wrapBlock(ethBlock, vm) + require.NoError(err) rules := extras.Rules{ AccepterPrecompiles: map[common.Address]precompileconfig.Accepter{ precompileAddr: mockAccepter, diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go deleted file mode 100644 index 9722fff8e7..0000000000 --- a/plugin/evm/block_verification.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "errors" - "fmt" - "math/big" - - "github.com/ava-labs/libevm/common" - - "github.com/ava-labs/libevm/core/types" - "github.com/ava-labs/libevm/trie" - "github.com/ava-labs/subnet-evm/params" - "github.com/ava-labs/subnet-evm/plugin/evm/customtypes" - "github.com/ava-labs/subnet-evm/plugin/evm/header" - "github.com/ava-labs/subnet-evm/plugin/evm/upgrade/legacy" -) - -var legacyMinGasPrice = big.NewInt(legacy.BaseFee) - -type BlockValidator interface { - SyntacticVerify(b *Block, rules params.Rules) error -} - -type blockValidator struct{} - -func NewBlockValidator() BlockValidator { - return &blockValidator{} -} - -func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { - rulesExtra := params.GetRulesExtra(rules) - if b == nil || b.ethBlock == nil { - return errInvalidBlock - } - ethHeader := b.ethBlock.Header() - blockHash := b.ethBlock.Hash() - - // Skip verification of the genesis block since it should already be marked as accepted. 
- if blockHash == b.vm.genesisHash { - return nil - } - - // Perform block and header sanity checks - if ethHeader.Number == nil || !ethHeader.Number.IsUint64() { - return errInvalidBlock - } - if ethHeader.Difficulty == nil || !ethHeader.Difficulty.IsUint64() || - ethHeader.Difficulty.Uint64() != 1 { - return fmt.Errorf("invalid difficulty: %d", ethHeader.Difficulty) - } - if ethHeader.Nonce.Uint64() != 0 { - return fmt.Errorf( - "expected nonce to be 0 but got %d: %w", - ethHeader.Nonce.Uint64(), errInvalidNonce, - ) - } - - if ethHeader.MixDigest != (common.Hash{}) { - return fmt.Errorf("invalid mix digest: %v", ethHeader.MixDigest) - } - - // Verify the extra data is well-formed. - if err := header.VerifyExtra(rulesExtra.AvalancheRules, ethHeader.Extra); err != nil { - return err - } - - if rulesExtra.IsSubnetEVM { - if ethHeader.BaseFee == nil { - return errNilBaseFeeSubnetEVM - } - if bfLen := ethHeader.BaseFee.BitLen(); bfLen > 256 { - return fmt.Errorf("too large base fee: bitlen %d", bfLen) - } - } - - // Check that the tx hash in the header matches the body - txsHash := types.DeriveSha(b.ethBlock.Transactions(), trie.NewStackTrie(nil)) - if txsHash != ethHeader.TxHash { - return fmt.Errorf("invalid txs hash %v does not match calculated txs hash %v", ethHeader.TxHash, txsHash) - } - // Check that the uncle hash in the header matches the body - uncleHash := types.CalcUncleHash(b.ethBlock.Uncles()) - if uncleHash != ethHeader.UncleHash { - return fmt.Errorf("invalid uncle hash %v does not match calculated uncle hash %v", ethHeader.UncleHash, uncleHash) - } - - // Block must not have any uncles - if len(b.ethBlock.Uncles()) > 0 { - return errUnclesUnsupported - } - - // Block must not be empty - txs := b.ethBlock.Transactions() - if len(txs) == 0 { - return errEmptyBlock - } - - if !rulesExtra.IsSubnetEVM { - // Make sure that all the txs have the correct fee set. 
- for _, tx := range txs { - if tx.GasPrice().Cmp(legacyMinGasPrice) < 0 { - return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), legacyMinGasPrice) - } - } - } - - // Make sure the block isn't too far in the future - blockTimestamp := b.ethBlock.Time() - if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { - return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) - } - - if rulesExtra.IsSubnetEVM { - blockGasCost := customtypes.GetHeaderExtra(ethHeader).BlockGasCost - switch { - // Make sure BlockGasCost is not nil - // NOTE: ethHeader.BlockGasCost correctness is checked in header verification - case blockGasCost == nil: - return errNilBlockGasCostSubnetEVM - case !blockGasCost.IsUint64(): - return fmt.Errorf("too large blockGasCost: %d", blockGasCost) - } - } - - // Verify the existence / non-existence of excessBlobGas - cancun := rules.IsCancun - if !cancun && ethHeader.ExcessBlobGas != nil { - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *ethHeader.ExcessBlobGas) - } - if !cancun && ethHeader.BlobGasUsed != nil { - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *ethHeader.BlobGasUsed) - } - if cancun && ethHeader.ExcessBlobGas == nil { - return errors.New("header is missing excessBlobGas") - } - if cancun && ethHeader.BlobGasUsed == nil { - return errors.New("header is missing blobGasUsed") - } - if !cancun && ethHeader.ParentBeaconRoot != nil { - return fmt.Errorf("invalid parentBeaconRoot: have %x, expected nil", *ethHeader.ParentBeaconRoot) - } - // TODO: decide what to do after Cancun - // currently we are enforcing it to be empty hash - if cancun { - switch { - case ethHeader.ParentBeaconRoot == nil: - return errors.New("header is missing parentBeaconRoot") - case *ethHeader.ParentBeaconRoot != (common.Hash{}): - return fmt.Errorf("invalid parentBeaconRoot: have %x, expected empty hash", ethHeader.ParentBeaconRoot) - } - } - return nil -} diff --git a/plugin/evm/message/block_sync_summary.go b/plugin/evm/message/block_sync_summary.go new file mode 100644 index 0000000000..d3683ec40c --- /dev/null +++ b/plugin/evm/message/block_sync_summary.go @@ -0,0 +1,82 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/crypto" +) + +var _ Syncable = (*BlockSyncSummary)(nil) + +// BlockSyncSummary provides the information necessary to sync a node starting +// at the given block. +type BlockSyncSummary struct { + BlockNumber uint64 `serialize:"true"` + BlockHash common.Hash `serialize:"true"` + BlockRoot common.Hash `serialize:"true"` + + summaryID ids.ID + bytes []byte + acceptImpl AcceptImplFn +} + +func NewBlockSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash) (*BlockSyncSummary, error) { + // We intentionally do not use the acceptImpl here and leave it for the parser to set. 
+ summary := BlockSyncSummary{ + BlockNumber: blockNumber, + BlockHash: blockHash, + BlockRoot: blockRoot, + } + bytes, err := Codec.Marshal(Version, &summary) + if err != nil { + return nil, fmt.Errorf("failed to marshal syncable summary: %w", err) + } + + summary.bytes = bytes + summaryID, err := ids.ToID(crypto.Keccak256(bytes)) + if err != nil { + return nil, fmt.Errorf("failed to compute summary ID: %w", err) + } + summary.summaryID = summaryID + + return &summary, nil +} + +func (s *BlockSyncSummary) GetBlockHash() common.Hash { + return s.BlockHash +} + +func (s *BlockSyncSummary) GetBlockRoot() common.Hash { + return s.BlockRoot +} + +func (s *BlockSyncSummary) Bytes() []byte { + return s.bytes +} + +func (s *BlockSyncSummary) Height() uint64 { + return s.BlockNumber +} + +func (s *BlockSyncSummary) ID() ids.ID { + return s.summaryID +} + +func (s *BlockSyncSummary) String() string { + return fmt.Sprintf("BlockSyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot) +} + +func (s *BlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { + if s.acceptImpl == nil { + return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", s) + } + return s.acceptImpl(s) +} diff --git a/plugin/evm/message/block_sync_summary_parser.go b/plugin/evm/message/block_sync_summary_parser.go new file mode 100644 index 0000000000..97435edece --- /dev/null +++ b/plugin/evm/message/block_sync_summary_parser.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. +package message + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/libevm/crypto" +) + +type BlockSyncSummaryParser struct{} + +func NewBlockSyncSummaryParser() *BlockSyncSummaryParser { + return &BlockSyncSummaryParser{} +} + +func (b *BlockSyncSummaryParser) Parse(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) { + summary := BlockSyncSummary{} + if _, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { + return nil, fmt.Errorf("failed to parse syncable summary: %w", err) + } + + summary.bytes = summaryBytes + summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) + if err != nil { + return nil, fmt.Errorf("failed to compute summary ID: %w", err) + } + summary.summaryID = summaryID + summary.acceptImpl = acceptImpl + return &summary, nil +} diff --git a/plugin/evm/message/block_sync_summary_provider.go b/plugin/evm/message/block_sync_summary_provider.go new file mode 100644 index 0000000000..959ae4d0b7 --- /dev/null +++ b/plugin/evm/message/block_sync_summary_provider.go @@ -0,0 +1,16 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. +package message + +import ( + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + "github.com/ava-labs/libevm/core/types" +) + +type BlockSyncSummaryProvider struct{} + +// StateSummaryAtBlock returns the block state summary at [block] if valid. +func (a *BlockSyncSummaryProvider) StateSummaryAtBlock(blk *types.Block) (block.StateSummary, error) { + return NewBlockSyncSummary(blk.Hash(), blk.NumberU64(), blk.Root()) +} diff --git a/plugin/evm/message/block_sync_summary_test.go b/plugin/evm/message/block_sync_summary_test.go new file mode 100644 index 0000000000..40aa926d7c --- /dev/null +++ b/plugin/evm/message/block_sync_summary_test.go @@ -0,0 +1,44 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "encoding/base64" + "testing" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/require" +) + +func TestMarshalBlockSyncSummary(t *testing.T) { + blockSyncSummary, err := NewBlockSyncSummary(common.Hash{1}, 2, common.Hash{3}) + require.NoError(t, err) + + require.Equal(t, common.Hash{1}, blockSyncSummary.GetBlockHash()) + require.Equal(t, uint64(2), blockSyncSummary.Height()) + require.Equal(t, common.Hash{3}, blockSyncSummary.GetBlockRoot()) + + expectedBase64Bytes := "AAAAAAAAAAAAAgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + require.Equal(t, expectedBase64Bytes, base64.StdEncoding.EncodeToString(blockSyncSummary.Bytes())) + + parser := NewBlockSyncSummaryParser() + called := false + acceptImplTest := func(Syncable) (block.StateSyncMode, error) { + called = true + return block.StateSyncSkipped, nil + } + s, err := parser.Parse(blockSyncSummary.Bytes(), acceptImplTest) + require.NoError(t, err) + require.Equal(t, blockSyncSummary.GetBlockHash(), s.GetBlockHash()) + require.Equal(t, blockSyncSummary.Height(), s.Height()) + require.Equal(t, blockSyncSummary.GetBlockRoot(), s.GetBlockRoot()) + require.Equal(t, blockSyncSummary.Bytes(), s.Bytes()) + + mode, err := s.Accept(context.TODO()) + require.NoError(t, err) + require.Equal(t, block.StateSyncSkipped, mode) + require.True(t, called) +} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index ef2dd30855..eccd2d42d6 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -222,8 +222,6 @@ type VM struct { validatorsDB database.Database - syntacticBlockValidator BlockValidator - // builderLock is used to synchronize access to the block builder, // as it is uninitialized at first and is only initialized when onNormalOperationsStarted is called. builderLock sync.Mutex @@ -281,6 +279,7 @@ func (vm *VM) Initialize( appSender commonEng.AppSender, ) error { vm.stateSyncDone = make(chan struct{}) + vm.extensionConfig = &extension.Config{} vm.config.SetDefaults(defaultTxPoolConfig) if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &vm.config); err != nil { @@ -346,7 +345,7 @@ func (vm *VM) Initialize( return err } - vm.syntacticBlockValidator = NewBlockValidator() + // TODO: FIX THIS vm.syntacticBlockValidator = NewBlockValidator(vm) vm.ethConfig = ethconfig.NewDefaultConfig() vm.ethConfig.Genesis = g @@ -737,7 +736,7 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { } func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { - block := vm.newBlock(lastAcceptedBlock) + block, err := wrapBlock(lastAcceptedBlock, vm) config := &chain.Config{ DecidedCacheSize: decidedCacheSize, @@ -1003,7 +1002,7 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo } // Note: the status of block is set by ChainState - blk := vm.newBlock(block) + blk, err := wrapBlock(block, vm) // Verify is called on a non-wrapped block here, such that this // does not add [blk] to the processing blocks map in ChainState. 
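The summary round trip added above is worth keeping in mind when reading the VM changes that follow: a summary's bytes come from the codec, its ID is the Keccak-256 hash of those bytes, and only the parser wires in the accept callback. A minimal sketch of that flow, assuming the message package names introduced in this patch (error handling elided):

    summary, _ := message.NewBlockSyncSummary(blkHash, blkNumber, blkRoot)
    // summary.ID() is ids.ToID(crypto.Keccak256(summary.Bytes())), so any peer
    // parsing the same bytes computes the same ID.
    parsed, _ := message.NewBlockSyncSummaryParser().Parse(summary.Bytes(), acceptImpl)
    // parsed matches summary field for field; parsed.Accept(ctx) dispatches to
    // acceptImpl, whereas a freshly constructed summary returns an error from
    // Accept because its acceptImpl is intentionally left unset.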
@@ -1037,7 +1036,10 @@ func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { } // Note: the status of block is set by ChainState - block := vm.newBlock(ethBlock) + block, err := wrapBlock(ethBlock, vm) + if err != nil { + return nil, err + } // Performing syntactic verification in ParseBlock allows for // short-circuiting bad blocks before they are processed by the VM. if err := block.syntacticVerify(); err != nil { @@ -1052,7 +1054,7 @@ func (vm *VM) ParseEthBlock(b []byte) (*types.Block, error) { return nil, err } - return block.(*Block).ethBlock, nil + return block.(*wrappedBlock).ethBlock, nil } // getBlock attempts to retrieve block [id] from the VM to be wrapped @@ -1065,7 +1067,7 @@ func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { return nil, database.ErrNotFound } // Note: the status of block is set by ChainState - return vm.newBlock(ethBlock), nil + return wrapBlock(ethBlock, vm) } // GetAcceptedBlock attempts to retrieve block [blkID] from the VM. This method @@ -1099,7 +1101,7 @@ func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { return fmt.Errorf("failed to set preference to %s: %w", blkID, err) } - return vm.blockChain.SetPreference(block.(*Block).ethBlock) + return vm.blockChain.SetPreference(block.(*wrappedBlock).ethBlock) } // GetBlockIDAtHeight returns the canonical block at [height]. diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 1617dd7a26..64d8fea08b 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -49,6 +49,7 @@ import ( "github.com/ava-labs/subnet-evm/plugin/evm/config" "github.com/ava-labs/subnet-evm/plugin/evm/customrawdb" "github.com/ava-labs/subnet-evm/plugin/evm/customtypes" + "github.com/ava-labs/subnet-evm/plugin/evm/extension" "github.com/ava-labs/subnet-evm/plugin/evm/header" "github.com/ava-labs/subnet-evm/plugin/evm/vmerrors" "github.com/ava-labs/subnet-evm/precompile/allowlist" @@ -454,7 +455,7 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) } - ethBlk1 := blk1.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlk1 := blk1.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock if ethBlk1Root := ethBlk1.Root(); !tvm.vm.blockChain.HasState(ethBlk1Root) { t.Fatalf("Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") } @@ -499,7 +500,7 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { } // State root should be committed when accepted tip on shutdown - ethBlk2 := blk2.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlk2 := blk2.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock if ethBlk2Root := ethBlk2.Root(); !restartedVM.blockChain.HasState(ethBlk2Root) { t.Fatalf("Expected blk2 state root to not be pruned after shutdown (last accepted tip should be committed)") } @@ -1110,7 +1111,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { } blkBHeight := vm1BlkB.Height() - blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) } @@ -1154,7 +1155,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { t.Fatalf("Expected accepted block to be indexed by height, but found %s", blkID) } - 
blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) } @@ -1300,7 +1301,7 @@ func testStickyPreference(t *testing.T, scheme string) { } blkBHeight := vm1BlkB.Height() - blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) } @@ -1355,14 +1356,14 @@ func testStickyPreference(t *testing.T, scheme string) { if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } blkDHeight := vm1BlkD.Height() - blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() // Should be no-ops if err := vm1BlkC.Verify(context.Background()); err != nil { @@ -1619,8 +1620,8 @@ func testUncleBlock(t *testing.T, scheme string) { } // Create uncle block from blkD - blkDEthBlock := vm2BlkD.(*chain.BlockWrapper).Block.(*Block).ethBlock - uncles := []*types.Header{vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Header()} + blkDEthBlock := vm2BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock + uncles := []*types.Header{vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Header()} uncleBlockHeader := types.CopyHeader(blkDEthBlock.Header()) uncleBlockHeader.UncleHash = types.CalcUncleHash(uncles) @@ -1631,7 +1632,7 @@ func testUncleBlock(t *testing.T, scheme string) { nil, trie.NewStackTrie(nil), ) - uncleBlock := vm2.newBlock(uncleEthBlock) + uncleBlock, err := wrapBlock(uncleEthBlock, tvm2.vm) if err := uncleBlock.Verify(context.Background()); !errors.Is(err, errUnclesUnsupported) { t.Fatalf("VM2 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) @@ -1689,7 +1690,7 @@ func testEmptyBlock(t *testing.T, scheme string) { } // Create empty block from blkA - ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() emptyEthBlock := types.NewBlock( types.CopyHeader(ethBlock.Header()), @@ -1699,7 +1700,7 @@ func testEmptyBlock(t *testing.T, scheme string) { new(trie.Trie), ) - emptyBlock := tvm.vm.newBlock(emptyEthBlock) + emptyBlock, err := wrapBlock(emptyEthBlock, tvm.vm) if _, err := tvm.vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { t.Fatalf("VM should have failed with errEmptyBlock but got %s", err.Error()) @@ -1909,7 +1910,7 @@ func testAcceptReorg(t *testing.T, scheme string) { t.Fatalf("Block failed verification on VM1: %s", err) } - blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock().Hash() if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { 
t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) } @@ -1918,7 +1919,7 @@ func testAcceptReorg(t *testing.T, scheme string) { t.Fatal(err) } - blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock().Hash() if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkCHash { t.Fatalf("expected current block to have hash %s but got %s", blkCHash.Hex(), b.Hash().Hex()) } @@ -1929,7 +1930,7 @@ func testAcceptReorg(t *testing.T, scheme string) { if err := vm1BlkD.Accept(context.Background()); err != nil { t.Fatal(err) } - blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock().Hash() if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) } @@ -1978,8 +1979,8 @@ func testFutureBlock(t *testing.T, scheme string) { } // Create empty block from blkA - internalBlkA := blkA.(*chain.BlockWrapper).Block.(*Block) - modifiedHeader := types.CopyHeader(internalBlkA.ethBlock.Header()) + internalBlkA := blkA.(*chain.BlockWrapper).Block.(extension.ExtendedBlock) + modifiedHeader := types.CopyHeader(internalBlkA.GetEthBlock().Header()) // Set the VM's clock to the time of the produced block tvm.vm.clock.Set(time.Unix(int64(modifiedHeader.Time), 0)) // Set the modified time to exceed the allowed future time @@ -1987,13 +1988,13 @@ func testFutureBlock(t *testing.T, scheme string) { modifiedHeader.Time = modifiedTime modifiedBlock := types.NewBlock( modifiedHeader, - internalBlkA.ethBlock.Transactions(), + internalBlkA.GetEthBlock().Transactions(), nil, nil, trie.NewStackTrie(nil), ) - futureBlock := tvm.vm.newBlock(modifiedBlock) + futureBlock, err := wrapBlock(modifiedBlock, tvm.vm) if err := futureBlock.Verify(context.Background()); err == nil { t.Fatal("Future block should have failed verification due to block timestamp too far in the future") @@ -2053,7 +2054,7 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { } blkHeight := blk.Height() - blkHash := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() + blkHash := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock().Hash() tvm.vm.eth.APIBackend.SetAllowUnfinalizedQueries(true) @@ -2143,7 +2144,7 @@ func testBuildSubnetEVMBlock(t *testing.T, scheme string) { } blk = issueAndAccept(t, tvm.vm) - ethBlk := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlk := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() if customtypes.BlockGasCost(ethBlk) == nil || customtypes.BlockGasCost(ethBlk).Cmp(big.NewInt(100)) < 0 { t.Fatalf("expected blockGasCost to be at least 100 but got %d", customtypes.BlockGasCost(ethBlk)) } @@ -2241,7 +2242,7 @@ func testBuildAllowListActivationBlock(t *testing.T, scheme string) { } // Verify that the allow list config activation was handled correctly in the first block. 
- blkState, err := tvm.vm.blockChain.StateAt(blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Root()) + blkState, err := tvm.vm.blockChain.StateAt(blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock().Root()) if err != nil { t.Fatal(err) } @@ -2355,7 +2356,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) // Verify that the constructed block only has the whitelisted tx - block := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() txs := block.Transactions() @@ -2379,7 +2380,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - block = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() blkState, err := tvm.vm.blockChain.StateAt(block.Root()) require.NoError(t, err) @@ -2405,7 +2406,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) // Verify that the constructed block only has the whitelisted tx - block = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() txs = block.Transactions() require.Len(t, txs, 1) @@ -2560,7 +2561,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { blk := issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the whitelisted tx - block := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() txs := block.Transactions() if txs.Len() != 1 { t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) @@ -2582,7 +2583,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { blk = issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the previously rejected tx - block = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() txs = block.Transactions() if txs.Len() != 1 { t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) @@ -2688,7 +2689,7 @@ func TestFeeManagerChangeFee(t *testing.T) { t.Fatalf("Expected new block to match") } - block := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() feeConfig, lastChangedAt, err = tvm.vm.blockChain.GetFeeConfigAt(block.Header()) require.NoError(t, err) @@ -2770,22 +2771,25 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { blk, err := tvm.vm.BuildBlock(context.Background()) require.NoError(t, err) // this won't return an error since miner will set the etherbase to blackhole address - ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Equal(t, constants.BlackholeAddr, ethBlock.Coinbase()) // Create empty block from blk - internalBlk := blk.(*chain.BlockWrapper).Block.(*Block) - modifiedHeader := types.CopyHeader(internalBlk.ethBlock.Header()) + internalBlk := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock) + modifiedHeader := types.CopyHeader(internalBlk.GetEthBlock().Header()) modifiedHeader.Coinbase = common.HexToAddress("0x0123456789") // set non-blackhole address by force 
modifiedBlock := types.NewBlock( modifiedHeader, - internalBlk.ethBlock.Transactions(), + internalBlk.GetEthBlock().Transactions(), nil, nil, trie.NewStackTrie(nil), ) - modifiedBlk := tvm.vm.newBlock(modifiedBlock) + modifiedBlk, err := wrapBlock(modifiedBlock, tvm.vm) + if err != nil { + t.Fatal(err) + } err = modifiedBlk.Verify(context.Background()) require.ErrorIs(t, err, vmerrors.ErrInvalidCoinbase) @@ -2842,7 +2846,7 @@ func TestAllowFeeRecipientEnabled(t *testing.T) { if newHead.Head.Hash() != common.Hash(blk.ID()) { t.Fatalf("Expected new block to match") } - ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Equal(t, etherBase, ethBlock.Coinbase()) // Verify that etherBase has received fees blkState, err := tvm.vm.blockChain.StateAt(ethBlock.Root()) @@ -2922,7 +2926,7 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Equal(t, etherBase, ethBlock.Coinbase()) // reward address is activated at this block so this is fine tx1 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(2), 21000, big.NewInt(testMinGasPrice*3), nil) @@ -2937,7 +2941,7 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Equal(t, testAddr, ethBlock.Coinbase()) // reward address was activated at previous block // Verify that etherBase has received fees blkState, err := tvm.vm.blockChain.StateAt(ethBlock.Root()) @@ -2964,7 +2968,7 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() // Reward manager deactivated at this block, so we expect the parent state // to determine the coinbase for this block before full deactivation in the // next block. 
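These assertions reach the underlying eth block through the same unwrap chain each time; a hypothetical test helper could centralize it. A sketch only, assuming the snowman, chain, extension, and types imports already used in this file:

    // ethBlockOf unwraps the embedded eth block from a consensus block,
    // mirroring the blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock)
    // chain repeated in the tests above.
    func ethBlockOf(t *testing.T, blk snowman.Block) *types.Block {
        t.Helper()
        wrapper, ok := blk.(*chain.BlockWrapper)
        if !ok {
            t.Fatalf("expected *chain.BlockWrapper, got %T", blk)
        }
        extended, ok := wrapper.Block.(extension.ExtendedBlock)
        if !ok {
            t.Fatalf("expected extension.ExtendedBlock, got %T", wrapper.Block)
        }
        return extended.GetEthBlock()
    }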
@@ -2985,7 +2989,7 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() // reward manager was disabled at previous block // so this block should revert back to enabling fee recipients require.Equal(t, etherBase, ethBlock.Coinbase()) @@ -3064,7 +3068,7 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Equal(t, constants.BlackholeAddr, ethBlock.Coinbase()) // reward address is activated at this block so this is fine tx1 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(2), 21000, big.NewInt(testMinGasPrice*3), nil) @@ -3079,7 +3083,7 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Equal(t, etherBase, ethBlock.Coinbase()) // reward address was activated at previous block // Verify that etherBase has received fees blkState, err := tvm.vm.blockChain.StateAt(ethBlock.Root()) @@ -3105,7 +3109,7 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Equal(t, etherBase, ethBlock.Coinbase()) // reward address was activated at previous block require.GreaterOrEqual(t, int64(ethBlock.Time()), disableTime.Unix()) @@ -3122,7 +3126,7 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) - ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock = blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Equal(t, constants.BlackholeAddr, ethBlock.Coinbase()) // reward address was activated at previous block require.Greater(t, int64(ethBlock.Time()), disableTime.Unix()) @@ -3282,12 +3286,15 @@ func TestParentBeaconRootBlock(t *testing.T) { } // Modify the block to have a parent beacon root - ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() header := types.CopyHeader(ethBlock.Header()) header.ParentBeaconRoot = test.beaconRoot parentBeaconEthBlock := ethBlock.WithSeal(header) - parentBeaconBlock := tvm.vm.newBlock(parentBeaconEthBlock) + parentBeaconBlock, err := wrapBlock(parentBeaconEthBlock, tvm.vm) + if err != nil { + t.Fatal(err) + } errCheck := func(err error) { if test.expectedError { @@ -3475,7 +3482,7 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) // check that the fee 
config is updated - block := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() feeConfig, lastChangedAt, err = restartedVM.blockChain.GetFeeConfigAt(block.Header()) require.NoError(t, err) require.EqualValues(t, restartedVM.blockChain.CurrentBlock().Number, lastChangedAt) diff --git a/plugin/evm/vm_upgrade_bytes_test.go b/plugin/evm/vm_upgrade_bytes_test.go index 1b86286fbb..32f799d2a1 100644 --- a/plugin/evm/vm_upgrade_bytes_test.go +++ b/plugin/evm/vm_upgrade_bytes_test.go @@ -140,7 +140,7 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { blk := issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the whitelisted tx - block := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs := block.Transactions() if txs.Len() != 1 { t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) @@ -162,7 +162,7 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { blk = issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the previously rejected tx - block = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() if txs.Len() != 1 { t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 0e7285e944..89c9444dec 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -40,6 +40,7 @@ import ( "github.com/ava-labs/subnet-evm/eth/tracers" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" + "github.com/ava-labs/subnet-evm/plugin/evm/extension" customheader "github.com/ava-labs/subnet-evm/plugin/evm/header" "github.com/ava-labs/subnet-evm/precompile/contract" warpcontract "github.com/ava-labs/subnet-evm/precompile/contracts/warp" @@ -136,7 +137,7 @@ func testSendWarpMessage(t *testing.T, scheme string) { require.NoError(blk.Verify(context.Background())) // Verify that the constructed block contains the expected log with an unsigned warp message in the log data - ethBlock1 := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock1 := blk.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() require.Len(ethBlock1.Transactions(), 1) receipts := rawdb.ReadReceipts(tvm.vm.chaindb, ethBlock1.Hash(), ethBlock1.NumberU64(), ethBlock1.Time(), tvm.vm.chainConfig) require.Len(receipts, 1) @@ -455,7 +456,7 @@ func testWarpVMTransaction(t *testing.T, scheme string, unsignedMessage *avalanc require.NoError(warpBlock.Accept(context.Background())) tvm.vm.blockChain.DrainAcceptorQueue() - ethBlock := warpBlock.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock := warpBlock.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() verifiedMessageReceipts := tvm.vm.blockChain.GetReceiptsByHash(ethBlock.Hash()) require.Len(verifiedMessageReceipts, 2) for i, receipt := range verifiedMessageReceipts { @@ -739,7 +740,7 @@ func testReceiveWarpMessage( require.NoError(err) // Require the block was built with a successful predicate result - ethBlock := block2.(*chain.BlockWrapper).Block.(*Block).ethBlock + ethBlock := block2.(*chain.BlockWrapper).Block.(extension.ExtendedBlock).GetEthBlock() headerPredicateResultsBytes := customheader.PredicateBytesFromExtra(ethBlock.Extra()) results, err := predicate.ParseResults(headerPredicateResultsBytes) require.NoError(err) diff 
--git a/sync/README.md b/sync/README.md index ad991ada53..ce1530b28f 100644 --- a/sync/README.md +++ b/sync/README.md @@ -42,7 +42,7 @@ When a new node wants to join the network via state sync, it will need a few pie - Number (height) and hash of the latest available syncable block, - Root of the account trie, -The above information is called a _state summary_, and each syncable block corresponds to one such summary (see `message.SyncSummary`). The engine and VM interact as follows to find a syncable state summary: +The above information is called a _state summary_, and each syncable block corresponds to one such summary (see `message.Summary`). The engine and VM interact as follows to find a syncable state summary: 1. The engine calls `StateSyncEnabled`. The VM returns `true` to initiate state sync, or `false` to start bootstrapping. In `subnet-evm`, this is controlled by the `state-sync-enabled` flag. @@ -60,6 +60,8 @@ The following steps are executed by the VM to sync its state from peers (see `st 1. Update in-memory and on-disk pointers. Steps 3 and 4 involve syncing tries. To sync trie data, the VM will send a series of `LeafRequests` to its peers. Each request specifies: +- Type of trie (`NodeType`): + - `statesync.StateTrieNode` (account trie and storage tries share the same database) - `Root` of the trie to sync, - `Start` and `End` specify a range of keys. diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index 3e10b53bb8..82edeb27f5 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -67,17 +67,18 @@ func testSync(t *testing.T, test syncTest) { MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, RequestSize: 1024, }) - if err != nil { - t.Fatal(err) - } - // begin sync - s.Start(ctx) + require.NoError(t, err, "failed to create state syncer") + + require.NoError(t, s.Start(ctx), "failed to start state syncer") + waitFor(t, context.Background(), s.Wait, test.expectedError, testSyncTimeout) + + // Only assert database consistency if the sync was expected to succeed. if test.expectedError != nil { return } - statesynctest.AssertDBConsistency(t, root, clientDB, serverTrieDB, triedb.NewDatabase(clientDB, nil)) + assertDBConsistency(t, root, clientDB, serverTrieDB, triedb.NewDatabase(clientDB, nil)) } // testSyncResumes tests a series of syncTests work as expected, invoking a callback function after each @@ -97,7 +98,6 @@ func waitFor(t *testing.T, ctx context.Context, resultFunc func(context.Context) err := resultFunc(ctx) if ctx.Err() != nil { // print a stack trace to assist with debugging - var stackBuf bytes.Buffer pprof.Lookup("goroutine").WriteTo(&stackBuf, 2) t.Log(stackBuf.String()) From 52e4d2633d3e862316fab812794f84e8994a2aa4 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Fri, 8 Aug 2025 16:18:40 -0400 Subject: [PATCH 05/26] sync part 3! 
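This commit also aligns the syncer with the README's description of leaf requests: each request now names the trie type explicitly, alongside the root and key range. A hedged sketch of a single request, using the field set shown in sync/client/leaf_syncer.go in this commit; root, account, start, and client are placeholders, and the values are illustrative only:

    req := message.LeafsRequest{
        Root:     root,                  // root of the trie being synced
        Account:  account,               // account hash when syncing a storage trie
        Start:    start,                 // first key of the requested range
        Limit:    1024,                  // maximum number of leaves per response
        NodeType: message.StateTrieNode, // account and storage tries share one database
    }
    resp, err := client.GetLeafs(ctx, req)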
--- plugin/evm/block.go | 30 +++++++----------------- plugin/evm/block_test.go | 6 +++-- plugin/evm/message/leafs_request_test.go | 2 +- plugin/evm/sync/client.go | 6 ++--- plugin/evm/syncervm_test.go | 12 +++++++++- plugin/evm/vm.go | 18 ++++++++++---- plugin/evm/vm_test.go | 2 +- sync/client/leaf_syncer.go | 9 +++---- sync/statesync/state_syncer.go | 3 --- 9 files changed, 46 insertions(+), 42 deletions(-) diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 07c504459d..678ba8e219 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/libevm/core/rawdb" "github.com/ava-labs/libevm/core/types" - "github.com/ava-labs/subnet-evm/constants" + "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" @@ -233,22 +233,6 @@ func (b *wrappedBlock) verify(predicateContext *precompileconfig.PredicateContex return b.vm.blockChain.InsertBlockManual(b.ethBlock, writes) } -// semanticVerify verifies that a *Block is internally consistent. -func (b *wrappedBlock) semanticVerify() error { - // Make sure the block isn't too far in the future - blockTimestamp := b.ethBlock.Time() - if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { - return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) - } - - if b.extension != nil { - if err := b.extension.SemanticVerify(); err != nil { - return err - } - } - return nil -} - // syntacticVerify verifies that a *Block is well-formed. func (b *wrappedBlock) syntacticVerify() error { if b == nil || b.ethBlock == nil { @@ -287,7 +271,12 @@ func (b *wrappedBlock) syntacticVerify() error { } // Check that the tx hash in the header matches the body - txsHash := types.DeriveSha(b.ethBlock.Transactions(), trie.NewStackTrie(nil)) + txs := b.ethBlock.Transactions() + if len(txs) == 0 { + // Empty blocks are not allowed on Subnet-EVM + return errEmptyBlock + } + txsHash := types.DeriveSha(txs, trie.NewStackTrie(nil)) if txsHash != ethHeader.TxHash { return fmt.Errorf("invalid txs hash %v does not match calculated txs hash %v", ethHeader.TxHash, txsHash) } @@ -296,10 +285,7 @@ func (b *wrappedBlock) syntacticVerify() error { if uncleHash != ethHeader.UncleHash { return fmt.Errorf("invalid uncle hash %v does not match calculated uncle hash %v", ethHeader.UncleHash, uncleHash) } - // Coinbase must match the BlackholeAddr on C-Chain - if ethHeader.Coinbase != constants.BlackholeAddr { - return fmt.Errorf("invalid coinbase %v does not match required blackhole address %v", ethHeader.Coinbase, constants.BlackholeAddr) - } + // Block must not have any uncles if len(b.ethBlock.Uncles()) > 0 { return errUnclesUnsupported diff --git a/plugin/evm/block_test.go b/plugin/evm/block_test.go index 7152bd7daf..4c7c097d31 100644 --- a/plugin/evm/block_test.go +++ b/plugin/evm/block_test.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/libevm/trie" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" + "github.com/ava-labs/subnet-evm/plugin/evm/extension" "github.com/ava-labs/subnet-evm/precompile/precompileconfig" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -25,8 +26,9 @@ func TestHandlePrecompileAccept(t *testing.T) { db := rawdb.NewMemoryDatabase() vm := &VM{ - chaindb: db, - chainConfig: params.TestChainConfig, + chaindb: db, + chainConfig: params.TestChainConfig, + extensionConfig: &extension.Config{}, } 
precompileAddr := common.Address{0x05} diff --git a/plugin/evm/message/leafs_request_test.go b/plugin/evm/message/leafs_request_test.go index 4b28cdb53f..a611d1f1a1 100644 --- a/plugin/evm/message/leafs_request_test.go +++ b/plugin/evm/message/leafs_request_test.go @@ -35,7 +35,7 @@ func TestMarshalLeafsRequest(t *testing.T) { Limit: 1024, } - base64LeafsRequest := "AAAAAAAAAAAAAAAAAAAAAABpbSBST09UaW5nIGZvciB5YQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIFL9/AchgmVPFj9fD5piHXKVZsdNEAN8TXu7BAfR4sZJAAAAIIGFWthoHQ2G0ekeABZ5OctmlNLEIqzSCKAHKTlIf2mZBAA=" + base64LeafsRequest := "AAAAAAAAAAAAAAAAAAAAAABpbSBST09UaW5nIGZvciB5YQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIFL9/AchgmVPFj9fD5piHXKVZsdNEAN8TXu7BAfR4sZJAAAAIIGFWthoHQ2G0ekeABZ5OctmlNLEIqzSCKAHKTlIf2mZBAAA" leafsRequestBytes, err := Codec.Marshal(Version, leafsRequest) assert.NoError(t, err) diff --git a/plugin/evm/sync/client.go b/plugin/evm/sync/client.go index 103cc9c84d..64cded449f 100644 --- a/plugin/evm/sync/client.go +++ b/plugin/evm/sync/client.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/libevm/common" @@ -81,7 +80,7 @@ type ClientConfig struct { Client syncclient.Client - ToEngine chan<- commonEng.Message + StateSyncDone chan struct{} } type client struct { @@ -227,6 +226,7 @@ func (client *client) acceptSyncSummary(proposedSummary message.Syncable) (block } log.Info("Starting state sync", "summary", proposedSummary) + // create a cancellable ctx for the state sync goroutine ctx, cancel := context.WithCancel(context.Background()) client.cancel = cancel @@ -244,7 +244,7 @@ func (client *client) acceptSyncSummary(proposedSummary message.Syncable) (block // this error will be propagated to the engine when it calls // vm.SetState(snow.Bootstrapping) log.Info("stateSync completed, notifying engine", "err", client.err) - client.ToEngine <- commonEng.StateSyncDone + close(client.StateSyncDone) }() return block.StateSyncStatic, nil } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 18c45ee3eb..8a8f4eb616 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -273,8 +273,14 @@ func TestVMShutdownWhileSyncing(t *testing.T) { func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *syncVMSetup { require := require.New(t) // configure [serverVM] + // Align commit intervals with the test's syncable interval so summaries are created + // at the expected heights and Accept() does not skip. + serverConfigJSON := fmt.Sprintf(`{"commit-interval": %d, "state-sync-commit-interval": %d}`, + test.syncableInterval, test.syncableInterval, + ) serverVM := newVM(t, testVMConfig{ genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Latest]), + configJSON: serverConfigJSON, }) t.Cleanup(func() { @@ -310,7 +316,11 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(serverVM.vm.State.SetLastAcceptedBlock(internalBlock)) // initialise [syncerVM] with blank genesis state - stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d}`, test.stateSyncMinBlocks, 4) + // Match the server's state-sync-commit-interval so parsed summaries are acceptable. 
+ stateSyncEnabledJSON := fmt.Sprintf( + `{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d, "state-sync-commit-interval": %d}`, + test.stateSyncMinBlocks, 4, test.syncableInterval, + ) syncerVM := newVM(t, testVMConfig{ genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Latest]), configJSON: stateSyncEnabledJSON, diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index eccd2d42d6..d441be98bc 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -146,8 +146,6 @@ var ( errInvalidBlock = errors.New("invalid block") errInvalidNonce = errors.New("invalid nonce") errUnclesUnsupported = errors.New("uncles unsupported") - errNilBaseFeeSubnetEVM = errors.New("nil base fee is invalid after subnetEVM") - errNilBlockGasCostSubnetEVM = errors.New("nil blockGasCost is invalid after subnetEVM") errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") errInitializingLogger = errors.New("failed to initialize logger") errShuttingDownVM = errors.New("shutting down VM") @@ -279,7 +277,10 @@ func (vm *VM) Initialize( appSender commonEng.AppSender, ) error { vm.stateSyncDone = make(chan struct{}) - vm.extensionConfig = &extension.Config{} + vm.extensionConfig = &extension.Config{ + SyncSummaryProvider: &message.BlockSyncSummaryProvider{}, + SyncableParser: message.NewBlockSyncSummaryParser(), + } vm.config.SetDefaults(defaultTxPoolConfig) if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &vm.config); err != nil { @@ -702,8 +703,9 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { leafMetricsNames[stateLeafRequestConfig.LeafType] = stateLeafRequestConfig.MetricName vm.Client = vmsync.NewClient(&vmsync.ClientConfig{ - Chain: vm.eth, - State: vm.State, + StateSyncDone: vm.stateSyncDone, + Chain: vm.eth, + State: vm.State, Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.Network, @@ -737,6 +739,9 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { block, err := wrapBlock(lastAcceptedBlock, vm) + if err != nil { + return fmt.Errorf("failed to wrap last accepted block: %w", err) + } config := &chain.Config{ DecidedCacheSize: decidedCacheSize, @@ -1003,6 +1008,9 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo // Note: the status of block is set by ChainState blk, err := wrapBlock(block, vm) + if err != nil { + return nil, fmt.Errorf("failed to wrap built block: %w", err) + } // Verify is called on a non-wrapped block here, such that this // does not add [blk] to the processing blocks map in ChainState. 
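With ToEngine gone, completion is signaled by closing StateSyncDone; a close broadcasts to every waiter at once and can happen exactly once, which suits a one-shot event like sync completion. A minimal sketch of the waiting side, assuming the stateSyncDone and shutdownChan fields on the VM shown in this series (not the full engine flow):

    select {
    case <-vm.stateSyncDone:
        // State sync finished. The client's recorded error (client.err) is
        // surfaced when the engine calls vm.SetState(snow.Bootstrapping).
    case <-vm.shutdownChan:
        // The VM began shutting down before sync completed.
    }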
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 64d8fea08b..e3c88d07de 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1632,7 +1632,7 @@ func testUncleBlock(t *testing.T, scheme string) { nil, trie.NewStackTrie(nil), ) - uncleBlock, err := wrapBlock(uncleEthBlock, tvm2.vm) + uncleBlock, _ := wrapBlock(uncleEthBlock, tvm2.vm) if err := uncleBlock.Verify(context.Background()); !errors.Is(err, errUnclesUnsupported) { t.Fatalf("VM2 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) diff --git a/sync/client/leaf_syncer.go b/sync/client/leaf_syncer.go index 684225219a..6afb16f9ce 100644 --- a/sync/client/leaf_syncer.go +++ b/sync/client/leaf_syncer.go @@ -97,10 +97,11 @@ func (c *CallbackLeafSyncer) syncTask(ctx context.Context, task LeafSyncTask) er } leafsResponse, err := c.client.GetLeafs(ctx, message.LeafsRequest{ - Root: root, - Account: task.Account(), - Start: start, - Limit: c.requestSize, + Root: root, + Account: task.Account(), + Start: start, + Limit: c.requestSize, + NodeType: message.StateTrieNode, }) if err != nil { return fmt.Errorf("%s: %w", errFailedToFetchLeafs, err) diff --git a/sync/statesync/state_syncer.go b/sync/statesync/state_syncer.go index 03672e7f4b..35e03fb79c 100644 --- a/sync/statesync/state_syncer.go +++ b/sync/statesync/state_syncer.go @@ -34,9 +34,6 @@ type StateSyncerConfig struct { MaxOutstandingCodeHashes int // Maximum number of code hashes in the code syncer queue NumCodeFetchingWorkers int // Number of code syncing threads RequestSize uint16 // Number of leafs to request from a peer at a time - - // context cancellation management - cancelFunc context.CancelFunc } // stateSync keeps the state of the entire state sync operation. From 34e3c487b85d8ac31c7455e87ed2261a353817c4 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Fri, 8 Aug 2025 17:00:07 -0400 Subject: [PATCH 06/26] sync part 4 --- plugin/evm/config/config.go | 13 +++- plugin/evm/vm.go | 45 ++++++------ plugin/evm/vm_extensible.go | 90 +++++++++++++++++++++++ plugin/evm/{block.go => wrapped_block.go} | 85 ++++++++++++++------- 4 files changed, 184 insertions(+), 49 deletions(-) create mode 100644 plugin/evm/vm_extensible.go rename plugin/evm/{block.go => wrapped_block.go} (80%) diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go index 89ad5aa5b6..eae84384fe 100644 --- a/plugin/evm/config/config.go +++ b/plugin/evm/config/config.go @@ -9,6 +9,7 @@ import ( "time" "github.com/ava-labs/avalanchego/database/pebbledb" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/common/hexutil" "github.com/spf13/cast" @@ -343,7 +344,17 @@ func (d Duration) MarshalJSON() ([]byte, error) { } // Validate returns an error if this is an invalid config. 
-func (c *Config) Validate() error { +func (c *Config) Validate(networkID uint32) error { + // Ensure that non-standard commit interval is not allowed for production networks + if constants.ProductionNetworkIDs.Contains(networkID) { + if c.CommitInterval != defaultCommitInterval { + return fmt.Errorf("cannot start non-local network with commit interval %d different than %d", c.CommitInterval, defaultCommitInterval) + } + if c.StateSyncCommitInterval != defaultSyncableCommitInterval { + return fmt.Errorf("cannot start non-local network with syncable interval %d different than %d", c.StateSyncCommitInterval, defaultSyncableCommitInterval) + } + } + if c.PopulateMissingTries != nil && (c.OfflinePruning || c.Pruning) { return fmt.Errorf("cannot enable populate missing tries while offline pruning (enabled: %t)/pruning (enabled: %t) are enabled", c.OfflinePruning, c.Pruning) } diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index d441be98bc..f62ef389cd 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -184,6 +184,7 @@ type VM struct { config config.Config + chainID *big.Int genesisHash common.Hash chainConfig *params.ChainConfig ethConfig ethconfig.Config @@ -225,7 +226,7 @@ type VM struct { builderLock sync.Mutex builder *blockBuilder - clock mockable.Clock + clock *mockable.Clock shutdownChan chan struct{} shutdownWg sync.WaitGroup @@ -276,18 +277,21 @@ func (vm *VM) Initialize( fxs []*commonEng.Fx, appSender commonEng.AppSender, ) error { - vm.stateSyncDone = make(chan struct{}) - vm.extensionConfig = &extension.Config{ - SyncSummaryProvider: &message.BlockSyncSummaryProvider{}, - SyncableParser: message.NewBlockSyncSummaryParser(), + if err := vm.extensionConfig.Validate(); err != nil { + return fmt.Errorf("failed to validate extension config: %w", err) } + + vm.clock = vm.extensionConfig.Clock + vm.config.SetDefaults(defaultTxPoolConfig) if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &vm.config); err != nil { return fmt.Errorf("failed to unmarshal config %s: %w", string(configBytes), err) } } - if err := vm.config.Validate(); err != nil { + vm.ctx = chainCtx + + if err := vm.config.Validate(vm.ctx.NetworkID); err != nil { return err } // We should deprecate config flags as the first thing, before we do anything else @@ -346,20 +350,15 @@ func (vm *VM) Initialize( return err } - // TODO: FIX THIS vm.syntacticBlockValidator = NewBlockValidator(vm) + // vm.ChainConfig() should be available for wrapping VMs before vm.initializeChain() + vm.chainConfig = g.Config + vm.chainID = g.Config.ChainID vm.ethConfig = ethconfig.NewDefaultConfig() vm.ethConfig.Genesis = g - // NetworkID here is different than Avalanche's NetworkID. - // Avalanche's NetworkID represents the Avalanche network is running on - // like Fuji, Mainnet, Local, etc. - // The NetworkId here is kept same as ChainID to be compatible with - // Ethereum tooling. - vm.ethConfig.NetworkId = g.Config.ChainID.Uint64() - // create genesisHash after applying upgradeBytes in case - // upgradeBytes modifies genesis. 
- vm.genesisHash = vm.ethConfig.Genesis.ToBlock().Hash() // must create genesis hash before [vm.readLastAccepted] - lastAcceptedHash, lastAcceptedHeight, err := vm.readLastAccepted() + vm.ethConfig.NetworkId = vm.chainID.Uint64() + vm.genesisHash = vm.ethConfig.Genesis.ToBlock().Hash() // must create genesis hash before [vm.ReadLastAccepted] + lastAcceptedHash, lastAcceptedHeight, err := vm.ReadLastAccepted() if err != nil { return err } @@ -374,7 +373,6 @@ func (vm *VM) Initialize( vm.ethConfig.RPCEVMTimeout = vm.config.APIMaxDuration.Duration vm.ethConfig.RPCTxFeeCap = vm.config.RPCTxFeeCap - vm.ethConfig.TxPool.Locals = vm.config.PriorityRegossipAddresses vm.ethConfig.TxPool.NoLocals = !vm.config.LocalTxsEnabled vm.ethConfig.TxPool.PriceLimit = vm.config.TxPoolPriceLimit vm.ethConfig.TxPool.PriceBump = vm.config.TxPoolPriceBump @@ -463,7 +461,7 @@ func (vm *VM) Initialize( } vm.p2pValidators = vm.Network.P2PValidators() - vm.validatorsManager, err = validators.NewManager(vm.ctx, vm.validatorsDB, &vm.clock) + vm.validatorsManager, err = validators.NewManager(vm.ctx, vm.validatorsDB, vm.clock) if err != nil { return fmt.Errorf("failed to initialize validators manager: %w", err) } @@ -499,7 +497,6 @@ func (vm *VM) Initialize( if err != nil { return err } - if err := vm.initializeChain(lastAcceptedHash, vm.ethConfig); err != nil { return err } @@ -510,6 +507,8 @@ func (vm *VM) Initialize( warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner) vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler) + vm.stateSyncDone = make(chan struct{}) + return vm.initializeStateSync(lastAcceptedHeight) } @@ -619,8 +618,8 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash, ethConfig ethconfig. vm.chaindb, eth.Settings{MaxBlocksPerRequest: vm.config.MaxBlocksPerRequest}, lastAcceptedHash, - dummy.NewFakerWithClock(&vm.clock), - &vm.clock, + dummy.NewFakerWithClock(vm.clock), + vm.clock, ) if err != nil { return err @@ -1296,7 +1295,7 @@ func (vm *VM) startContinuousProfiler() { // last accepted block hash and height by reading directly from [vm.chaindb] instead of relying // on [chain]. // Note: assumes [vm.chaindb] and [vm.genesisHash] have been initialized. -func (vm *VM) readLastAccepted() (common.Hash, uint64, error) { +func (vm *VM) ReadLastAccepted() (common.Hash, uint64, error) { // Attempt to load last accepted block to determine if it is necessary to // initialize state with the genesis block. lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) diff --git a/plugin/evm/vm_extensible.go b/plugin/evm/vm_extensible.go new file mode 100644 index 0000000000..1f3f37368e --- /dev/null +++ b/plugin/evm/vm_extensible.go @@ -0,0 +1,90 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "context" + "errors" + + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/subnet-evm/core" + "github.com/ava-labs/subnet-evm/params" + "github.com/ava-labs/subnet-evm/plugin/evm/config" + "github.com/ava-labs/subnet-evm/plugin/evm/extension" + vmsync "github.com/ava-labs/subnet-evm/plugin/evm/sync" + "github.com/prometheus/client_golang/prometheus" +) + +var _ extension.InnerVM = (*VM)(nil) + +var ( + errVMAlreadyInitialized = errors.New("vm already initialized") + errExtensionConfigAlreadySet = errors.New("extension config already set") +) + +func (vm *VM) SetExtensionConfig(config *extension.Config) error { + if vm.ctx != nil { + return errVMAlreadyInitialized + } + if vm.extensionConfig != nil { + return errExtensionConfigAlreadySet + } + vm.extensionConfig = config + return nil +} + +// All these methods below assumes that VM is already initialized + +func (vm *VM) GetExtendedBlock(ctx context.Context, blkID ids.ID) (extension.ExtendedBlock, error) { + // Since each internal handler used by [vm.State] always returns a block + // with non-nil ethBlock value, GetBlockInternal should never return a + // (*Block) with a nil ethBlock value. + blk, err := vm.GetBlockInternal(ctx, blkID) + if err != nil { + return nil, err + } + + return blk.(*wrappedBlock), nil +} + +func (vm *VM) LastAcceptedExtendedBlock() extension.ExtendedBlock { + lastAcceptedBlock := vm.LastAcceptedBlockInternal() + if lastAcceptedBlock == nil { + return nil + } + return lastAcceptedBlock.(*wrappedBlock) +} + +// ChainConfig returns the chain config for the VM +// Even though this is available through Blockchain().Config(), +// ChainConfig() here will be available before the blockchain is initialized. +func (vm *VM) ChainConfig() *params.ChainConfig { + return vm.chainConfig +} + +func (vm *VM) Blockchain() *core.BlockChain { + return vm.blockChain +} + +func (vm *VM) Config() config.Config { + return vm.config +} + +func (vm *VM) MetricRegistry() *prometheus.Registry { + return vm.sdkMetrics +} + +func (vm *VM) Validators() *p2p.Validators { + return vm.P2PValidators() +} + +func (vm *VM) VersionDB() *versiondb.Database { + return vm.versiondb +} + +func (vm *VM) SyncerClient() vmsync.Client { + return vm.Client +} diff --git a/plugin/evm/block.go b/plugin/evm/wrapped_block.go similarity index 80% rename from plugin/evm/block.go rename to plugin/evm/wrapped_block.go index 678ba8e219..2b393d00f3 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/wrapped_block.go @@ -37,7 +37,7 @@ var ( _ extension.ExtendedBlock = (*wrappedBlock)(nil) ) -// wrappedBlock implements the snowman.wrappedBlock interface +// wrappedBlock implements the snowman.Block interface by wrapping a libevm block type wrappedBlock struct { id ids.ID ethBlock *types.Block @@ -80,9 +80,7 @@ func (b *wrappedBlock) Accept(context.Context) error { "height", b.Height(), ) - // Call Accept for relevant precompile logs. Note we do this prior to - // calling Accept on the blockChain so any side effects (eg warp signatures) - // take place before the accepted log is emitted to subscribers. + // Call Accept for relevant precompile logs before accepting on the chain. 
rules := b.vm.rules(b.ethBlock.Number(), b.ethBlock.Time()) if err := b.handlePrecompileAccept(rules); err != nil { return err @@ -95,6 +93,7 @@ func (b *wrappedBlock) Accept(context.Context) error { return fmt.Errorf("failed to put %s as the last accepted block: %w", blkID, err) } + // No block extension batching path in subnet-evm; commit versioned DB directly return b.vm.versiondb.Commit() } @@ -149,14 +148,10 @@ func (b *wrappedBlock) Parent() ids.ID { } // Height implements the snowman.Block interface -func (b *wrappedBlock) Height() uint64 { - return b.ethBlock.NumberU64() -} +func (b *wrappedBlock) Height() uint64 { return b.ethBlock.NumberU64() } // Timestamp implements the snowman.Block interface -func (b *wrappedBlock) Timestamp() time.Time { - return time.Unix(int64(b.ethBlock.Time()), 0) -} +func (b *wrappedBlock) Timestamp() time.Time { return time.Unix(int64(b.ethBlock.Time()), 0) } // Verify implements the snowman.Block interface func (b *wrappedBlock) Verify(context.Context) error { @@ -175,7 +170,7 @@ func (b *wrappedBlock) ShouldVerifyWithContext(context.Context) (bool, error) { return false, nil } - // Check if any of the transactions in the block specify a precompile that enforces a predicate, which requires + // Check if any transaction specifies a precompile that enforces a predicate, requiring // the ProposerVMBlockCtx. for _, tx := range b.ethBlock.Transactions() { for _, accessTuple := range tx.AccessList() { @@ -191,7 +186,7 @@ func (b *wrappedBlock) ShouldVerifyWithContext(context.Context) (bool, error) { } // VerifyWithContext implements the block.WithVerifyContext interface -func (b *wrappedBlock) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) error { +func (b *wrappedBlock) VerifyWithContext(_ context.Context, proposerVMBlockCtx *block.Context) error { return b.verify(&precompileconfig.PredicateContext{ SnowCtx: b.vm.ctx, ProposerVMBlockCtx: proposerVMBlockCtx, @@ -211,10 +206,11 @@ func (b *wrappedBlock) verify(predicateContext *precompileconfig.PredicateContex return fmt.Errorf("syntactic block verification failed: %w", err) } + if err := b.semanticVerify(); err != nil { + return fmt.Errorf("failed to verify block: %w", err) + } + // Only enforce predicates if the chain has already bootstrapped. - // If the chain is still bootstrapping, we can assume that all blocks we are verifying have - // been accepted by the network (so the predicate was validated by the network when the - // block was originally verified). if b.vm.bootstrapped.Get() { if err := b.verifyPredicates(predicateContext); err != nil { return fmt.Errorf("failed to verify predicates: %w", err) @@ -222,10 +218,6 @@ func (b *wrappedBlock) verify(predicateContext *precompileconfig.PredicateContex } // The engine may call VerifyWithContext multiple times on the same block with different contexts. - // Since the engine will only call Accept/Reject once, we should only call InsertBlockManual once. - // Additionally, if a block is already in processing, then it has already passed verification and - // at this point we have checked the predicates are still valid in the different context so we - // can return nil. if b.vm.State.IsProcessing(b.id) { return nil } @@ -233,6 +225,22 @@ func (b *wrappedBlock) verify(predicateContext *precompileconfig.PredicateContex return b.vm.blockChain.InsertBlockManual(b.ethBlock, writes) } +// semanticVerify verifies that a *Block is internally consistent. 
+func (b *wrappedBlock) semanticVerify() error {
+    // Make sure the block isn't too far in the future
+    blockTimestamp := b.ethBlock.Time()
+    if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime {
+        return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime)
+    }
+
+    if b.extension != nil {
+        if err := b.extension.SemanticVerify(); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
 // syntacticVerify verifies that a *Block is well-formed.
 func (b *wrappedBlock) syntacticVerify() error {
     if b == nil || b.ethBlock == nil {
@@ -291,6 +299,37 @@ func (b *wrappedBlock) syntacticVerify() error {
         return errUnclesUnsupported
     }
 
+    // Verify the existence / non-existence of excessBlobGas
+    cancun := rules.IsCancun
+    if !cancun && ethHeader.ExcessBlobGas != nil {
+        return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *ethHeader.ExcessBlobGas)
+    }
+    if !cancun && ethHeader.BlobGasUsed != nil {
+        return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *ethHeader.BlobGasUsed)
+    }
+    if cancun && ethHeader.ExcessBlobGas == nil {
+        return errors.New("header is missing excessBlobGas")
+    }
+    if cancun && ethHeader.BlobGasUsed == nil {
+        return errors.New("header is missing blobGasUsed")
+    }
+    if !cancun && ethHeader.ParentBeaconRoot != nil {
+        return fmt.Errorf("invalid parentBeaconRoot: have %x, expected nil", *ethHeader.ParentBeaconRoot)
+    }
+    if cancun {
+        switch {
+        case ethHeader.ParentBeaconRoot == nil:
+            return errors.New("header is missing parentBeaconRoot")
+        case *ethHeader.ParentBeaconRoot != (common.Hash{}):
+            return fmt.Errorf("invalid parentBeaconRoot: have %x, expected empty hash", ethHeader.ParentBeaconRoot)
+        }
+        if ethHeader.BlobGasUsed == nil {
+            return fmt.Errorf("blob gas used must not be nil in Cancun")
+        } else if *ethHeader.BlobGasUsed > 0 {
+            return fmt.Errorf("blobs not enabled on avalanche networks: used %d blob gas, expected 0", *ethHeader.BlobGasUsed)
+        }
+    }
+
     if b.extension != nil {
         if err := b.extension.SyntacticVerify(*rulesExtra); err != nil {
             return err
@@ -343,10 +382,6 @@ func (b *wrappedBlock) Bytes() []byte {
 
 func (b *wrappedBlock) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID()) }
 
-func (b *wrappedBlock) GetEthBlock() *types.Block {
-    return b.ethBlock
-}
+func (b *wrappedBlock) GetEthBlock() *types.Block { return b.ethBlock }
 
-func (b *wrappedBlock) GetBlockExtension() extension.BlockExtension {
-    return b.extension
-}
+func (b *wrappedBlock) GetBlockExtension() extension.BlockExtension { return b.extension }
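`semanticVerify` now carries the wall-clock check that used to live alongside syntactic verification: a block stamped more than `maxFutureBlockTime` ahead of the local clock is rejected before any state is touched. The bound itself is easy to exercise in isolation; a sketch assuming a ten-second `maxFutureBlockTime` (the real constant is defined elsewhere in `plugin/evm`):

```go
package main

import (
	"fmt"
	"time"
)

// Assumed value for illustration; the real constant lives elsewhere in plugin/evm.
const maxFutureBlockTime = 10 * time.Second

// checkFutureTimestamp rejects blocks stamped beyond now + maxFutureBlockTime,
// matching the comparison done in semanticVerify.
func checkFutureTimestamp(blockTimestamp uint64, now time.Time) error {
	if maxBlockTime := uint64(now.Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime {
		return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime)
	}
	return nil
}

func main() {
	now := time.Unix(1_700_000_000, 0)
	fmt.Println(checkFutureTimestamp(uint64(now.Unix()), now))                    // <nil>
	fmt.Println(checkFutureTimestamp(uint64(now.Unix())+11, now))                 // too far in the future
	fmt.Println(checkFutureTimestamp(uint64(now.Add(5*time.Second).Unix()), now)) // <nil>, within the bound
}
```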
From ae531aad3388500d6cc4c8a0bef4845370f5a232 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Fri, 8 Aug 2025 17:04:34 -0400
Subject: [PATCH 07/26] oops broke the clock

---
 plugin/evm/vm.go      | 16 ++++++++++++++--
 plugin/evm/vm_test.go |  6 ++++++
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index f62ef389cd..9bcf09d3f2 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -277,12 +277,24 @@ func (vm *VM) Initialize(
     fxs []*commonEng.Fx,
     appSender commonEng.AppSender,
 ) error {
+    // Initialize extension config if not already set
+    if vm.extensionConfig == nil {
+        // Initialize clock if not already set
+        if vm.clock == nil {
+            vm.clock = &mockable.Clock{}
+        }
+        vm.extensionConfig = &extension.Config{
+            SyncSummaryProvider: &message.BlockSyncSummaryProvider{},
+            SyncableParser:      message.NewBlockSyncSummaryParser(),
+        }
+        // Provide a clock to the extension config before validation
+        vm.extensionConfig.Clock = vm.clock
+    }
+
     if err := vm.extensionConfig.Validate(); err != nil {
         return fmt.Errorf("failed to validate extension config: %w", err)
     }
 
-    vm.clock = vm.extensionConfig.Clock
-
     vm.config.SetDefaults(defaultTxPoolConfig)
     if len(configBytes) > 0 {
         if err := json.Unmarshal(configBytes, &vm.config); err != nil {
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index e3c88d07de..faa73fac60 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -1701,6 +1701,9 @@ func testEmptyBlock(t *testing.T, scheme string) {
     )
 
     emptyBlock, err := wrapBlock(emptyEthBlock, tvm.vm)
+    if err != nil {
+        t.Fatal(err)
+    }
 
     if _, err := tvm.vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) {
         t.Fatalf("VM should have failed with errEmptyBlock but got %s", err.Error())
@@ -1995,6 +1998,9 @@ func testFutureBlock(t *testing.T, scheme string) {
     )
 
     futureBlock, err := wrapBlock(modifiedBlock, tvm.vm)
+    if err != nil {
+        t.Fatal(err)
+    }
 
     if err := futureBlock.Verify(context.Background()); err == nil {
         t.Fatal("Future block should have failed verification due to block timestamp too far in the future")

From 892379e121eeaa4c6fc4b073d2f269614c7523fe Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Thu, 21 Aug 2025 14:57:27 -0400
Subject: [PATCH 08/26] remove duplicate import

---
 plugin/evm/sync/client.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/plugin/evm/sync/client.go b/plugin/evm/sync/client.go
index b56460d09c..d0883b8adf 100644
--- a/plugin/evm/sync/client.go
+++ b/plugin/evm/sync/client.go
@@ -8,8 +8,6 @@ import (
     "fmt"
     "sync"
 
-    syncclient "github.com/ava-labs/subnet-evm/sync/client"
-
     "github.com/ava-labs/avalanchego/database"
     "github.com/ava-labs/avalanchego/database/versiondb"
     "github.com/ava-labs/avalanchego/ids"

From c7c0c9aa933742ac3cfa329821acb4aad4106960 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Thu, 21 Aug 2025 15:51:54 -0400
Subject: [PATCH 09/26] lint

---
 plugin/evm/extension/config.go                    | 9 ++++-----
 plugin/evm/message/block_sync_summary.go          | 1 -
 plugin/evm/message/block_sync_summary_provider.go | 1 -
 plugin/evm/sync/server.go                         | 6 +++---
 plugin/evm/vm.go                                  | 2 +-
 plugin/evm/vm_extensible.go                       | 4 +++-
 plugin/evm/vm_warp_test.go                        | 2 +-
 plugin/evm/wrapped_block.go                       | 2 +-
 8 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go
index bf1bf3660a..18ff35cdbe 100644
--- a/plugin/evm/extension/config.go
+++ b/plugin/evm/extension/config.go
@@ -12,10 +12,10 @@ import (
     "github.com/ava-labs/avalanchego/ids"
     "github.com/ava-labs/avalanchego/network/p2p"
     "github.com/ava-labs/avalanchego/snow/consensus/snowman"
-    avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common"
-    "github.com/ava-labs/avalanchego/utils/timer/mockable"
-
     "github.com/ava-labs/avalanchego/snow/engine/snowman/block"
+    "github.com/ava-labs/avalanchego/utils/timer/mockable"
+    "github.com/ava-labs/libevm/common"
+    "github.com/ava-labs/libevm/core/types"
     "github.com/prometheus/client_golang/prometheus"
 
     "github.com/ava-labs/subnet-evm/consensus/dummy"
@@ -27,8 +27,7 @@ import (
     "github.com/ava-labs/subnet-evm/plugin/evm/sync"
     "github.com/ava-labs/subnet-evm/sync/handlers"
 
-    "github.com/ava-labs/libevm/common"
-    "github.com/ava-labs/libevm/core/types"
+    avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common"
 )
diff --git a/plugin/evm/message/block_sync_summary.go b/plugin/evm/message/block_sync_summary.go
index d3683ec40c..45f80052b1 100644
--- a/plugin/evm/message/block_sync_summary.go
+++ b/plugin/evm/message/block_sync_summary.go
@@ -9,7 +9,6 @@ import (
 
     "github.com/ava-labs/avalanchego/ids"
     "github.com/ava-labs/avalanchego/snow/engine/snowman/block"
-
     "github.com/ava-labs/libevm/common"
     "github.com/ava-labs/libevm/crypto"
 )
diff --git a/plugin/evm/message/block_sync_summary_provider.go b/plugin/evm/message/block_sync_summary_provider.go
index 959ae4d0b7..f751bf6fa7 100644
--- a/plugin/evm/message/block_sync_summary_provider.go
+++ b/plugin/evm/message/block_sync_summary_provider.go
@@ -4,7 +4,6 @@ package message
 
 import (
     "github.com/ava-labs/avalanchego/snow/engine/snowman/block"
-
     "github.com/ava-labs/libevm/core/types"
 )
diff --git a/plugin/evm/sync/server.go b/plugin/evm/sync/server.go
index ea7dc8b54b..6422328193 100644
--- a/plugin/evm/sync/server.go
+++ b/plugin/evm/sync/server.go
@@ -5,18 +5,18 @@ package sync
 
 import (
     "context"
+    "errors"
     "fmt"
 
     "github.com/ava-labs/avalanchego/database"
     "github.com/ava-labs/avalanchego/snow/engine/snowman/block"
+    "github.com/ava-labs/libevm/core/types"
     "github.com/ava-labs/libevm/log"
 
     "github.com/ava-labs/subnet-evm/core"
-
-    "github.com/ava-labs/libevm/core/types"
 )
 
-var errProviderNotSet = fmt.Errorf("provider not set")
+var errProviderNotSet = errors.New("provider not set")
 
 type SummaryProvider interface {
     StateSummaryAtBlock(ethBlock *types.Block) (block.StateSummary, error)
 }
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 00c5d99b01..b34b4fb0bc 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -69,7 +69,6 @@ import (
     "github.com/ava-labs/subnet-evm/plugin/evm/customrawdb"
     "github.com/ava-labs/subnet-evm/plugin/evm/extension"
     "github.com/ava-labs/subnet-evm/plugin/evm/message"
-    vmsync "github.com/ava-labs/subnet-evm/plugin/evm/sync"
     "github.com/ava-labs/subnet-evm/plugin/evm/validators"
     "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces"
     "github.com/ava-labs/subnet-evm/precompile/precompileconfig"
@@ -85,6 +84,7 @@ import (
     avalanchegoprometheus "github.com/ava-labs/avalanchego/vms/evm/metrics/prometheus"
     ethparams "github.com/ava-labs/libevm/params"
     subnetevmlog "github.com/ava-labs/subnet-evm/plugin/evm/log"
+    vmsync "github.com/ava-labs/subnet-evm/plugin/evm/sync"
     warpcontract "github.com/ava-labs/subnet-evm/precompile/contracts/warp"
     statesyncclient "github.com/ava-labs/subnet-evm/sync/client"
     handlerstats "github.com/ava-labs/subnet-evm/sync/handlers/stats"
diff --git a/plugin/evm/vm_extensible.go b/plugin/evm/vm_extensible.go
index 1f3f37368e..2fae0e156e 100644
--- a/plugin/evm/vm_extensible.go
+++ b/plugin/evm/vm_extensible.go
@@ -10,12 +10,14 @@ import (
     "github.com/ava-labs/avalanchego/database/versiondb"
     "github.com/ava-labs/avalanchego/ids"
     "github.com/ava-labs/avalanchego/network/p2p"
+    "github.com/prometheus/client_golang/prometheus"
+
     "github.com/ava-labs/subnet-evm/core"
     "github.com/ava-labs/subnet-evm/params"
     "github.com/ava-labs/subnet-evm/plugin/evm/config"
     "github.com/ava-labs/subnet-evm/plugin/evm/extension"
+
     vmsync "github.com/ava-labs/subnet-evm/plugin/evm/sync"
-
-    "github.com/prometheus/client_golang/prometheus"
 )
diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go
index 9c04fc84a0..254bf739f2 100644
--- a/plugin/evm/vm_warp_test.go
+++ b/plugin/evm/vm_warp_test.go
@@ -40,6 +40,7 @@ import (
     "github.com/ava-labs/subnet-evm/eth/tracers"
     "github.com/ava-labs/subnet-evm/params"
     "github.com/ava-labs/subnet-evm/params/extras"
"github.com/ava-labs/subnet-evm/plugin/evm/extension" "github.com/ava-labs/subnet-evm/precompile/contract" "github.com/ava-labs/subnet-evm/predicate" "github.com/ava-labs/subnet-evm/utils" @@ -48,7 +49,6 @@ import ( commonEng "github.com/ava-labs/avalanchego/snow/engine/common" avagoUtils "github.com/ava-labs/avalanchego/utils" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/subnet-evm/plugin/evm/extension" customheader "github.com/ava-labs/subnet-evm/plugin/evm/header" warpcontract "github.com/ava-labs/subnet-evm/precompile/contracts/warp" ) diff --git a/plugin/evm/wrapped_block.go b/plugin/evm/wrapped_block.go index dc4fdb16d0..b1301ecae3 100644 --- a/plugin/evm/wrapped_block.go +++ b/plugin/evm/wrapped_block.go @@ -322,7 +322,7 @@ func (b *wrappedBlock) syntacticVerify() error { return fmt.Errorf("invalid parentBeaconRoot: have %x, expected empty hash", ethHeader.ParentBeaconRoot) } if ethHeader.BlobGasUsed == nil { - return fmt.Errorf("blob gas used must not be nil in Cancun") + return errors.New("blob gas used must not be nil in Cancun") } else if *ethHeader.BlobGasUsed > 0 { return fmt.Errorf("blobs not enabled on avalanche networks: used %d blob gas, expected 0", *ethHeader.BlobGasUsed) } From 434fb4f3fb7cebe0d543361fd7b7967723289d16 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 25 Aug 2025 17:18:36 -0400 Subject: [PATCH 10/26] lint --- plugin/evm/network_handler.go | 2 -- plugin/evm/vm.go | 1 - 2 files changed, 3 deletions(-) diff --git a/plugin/evm/network_handler.go b/plugin/evm/network_handler.go index 84e52ebd10..74952f8375 100644 --- a/plugin/evm/network_handler.go +++ b/plugin/evm/network_handler.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/libevm/triedb" "github.com/ava-labs/subnet-evm/plugin/evm/message" - "github.com/ava-labs/subnet-evm/warp" syncHandlers "github.com/ava-labs/subnet-evm/sync/handlers" syncStats "github.com/ava-labs/subnet-evm/sync/handlers/stats" @@ -41,7 +40,6 @@ type LeafRequestTypeConfig struct { func newNetworkHandler( provider syncHandlers.SyncDataProvider, diskDB ethdb.KeyValueReader, - warpBackend warp.Backend, networkCodec codec.Manager, leafRequestHandlers LeafHandlers, syncStats syncStats.HandlerStats, diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 67f8ce62ce..2431d842b2 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -677,7 +677,6 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { networkHandler := newNetworkHandler( vm.blockChain, vm.chaindb, - vm.warpBackend, vm.networkCodec, leafHandlers, syncStats, From 0edb94f1ab584754c3cd3a42538df4537f34d7e7 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Wed, 27 Aug 2025 12:45:13 -0400 Subject: [PATCH 11/26] lint --- plugin/evm/vm_warp_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 5a8980752f..ca56ae6d1c 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -40,8 +40,8 @@ import ( "github.com/ava-labs/subnet-evm/eth/tracers" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" - "github.com/ava-labs/subnet-evm/plugin/evm/extension" "github.com/ava-labs/subnet-evm/params/paramstest" + "github.com/ava-labs/subnet-evm/plugin/evm/extension" "github.com/ava-labs/subnet-evm/precompile/contract" "github.com/ava-labs/subnet-evm/predicate" "github.com/ava-labs/subnet-evm/utils" From 728620cb19e17bdaef4100382cfd606c002d8e70 Mon Sep 17 00:00:00 2001 From: 
From 728620cb19e17bdaef4100382cfd606c002d8e70 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Thu, 4 Sep 2025 15:49:48 -0400
Subject: [PATCH 12/26] align with coreth

---
 plugin/evm/config/config.go | 13 +++++-----
 plugin/evm/vm.go            | 48 ++++++++++++-------------------------
 2 files changed, 22 insertions(+), 39 deletions(-)

diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go
index ee4ad27648..c89e34d6de 100644
--- a/plugin/evm/config/config.go
+++ b/plugin/evm/config/config.go
@@ -231,15 +231,16 @@ func (d Duration) MarshalJSON() ([]byte, error) {
     return json.Marshal(d.Duration.String())
 }
 
-// Validate returns an error if this is an invalid config.
-func (c *Config) Validate(networkID uint32) error {
+// validate returns an error if this is an invalid config.
+func (c *Config) validate(networkID uint32) error {
     // Ensure that non-standard commit interval is not allowed for production networks
     if constants.ProductionNetworkIDs.Contains(networkID) {
-        if c.CommitInterval != defaultCommitInterval {
-            return fmt.Errorf("cannot start non-local network with commit interval %d different than %d", c.CommitInterval, defaultCommitInterval)
+        defaultConfig := NewDefaultConfig()
+        if c.CommitInterval != defaultConfig.CommitInterval {
+            return fmt.Errorf("cannot start non-local network with commit interval %d different than %d", c.CommitInterval, defaultConfig.CommitInterval)
         }
-        if c.StateSyncCommitInterval != defaultSyncableCommitInterval {
-            return fmt.Errorf("cannot start non-local network with syncable interval %d different than %d", c.StateSyncCommitInterval, defaultSyncableCommitInterval)
+        if c.StateSyncCommitInterval != defaultConfig.StateSyncCommitInterval {
+            return fmt.Errorf("cannot start non-local network with syncable interval %d different than %d", c.StateSyncCommitInterval, defaultConfig.StateSyncCommitInterval)
         }
     }
 
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 929d208e2f..4aab49c15b 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -8,6 +8,7 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "io"
     "math/big"
     "net/http"
     "os"
@@ -144,6 +145,8 @@ var (
     errShuttingDownVM = errors.New("shutting down VM")
 )
 
+var originalStderr *os.File
+
 // legacyApiNames maps pre geth v1.10.20 api names to their updated counterparts.
 // used in attachEthService for backward configuration compatibility.
 var legacyApiNames = map[string]string{
@@ -234,6 +237,7 @@ type VM struct {
     sdkMetrics *prometheus.Registry
 
     bootstrapped avalancheUtils.Atomic[bool]
+    IsPlugin     bool
 
     stateSyncDone chan struct{}
 
@@ -269,41 +273,14 @@ func (vm *VM) Initialize(
     fxs []*commonEng.Fx,
     appSender commonEng.AppSender,
 ) error {
-    // Initialize extension config if not already set
-    if vm.extensionConfig == nil {
-        // Initialize clock if not already set
-        if vm.clock == nil {
-            vm.clock = &mockable.Clock{}
-        }
-        vm.extensionConfig = &extension.Config{
-            SyncSummaryProvider: &message.BlockSyncSummaryProvider{},
-            SyncableParser:      message.NewBlockSyncSummaryParser(),
-        }
-        // Provide a clock to the extension config before validation
-        vm.extensionConfig.Clock = vm.clock
-    }
-
-    if err := vm.extensionConfig.Validate(); err != nil {
-        return fmt.Errorf("failed to validate extension config: %w", err)
-    }
-
-    vm.config.SetDefaults(defaultTxPoolConfig)
-    if len(configBytes) > 0 {
-        if err := json.Unmarshal(configBytes, &vm.config); err != nil {
-            return fmt.Errorf("failed to unmarshal config %s: %w", string(configBytes), err)
-        }
-    }
     vm.ctx = chainCtx
+    vm.clock = vm.extensionConfig.Clock
 
-    if err := vm.config.Validate(vm.ctx.NetworkID); err != nil {
-        return err
+    cfg, deprecateMsg, err := config.GetConfig(configBytes, vm.ctx.NetworkID)
+    if err != nil {
+        return fmt.Errorf("failed to get config: %w", err)
     }
-    // We should deprecate config flags as the first thing, before we do anything else
-    // because this can set old flags to new flags. log the message after we have
-    // initialized the logger.
-    deprecateMsg := vm.config.Deprecate()
-
-    vm.ctx = chainCtx
+    vm.config = cfg
 
     // Create logger
     alias, err := vm.ctx.BCLookup.PrimaryAlias(vm.ctx.ChainID)
@@ -313,7 +290,12 @@ func (vm *VM) Initialize(
     }
     vm.chainAlias = alias
 
-    subnetEVMLogger, err := subnetevmlog.InitLogger(vm.chainAlias, vm.config.LogLevel, vm.config.LogJSONFormat, vm.ctx.Log)
+    var writer io.Writer = vm.ctx.Log
+    if vm.IsPlugin {
+        writer = originalStderr
+    }
+
+    subnetEVMLogger, err := subnetevmlog.InitLogger(vm.chainAlias, vm.config.LogLevel, vm.config.LogJSONFormat, writer)
     if err != nil {
         return fmt.Errorf("%w: %w ", errInitializingLogger, err)
     }

From 52aba0cce6a7694bda6fe66641ce1af33192db02 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Thu, 4 Sep 2025 16:06:16 -0400
Subject: [PATCH 13/26] add back advance clock setting

---
 plugin/evm/vm.go | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 4aab49c15b..e62c3d25d0 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -274,7 +274,20 @@ func (vm *VM) Initialize(
     appSender commonEng.AppSender,
 ) error {
     vm.ctx = chainCtx
-    vm.clock = vm.extensionConfig.Clock
+
+    // Initialize extension config if not already set
+    if vm.extensionConfig == nil {
+        // Initialize clock if not already set
+        if vm.clock == nil {
+            vm.clock = &mockable.Clock{}
+        }
+        vm.extensionConfig = &extension.Config{
+            SyncSummaryProvider: &message.BlockSyncSummaryProvider{},
+            SyncableParser:      message.NewBlockSyncSummaryParser(),
+        }
+        // Provide a clock to the extension config before validation
+        vm.extensionConfig.Clock = vm.clock
+    }
 
     cfg, deprecateMsg, err := config.GetConfig(configBytes, vm.ctx.NetworkID)
     if err != nil {
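Patch 13 restores the lazy-default path that patch 12 dropped, so a bare VM with no extending VM wired in still gets a summary provider, parser, and clock — and the clock is attached before validation runs against it. The ordering reduces to this pattern; `clock`, `extensionConfig`, and `initDefaults` below are simplified stand-ins for `mockable.Clock`, `extension.Config`, and the `Initialize` preamble:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Simplified stand-in for mockable.Clock.
type clock struct{ now time.Time }

func (c *clock) Time() time.Time { return c.now }

// Simplified stand-in for extension.Config.
type extensionConfig struct {
	Clock *clock
}

func (c *extensionConfig) Validate() error {
	if c.Clock == nil {
		return errors.New("nil clock")
	}
	return nil
}

type vm struct {
	clock           *clock
	extensionConfig *extensionConfig
}

// initDefaults mirrors the Initialize preamble: fill in defaults only when
// nothing was injected, and set the clock before Validate can observe it.
func (v *vm) initDefaults() error {
	if v.extensionConfig == nil {
		if v.clock == nil {
			v.clock = &clock{now: time.Now()}
		}
		v.extensionConfig = &extensionConfig{}
		v.extensionConfig.Clock = v.clock
	}
	return v.extensionConfig.Validate()
}

func main() {
	v := &vm{}
	fmt.Println(v.initDefaults())                   // <nil>: defaults created
	fmt.Println(v.clock == v.extensionConfig.Clock) // true: one shared clock
}
```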
From eb25788481d9c92023d6190d87bd7841261778ec Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Mon, 8 Sep 2025 13:37:12 -0400
Subject: [PATCH 14/26] simplify registration

---
 plugin/evm/message/codec.go | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go
index 9b6a93b45f..ba71af2f32 100644
--- a/plugin/evm/message/codec.go
+++ b/plugin/evm/message/codec.go
@@ -22,13 +22,11 @@ func init() {
     c := linearcodec.NewDefault()
 
     // Skip registration to keep registeredTypes unchanged after legacy gossip deprecation
-    c.SkipRegistrations(1)
+    // Gossip types and sync summary type removed from codec
+    c.SkipRegistrations(2)
 
     errs := wrappers.Errs{}
 
-    // Gossip types and sync summary type removed from codec
-    c.SkipRegistrations(3)
     errs.Add(
-        // state sync types
         c.RegisterType(BlockRequest{}),
         c.RegisterType(BlockResponse{}),

From 07edff7978dcaa9884cfc2df7340c8b8bc291da8 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Tue, 9 Sep 2025 17:06:36 -0400
Subject: [PATCH 15/26] merge

---
 plugin/evm/extension/config.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go
index 18ff35cdbe..83f07500ac 100644
--- a/plugin/evm/extension/config.go
+++ b/plugin/evm/extension/config.go
@@ -42,7 +42,7 @@ type ExtensibleVM interface {
     // Should be called before any other method and only once
     SetExtensionConfig(config *Config) error
     // NewClient returns a client to send messages with for the given protocol
-    NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client
+    NewClient(protocol uint64) *p2p.Client
     // AddHandler registers a server handler for an application protocol
     AddHandler(protocol uint64, handler p2p.Handler) error
     // GetExtendedBlock returns the VMBlock for the given ID or an error if the block is not found
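The codec change above works because a linear codec assigns type IDs positionally, in registration order: deleting a retired type would silently renumber every type registered after it, whereas `SkipRegistrations` burns the retired slots and keeps the surviving wire IDs stable. A dependency-free toy version of that bookkeeping (the real `linearcodec` API differs in detail):

```go
package main

import "fmt"

// Toy linear codec: type IDs are purely positional.
type codec struct {
	nextID int
	ids    map[string]int
}

func newCodec() *codec { return &codec{ids: map[string]int{}} }

func (c *codec) RegisterType(name string) {
	c.ids[name] = c.nextID
	c.nextID++
}

// SkipRegistrations reserves n IDs for retired types so the
// surviving types keep their historical wire IDs.
func (c *codec) SkipRegistrations(n int) { c.nextID += n }

func main() {
	c := newCodec()
	c.SkipRegistrations(2) // slots once held by the removed gossip/sync summary types
	c.RegisterType("BlockRequest")
	c.RegisterType("BlockResponse")
	// IDs 2 and 3 are unchanged from before the removal.
	fmt.Println(c.ids["BlockRequest"], c.ids["BlockResponse"])
}
```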
From fa8a32d916d3508c2a0cf94da50fac586646efea Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Fri, 12 Sep 2025 16:10:22 -0400
Subject: [PATCH 16/26] lint

---
 plugin/evm/message/block_sync_summary_parser.go   | 2 +-
 plugin/evm/message/block_sync_summary_provider.go | 2 +-
 plugin/evm/message/handler.go                     | 2 +-
 plugin/evm/vm.go                                  | 4 ++--
 sync/client/test_client.go                        | 2 +-
 sync/statesync/sync_test.go                       | 2 +-
 6 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/plugin/evm/message/block_sync_summary_parser.go b/plugin/evm/message/block_sync_summary_parser.go
index 97435edece..b93ff47c7a 100644
--- a/plugin/evm/message/block_sync_summary_parser.go
+++ b/plugin/evm/message/block_sync_summary_parser.go
@@ -15,7 +15,7 @@ func NewBlockSyncSummaryParser() *BlockSyncSummaryParser {
     return &BlockSyncSummaryParser{}
 }
 
-func (b *BlockSyncSummaryParser) Parse(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) {
+func (_ *BlockSyncSummaryParser) Parse(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) {
     summary := BlockSyncSummary{}
     if _, err := Codec.Unmarshal(summaryBytes, &summary); err != nil {
         return nil, fmt.Errorf("failed to parse syncable summary: %w", err)
diff --git a/plugin/evm/message/block_sync_summary_provider.go b/plugin/evm/message/block_sync_summary_provider.go
index f751bf6fa7..88940e81ae 100644
--- a/plugin/evm/message/block_sync_summary_provider.go
+++ b/plugin/evm/message/block_sync_summary_provider.go
@@ -10,6 +10,6 @@ import (
 type BlockSyncSummaryProvider struct{}
 
 // StateSummaryAtBlock returns the block state summary at [block] if valid.
-func (a *BlockSyncSummaryProvider) StateSummaryAtBlock(blk *types.Block) (block.StateSummary, error) {
+func (_ *BlockSyncSummaryProvider) StateSummaryAtBlock(blk *types.Block) (block.StateSummary, error) {
     return NewBlockSyncSummary(blk.Hash(), blk.NumberU64(), blk.Root())
 }
diff --git a/plugin/evm/message/handler.go b/plugin/evm/message/handler.go
index 2dcfbd87fd..e7af9feb53 100644
--- a/plugin/evm/message/handler.go
+++ b/plugin/evm/message/handler.go
@@ -32,7 +32,7 @@ type ResponseHandler interface {
 
 type NoopRequestHandler struct{}
 
-func (NoopRequestHandler) HandleLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) {
+func (NoopRequestHandler) HandleLeafsRequest(_ context.Context, _ ids.NodeID, _ uint32, _ LeafsRequest) ([]byte, error) {
     return nil, nil
 }
 
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 69d37d333d..e1ff38d41f 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -1373,6 +1373,6 @@ func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool {
     return lastAcceptedHeight == 0
 }
 
-func (vm *VM) PutLastAcceptedID(ID ids.ID) error {
-    return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:])
+func (vm *VM) PutLastAcceptedID(id ids.ID) error {
+    return vm.acceptedBlockDB.Put(lastAcceptedKey, id[:])
 }
diff --git a/sync/client/test_client.go b/sync/client/test_client.go
index a720edde45..b6f7970aae 100644
--- a/sync/client/test_client.go
+++ b/sync/client/test_client.go
@@ -144,7 +144,7 @@ func newTestBlockParser() *testBlockParser {
     return &testBlockParser{}
 }
 
-func (t *testBlockParser) ParseEthBlock(b []byte) (*types.Block, error) {
+func (_ *testBlockParser) ParseEthBlock(b []byte) (*types.Block, error) {
     block := new(types.Block)
     if err := rlp.DecodeBytes(b, block); err != nil {
         return nil, fmt.Errorf("%w: %w", errUnmarshalResponse, err)
diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go
index c821798836..0b076c9a11 100644
--- a/sync/statesync/sync_test.go
+++ b/sync/statesync/sync_test.go
@@ -583,7 +583,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database
 }
 
 func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB *triedb.Database, root common.Hash, numAccounts int) common.Hash {
-    newRoot, _ := statesynctest.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount {
+    newRoot, _ := statesynctest.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, _ int, account types.StateAccount) types.StateAccount {
         codeBytes := make([]byte, 256)
         _, err := rand.Read(codeBytes)
         if err != nil {

From 8fa7586cd4514333d9b189aed06c80270ad6f221 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Thu, 25 Sep 2025 12:03:28 -0400
Subject: [PATCH 17/26] add errors back

---
 plugin/evm/vm.go | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 7a6f283522..217e4ebaba 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -141,6 +141,8 @@ var (
     errInvalidBlock                  = errors.New("invalid block")
     errInvalidNonce                  = errors.New("invalid nonce")
     errUnclesUnsupported             = errors.New("uncles unsupported")
+    errNilBaseFeeSubnetEVM           = errors.New("nil base fee is invalid after subnetEVM")
+    errNilBlockGasCostSubnetEVM      = errors.New("nil blockGasCost is invalid after subnetEVM")
     errInvalidHeaderPredicateResults = errors.New("invalid header predicate results")
     errInitializingLogger            = errors.New("failed to initialize logger")
     errShuttingDownVM                = errors.New("shutting down VM")
@@ -1376,7 +1378,3 @@ func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool {
 func (vm *VM) PutLastAcceptedID(id ids.ID) error {
     return vm.acceptedBlockDB.Put(lastAcceptedKey, id[:])
 }
-
-func (vm *VM) PutLastAcceptedID(id ids.ID) error {
-    return vm.acceptedBlockDB.Put(lastAcceptedKey, id[:])
-}

From 94c2b467e3ce230a0d6533733bdb5c4037c75a1b Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Thu, 25 Sep 2025 16:26:03 -0400
Subject: [PATCH 18/26] lint

---
 plugin/evm/wrapped_block.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugin/evm/wrapped_block.go b/plugin/evm/wrapped_block.go
index e817b40029..ce627d1d17 100644
--- a/plugin/evm/wrapped_block.go
+++ b/plugin/evm/wrapped_block.go
@@ -45,7 +45,7 @@ type wrappedBlock struct {
 }
 
 // wrapBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface
-func wrapBlock(ethBlock *types.Block, vm *VM) (*wrappedBlock, error) { //nolint:unparam // this just makes the function compatible with the future syncs I'll do, it's temporary!!
+func wrapBlock(ethBlock *types.Block, vm *VM) (*wrappedBlock, error) {
     b := &wrappedBlock{
         id:       ids.ID(ethBlock.Hash()),
         ethBlock: ethBlock,
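`PutLastAcceptedID` — parameter rename in patch 16, duplicate declaration removed in patch 17 — is a single fixed-key write of the 32-byte block ID that startup later reads to resume. Sketched against a plain map-backed store; the real code writes to `acceptedBlockDB` under `lastAcceptedKey`, and the key bytes below are illustrative:

```go
package main

import "fmt"

// Illustrative key; the actual lastAcceptedKey value lives in plugin/evm.
var lastAcceptedKey = []byte("last_accepted_key")

type kvStore map[string][]byte

func (s kvStore) Put(key, value []byte) error {
	s[string(key)] = append([]byte(nil), value...) // copy, don't alias caller memory
	return nil
}

type id [32]byte

// putLastAcceptedID mirrors vm.PutLastAcceptedID: persist the raw ID bytes
// under one well-known key.
func putLastAcceptedID(db kvStore, blkID id) error {
	return db.Put(lastAcceptedKey, blkID[:])
}

func main() {
	db := kvStore{}
	var blkID id
	blkID[0] = 0xab
	if err := putLastAcceptedID(db, blkID); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", db[string(lastAcceptedKey)][:4]) // ab000000
}
```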
From ac0b665399c8b26a63a1e0f73394f52f0a138255 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Fri, 10 Oct 2025 11:04:36 -0400
Subject: [PATCH 19/26] finish merge

---
 plugin/evm/vm.go            | 4 ++--
 sync/statesync/sync_test.go | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 7bc7280886..dfbd1c2960 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -33,7 +33,6 @@ import (
     "github.com/ava-labs/avalanchego/utils/timer/mockable"
     "github.com/ava-labs/avalanchego/utils/units"
     "github.com/ava-labs/avalanchego/vms/components/chain"
-    "github.com/ava-labs/coreth/plugin/evm/vmsync"
     "github.com/ava-labs/firewood-go-ethhash/ffi"
     "github.com/ava-labs/libevm/common"
     "github.com/ava-labs/libevm/core/rawdb"
@@ -87,6 +86,7 @@ import (
     avalanchegoprometheus "github.com/ava-labs/avalanchego/vms/evm/metrics/prometheus"
     ethparams "github.com/ava-labs/libevm/params"
     subnetevmlog "github.com/ava-labs/subnet-evm/plugin/evm/log"
+    vmsync "github.com/ava-labs/subnet-evm/plugin/evm/sync"
     statesyncclient "github.com/ava-labs/subnet-evm/sync/client"
     handlerstats "github.com/ava-labs/subnet-evm/sync/handlers/stats"
     avalancheRPC "github.com/gorilla/rpc/v2"
@@ -622,7 +622,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash, ethConfig ethconfig.
         eth.Settings{MaxBlocksPerRequest: vm.config.MaxBlocksPerRequest},
         lastAcceptedHash,
         dummy.NewFaker(),
-        &vm.clock,
+        vm.clock,
     )
     if err != nil {
         return err
diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go
index 3ccd0c8b2d..d959353a39 100644
--- a/sync/statesync/sync_test.go
+++ b/sync/statesync/sync_test.go
@@ -54,7 +54,7 @@ func testSync(t *testing.T, test syncTest) {
     }
     r := rand.New(rand.NewSource(1))
     clientDB, serverDB, serverTrieDB, root := test.prepareForTest(t, r)
-    leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats())
+    leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats())
     codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats())
     mockClient := statesyncclient.NewTestClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil)
     // Set intercept functions for the mock client
@@ -589,8 +589,8 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database
     assert.Equal(t, trieAccountLeaves, numSnapshotAccounts)
 }
 
-func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB *triedb.Database, root common.Hash, numAccounts int) common.Hash {
-    newRoot, _ := statesynctest.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, _ int, account types.StateAccount) types.StateAccount {
+func fillAccountsWithStorage(t *testing.T, r *rand.Rand, serverDB ethdb.Database, serverTrieDB *triedb.Database, root common.Hash, numAccounts int) common.Hash {
+    newRoot, _ := statesynctest.FillAccounts(t, r, serverTrieDB, root, numAccounts, func(t *testing.T, _ int, account types.StateAccount) types.StateAccount {
         codeBytes := make([]byte, 256)
         _, err := rand.Read(codeBytes)
         if err != nil {
@@ -603,7 +603,7 @@ func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB
 
         // now create state trie
         numKeys := 16
-        account.Root, _, _ = statesynctest.GenerateTrie(t, serverTrieDB, numKeys, common.HashLength)
+        account.Root, _, _ = statesynctest.GenerateTrie(t, r, serverTrieDB, numKeys, common.HashLength)
         return account
     })
     return newRoot

From 40abb3333e4714fc29bc85049c19a8f1c4cd643e Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Fri, 10 Oct 2025 11:19:41 -0400
Subject: [PATCH 20/26] reduce diff

---
 plugin/evm/sync/client.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/plugin/evm/sync/client.go b/plugin/evm/sync/client.go
index d1391e0048..eae7fc2865 100644
--- a/plugin/evm/sync/client.go
+++ b/plugin/evm/sync/client.go
@@ -162,7 +162,6 @@ func (client *client) ClearOngoingSummary() error {
     return nil
 }
 
-// ParseStateSummary parses [summaryBytes] to [commonEng.Summary]
 func (client *client) ParseStateSummary(_ context.Context, summaryBytes []byte) (block.StateSummary, error) {
     return client.Parser.Parse(summaryBytes, client.acceptSyncSummary)
 }

From ba927d538cd910f9a88945a3873b145116900692 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Fri, 10 Oct 2025 11:30:29 -0400
Subject: [PATCH 21/26] lint

---
 sync/statesync/statesynctest/test_sync.go | 20 --------------------
 sync/statesync/sync_test.go               | 18 ++++++++----------
 2 files changed, 8 insertions(+), 30 deletions(-)

diff --git a/sync/statesync/statesynctest/test_sync.go b/sync/statesync/statesynctest/test_sync.go
index e380ca9d60..403670185d 100644
--- a/sync/statesync/statesynctest/test_sync.go
+++ b/sync/statesync/statesynctest/test_sync.go
@@ -89,26 +89,6 @@ func AssertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database
     assert.Equal(t, trieAccountLeaves, numSnapshotAccounts)
 }
 
-func FillAccountsWithStorage(t *testing.T, r *rand.Rand, serverDB ethdb.Database, serverTrieDB *triedb.Database, root common.Hash, numAccounts int) common.Hash {
-    newRoot, _ := FillAccounts(t, r, serverTrieDB, root, numAccounts, func(t *testing.T, _ int, account types.StateAccount) types.StateAccount {
-        codeBytes := make([]byte, 256)
-        _, err := r.Read(codeBytes)
-        if err != nil {
-            t.Fatalf("error reading random code bytes: %v", err)
-        }
-
-        codeHash := crypto.Keccak256Hash(codeBytes)
-        rawdb.WriteCode(serverDB, codeHash, codeBytes)
-        account.CodeHash = codeHash[:]
-
-        // now create state trie
-        numKeys := 16
-        account.Root, _, _ = GenerateTrie(t, r, serverTrieDB, numKeys, common.HashLength)
-        return account
-    })
-    return newRoot
-}
-
 // FillAccountsWithOverlappingStorage adds [numAccounts] randomly generated accounts to the secure trie at [root]
 // and commits it to [trieDB]. For each 3 accounts created:
 // - One does not have a storage trie,
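The churn in these test helpers comes from threading one seeded `*rand.Rand` (`rand.New(rand.NewSource(1))` in `testSync`) through every fill function instead of reading the global source, so a failing run reproduces byte-for-byte. The pattern in isolation:

```go
package main

import (
	"fmt"
	"math/rand"
)

// fillCode stands in for the account-filling helpers: all randomness
// flows from the caller's seeded source, never from the global one.
func fillCode(r *rand.Rand, n int) []byte {
	codeBytes := make([]byte, n)
	r.Read(codeBytes) // (*rand.Rand).Read always fills p and returns a nil error
	return codeBytes
}

func main() {
	a := fillCode(rand.New(rand.NewSource(1)), 8)
	b := fillCode(rand.New(rand.NewSource(1)), 8)
	fmt.Printf("%x\n%x\n", a, b) // identical: same seed, same bytes
}
```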
diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go
index d959353a39..bcaf953743 100644
--- a/sync/statesync/sync_test.go
+++ b/sync/statesync/sync_test.go
@@ -149,7 +149,7 @@ func TestSimpleSyncCases(t *testing.T) {
             prepareForTest: func(t *testing.T, r *rand.Rand) (ethdb.Database, ethdb.Database, *triedb.Database, common.Hash) {
                 serverDB := rawdb.NewMemoryDatabase()
                 serverTrieDB := triedb.NewDatabase(serverDB, nil)
-                root := statesynctest.FillAccountsWithStorage(t, r, serverDB, serverTrieDB, common.Hash{}, numAccounts)
+                root := fillAccountsWithStorage(t, r, serverDB, serverTrieDB, common.Hash{}, numAccounts)
                 return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root
             },
         },
@@ -191,7 +191,7 @@ func TestSimpleSyncCases(t *testing.T) {
             prepareForTest: func(t *testing.T, r *rand.Rand) (ethdb.Database, ethdb.Database, *triedb.Database, common.Hash) {
                 serverDB := rawdb.NewMemoryDatabase()
                 serverTrieDB := triedb.NewDatabase(serverDB, nil)
-                root := statesynctest.FillAccountsWithStorage(t, r, serverDB, serverTrieDB, common.Hash{}, numAccountsSmall)
+                root := fillAccountsWithStorage(t, r, serverDB, serverTrieDB, common.Hash{}, numAccountsSmall)
                 return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root
             },
             GetCodeIntercept: func(_ []common.Hash, _ [][]byte) ([][]byte, error) {
@@ -213,7 +213,7 @@ func TestCancelSync(t *testing.T) {
     serverDB := rawdb.NewMemoryDatabase()
     serverTrieDB := triedb.NewDatabase(serverDB, nil)
     // Create trie with 2000 accounts (more than one leaf request)
-    root := statesynctest.FillAccountsWithStorage(t, r, serverDB, serverTrieDB, common.Hash{}, 2000)
+    root := fillAccountsWithStorage(t, r, serverDB, serverTrieDB, common.Hash{}, 2000)
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
     testSync(t, syncTest{
@@ -589,13 +589,11 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database
     assert.Equal(t, trieAccountLeaves, numSnapshotAccounts)
 }
 
-func fillAccountsWithStorage(t *testing.T, r *rand.Rand, serverDB ethdb.Database, serverTrieDB *triedb.Database, root common.Hash, numAccounts int) common.Hash {
-    newRoot, _ := statesynctest.FillAccounts(t, r, serverTrieDB, root, numAccounts, func(t *testing.T, _ int, account types.StateAccount) types.StateAccount {
+func fillAccountsWithStorage(t *testing.T, r *rand.Rand, serverDB ethdb.Database, serverTrieDB *triedb.Database, root common.Hash, numAccounts int) common.Hash { //nolint:unparam
+    newRoot, _ := statesynctest.FillAccounts(t, r, serverTrieDB, root, numAccounts, func(_ *testing.T, _ int, account types.StateAccount) types.StateAccount {
         codeBytes := make([]byte, 256)
-        _, err := rand.Read(codeBytes)
-        if err != nil {
-            t.Fatalf("error reading random code bytes: %v", err)
-        }
+        _, err := r.Read(codeBytes)
+        require.NoError(t, err, "error reading random code bytes")
 
         codeHash := crypto.Keccak256Hash(codeBytes)
         rawdb.WriteCode(serverDB, codeHash, codeBytes)
@@ -614,7 +612,7 @@ func TestDifferentWaitContext(t *testing.T) {
     serverDB := rawdb.NewMemoryDatabase()
     serverTrieDB := triedb.NewDatabase(serverDB, nil)
     // Create trie with many accounts to ensure sync takes time
-    root := statesynctest.FillAccountsWithStorage(t, r, serverDB, serverTrieDB, common.Hash{}, 2000)
+    root := fillAccountsWithStorage(t, r, serverDB, serverTrieDB, common.Hash{}, 2000)
     clientDB := rawdb.NewMemoryDatabase()
 
     // Track requests to show sync continues after Wait returns

From cced3cad38a025d73f1afb376469446e61fa2e62 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Mon, 13 Oct 2025 10:52:19 -0400
Subject: [PATCH 22/26] fix merge

---
 consensus/dummy/consensus.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index 092dd8aebf..0d0675a4d5 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -59,12 +59,12 @@ type (
         OnFinalizeAndAssemble OnFinalizeAndAssembleCallbackType
         OnExtraStateChange    OnExtraStateChangeType
     }
-
-    DummyEngine struct {
-        consensusMode      Mode
-        desiredDelayExcess *acp226.DelayExcess
-    }
-}
+
+    DummyEngine struct {
+        consensusMode      Mode
+        desiredDelayExcess *acp226.DelayExcess
+    }
+)
 
 func NewDummyEngine(
     mode Mode,

From e84bf4a29f992c2f50e80a24dfc34dbb12ac00fe Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Tue, 14 Oct 2025 12:42:39 -0400
Subject: [PATCH 23/26] Update plugin/evm/message/handler.go

Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com>
Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com>
---
 plugin/evm/message/handler.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugin/evm/message/handler.go b/plugin/evm/message/handler.go
index e7af9feb53..42a5319249 100644
--- a/plugin/evm/message/handler.go
+++ b/plugin/evm/message/handler.go
@@ -32,7 +32,7 @@ type ResponseHandler interface {
 
 type NoopRequestHandler struct{}
 
-func (NoopRequestHandler) HandleLeafsRequest(_ context.Context, _ ids.NodeID, _ uint32, _ LeafsRequest) ([]byte, error) {
+func (NoopRequestHandler) HandleLeafsRequest(context.Context, ids.NodeID, uint32, LeafsRequest) ([]byte, error) {
     return nil, nil
 }
 
From 2356b604ce9b0da01960d0bcb68e3fcfae08a879 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Tue, 14 Oct 2025 12:44:43 -0400
Subject: [PATCH 24/26] Update sync/client/test_client.go

Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com>
Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com>
---
 sync/client/test_client.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sync/client/test_client.go b/sync/client/test_client.go
index b6f7970aae..cc81e5bad4 100644
--- a/sync/client/test_client.go
+++ b/sync/client/test_client.go
@@ -144,7 +144,7 @@ func newTestBlockParser() *testBlockParser {
     return &testBlockParser{}
 }
 
-func (_ *testBlockParser) ParseEthBlock(b []byte) (*types.Block, error) {
+func (*testBlockParser) ParseEthBlock(b []byte) (*types.Block, error) {
     block := new(types.Block)
     if err := rlp.DecodeBytes(b, block); err != nil {
         return nil, fmt.Errorf("%w: %w", errUnmarshalResponse, err)

From 509e349e142ec5615c54395cc75e77a74d0a3d70 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Tue, 14 Oct 2025 12:48:51 -0400
Subject: [PATCH 25/26] Update sync/client/stats/stats.go

Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com>
Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com>
---
 sync/client/stats/stats.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sync/client/stats/stats.go b/sync/client/stats/stats.go
index 00e2e140dc..97cbc9d4a7 100644
--- a/sync/client/stats/stats.go
+++ b/sync/client/stats/stats.go
@@ -77,7 +77,7 @@ func (m *messageMetric) UpdateRequestLatency(duration time.Duration) {
 
 type clientSyncerStats struct {
     leafMetrics map[message.NodeType]MessageMetric
-    codeRequestMetric,
+    codeRequestMetric  MessageMetric
     blockRequestMetric MessageMetric
 }

From 33837affc74b100b3eef0bf7cfde0c2d2545c2f8 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Tue, 14 Oct 2025 12:54:23 -0400
Subject: [PATCH 26/26] lint

---
 sync/client/stats/stats.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sync/client/stats/stats.go b/sync/client/stats/stats.go
index 97cbc9d4a7..6d146f7f2c 100644
--- a/sync/client/stats/stats.go
+++ b/sync/client/stats/stats.go
@@ -76,8 +76,8 @@ func (m *messageMetric) UpdateRequestLatency(duration time.Duration) {
 }
 
 type clientSyncerStats struct {
-    leafMetrics map[message.NodeType]MessageMetric
-    codeRequestMetric  MessageMetric
+    leafMetrics        map[message.NodeType]MessageMetric
+    codeRequestMetric  MessageMetric
     blockRequestMetric MessageMetric
 }
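The last few review commits converge on one Go idiom: when an implementation uses neither its receiver nor an argument, drop the name entirely rather than writing `_` placeholders. A compact pair of examples mirroring the shapes of `testBlockParser` and `NoopRequestHandler` (names simplified, not the actual types):

```go
package main

import "fmt"

type parser struct{}

// Unnamed receiver: the method never touches parser state,
// so nothing is bound (preferred over a `_` placeholder).
func (*parser) Parse(b []byte) (int, error) {
	return len(b), nil
}

type noopHandler struct{}

// All parameters unnamed: a no-op that still satisfies an interface signature.
func (noopHandler) Handle([]byte, uint32) ([]byte, error) {
	return nil, nil
}

func main() {
	var p parser
	n, _ := p.Parse([]byte("abc"))
	fmt.Println(n) // 3

	out, err := noopHandler{}.Handle(nil, 0)
	fmt.Println(out, err) // [] <nil>
}
```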