diff --git a/plugin/evm/atomic_backend.go b/plugin/evm/atomic/atomic_backend.go similarity index 97% rename from plugin/evm/atomic_backend.go rename to plugin/evm/atomic/atomic_backend.go index 2420021d6f..5156a8a6e8 100644 --- a/plugin/evm/atomic_backend.go +++ b/plugin/evm/atomic/atomic_backend.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "encoding/binary" @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/plugin/evm/atomic" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -24,6 +23,11 @@ import ( var _ AtomicBackend = &atomicBackend{} +var ( + atomicTrieDBPrefix = []byte("atomicTrieDB") + atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") +) + // AtomicBackend abstracts the verification and processing // of atomic transactions type AtomicBackend interface { @@ -34,7 +38,7 @@ type AtomicBackend interface { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. - InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) + InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) // Returns an AtomicState corresponding to a block hash that has been inserted // but not Accepted or Rejected yet. 
@@ -152,7 +156,7 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { // iterate over the transactions, indexing them if the height is < commit height // otherwise, add the atomic operations from the transaction to the uncommittedOpsMap height = binary.BigEndian.Uint64(iter.Key()) - txs, err := atomic.ExtractAtomicTxs(iter.Value(), true, a.codec) + txs, err := ExtractAtomicTxs(iter.Value(), true, a.codec) if err != nil { return err } @@ -397,7 +401,7 @@ func (a *atomicBackend) SetLastAccepted(lastAcceptedHash common.Hash) { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. -func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) { +func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) { // access the atomic trie at the parent block parentRoot, err := a.getAtomicRootAt(parentHash) if err != nil { @@ -460,11 +464,11 @@ func (a *atomicBackend) AtomicTrie() AtomicTrie { // mergeAtomicOps merges atomic requests represented by [txs] // to the [output] map, depending on whether [chainID] is present in the map. -func mergeAtomicOps(txs []*atomic.Tx) (map[ids.ID]*avalancheatomic.Requests, error) { +func mergeAtomicOps(txs []*Tx) (map[ids.ID]*avalancheatomic.Requests, error) { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. 
- copyTxs := make([]*atomic.Tx, len(txs)) + copyTxs := make([]*Tx, len(txs)) copy(copyTxs, txs) utils.Sort(copyTxs) txs = copyTxs diff --git a/plugin/evm/atomic_state.go b/plugin/evm/atomic/atomic_state.go similarity index 97% rename from plugin/evm/atomic_state.go rename to plugin/evm/atomic/atomic_state.go index 911f1afb3a..5b64145d62 100644 --- a/plugin/evm/atomic_state.go +++ b/plugin/evm/atomic/atomic_state.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "fmt" @@ -9,7 +9,6 @@ import ( avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -37,7 +36,7 @@ type atomicState struct { backend *atomicBackend blockHash common.Hash blockHeight uint64 - txs []*atomic.Tx + txs []*Tx atomicOps map[ids.ID]*avalancheatomic.Requests atomicRoot common.Hash } diff --git a/plugin/evm/atomic_syncer.go b/plugin/evm/atomic/atomic_syncer.go similarity index 95% rename from plugin/evm/atomic_syncer.go rename to plugin/evm/atomic/atomic_syncer.go index d68d61d597..52a8376319 100644 --- a/plugin/evm/atomic_syncer.go +++ b/plugin/evm/atomic/atomic_syncer.go @@ -1,7 +1,7 @@ // (c) 2019-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "bytes" @@ -24,6 +24,15 @@ var ( _ syncclient.LeafSyncTask = &atomicSyncerLeafTask{} ) +// Syncer represents a step in state sync, +// along with Start/Done methods to control +// and monitor progress. +// Error returns an error if any was encountered. +type Syncer interface { + Start(ctx context.Context) error + Done() <-chan error +} + // atomicSyncer is used to sync the atomic trie from the network. 
The CallbackLeafSyncer // is responsible for orchestrating the sync while atomicSyncer is responsible for maintaining // the state of progress and writing the actual atomic trie to the trieDB. diff --git a/plugin/evm/atomic_syncer_test.go b/plugin/evm/atomic/atomic_syncer_test.go similarity index 97% rename from plugin/evm/atomic_syncer_test.go rename to plugin/evm/atomic/atomic_syncer_test.go index 86589cc4d8..6ff0da417a 100644 --- a/plugin/evm/atomic_syncer_test.go +++ b/plugin/evm/atomic/atomic_syncer_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "bytes" @@ -25,6 +25,7 @@ import ( "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/triedb" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" ) @@ -56,7 +57,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight if err != nil { t.Fatal("could not initialize atomix tx repository", err) } - atomicBackend, err := NewAtomicBackend(clientDB, testSharedMemory(), nil, repo, 0, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(clientDB, utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, commitInterval) if err != nil { t.Fatal("could not initialize atomic backend", err) } diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic/atomic_trie.go similarity index 90% rename from plugin/evm/atomic_trie.go rename to plugin/evm/atomic/atomic_trie.go index d734268e23..bbb299a391 100644 --- a/plugin/evm/atomic_trie.go +++ b/plugin/evm/atomic/atomic_trie.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "fmt" @@ -9,7 +9,7 @@ import ( avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/database" + avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/trie/trienode" "github.com/ava-labs/coreth/triedb" @@ -117,12 +117,12 @@ type AtomicTrieIterator interface { // atomicTrie implements the AtomicTrie interface type atomicTrie struct { - commitInterval uint64 // commit interval, same as commitHeightInterval by default - metadataDB database.Database // Underlying database containing the atomic trie metadata - trieDB *triedb.Database // Trie database - lastCommittedRoot common.Hash // trie root of the most recent commit - lastCommittedHeight uint64 // index height of the most recent commit - lastAcceptedRoot common.Hash // most recent trie root passed to accept trie or the root of the atomic trie on intialization. + commitInterval uint64 // commit interval, same as commitHeightInterval by default + metadataDB avalanchedatabase.Database // Underlying database containing the atomic trie metadata + trieDB *triedb.Database // Trie database + lastCommittedRoot common.Hash // trie root of the most recent commit + lastCommittedHeight uint64 // index height of the most recent commit + lastAcceptedRoot common.Hash // most recent trie root passed to accept trie or the root of the atomic trie on intialization. 
codec codec.Manager memoryCap common.StorageSize tipBuffer *core.BoundedBuffer[common.Hash] @@ -131,7 +131,7 @@ type atomicTrie struct { // newAtomicTrie returns a new instance of a atomicTrie with a configurable commitHeightInterval, used in testing. // Initializes the trie before returning it. func newAtomicTrie( - atomicTrieDB database.Database, metadataDB database.Database, + atomicTrieDB avalanchedatabase.Database, metadataDB avalanchedatabase.Database, codec codec.Manager, lastAcceptedHeight uint64, commitHeightInterval uint64, ) (*atomicTrie, error) { root, height, err := lastCommittedRootIfExists(metadataDB) @@ -153,7 +153,7 @@ func newAtomicTrie( } trieDB := triedb.NewDatabase( - rawdb.NewDatabase(Database{atomicTrieDB}), + rawdb.NewDatabase(database.WrapDatabase(atomicTrieDB)), &triedb.Config{ HashDB: &hashdb.Config{ CleanCacheSize: 64 * units.MiB, // Allocate 64MB of memory for clean cache @@ -182,17 +182,17 @@ func newAtomicTrie( // else returns empty common.Hash{} and 0 // returns error only if there are issues with the underlying data store // or if values present in the database are not as expected -func lastCommittedRootIfExists(db database.Database) (common.Hash, uint64, error) { +func lastCommittedRootIfExists(db avalanchedatabase.Database) (common.Hash, uint64, error) { // read the last committed entry if it exists and set the root hash lastCommittedHeightBytes, err := db.Get(lastCommittedKey) switch { - case err == database.ErrNotFound: + case err == avalanchedatabase.ErrNotFound: return common.Hash{}, 0, nil case err != nil: return common.Hash{}, 0, err } - height, err := database.ParseUInt64(lastCommittedHeightBytes) + height, err := avalanchedatabase.ParseUInt64(lastCommittedHeightBytes) if err != nil { return common.Hash{}, 0, fmt.Errorf("expected value at lastCommittedKey to be a valid uint64: %w", err) } @@ -224,7 +224,7 @@ func (a *atomicTrie) commit(height uint64, root common.Hash) error { func (a *atomicTrie) UpdateTrie(trie *trie.Trie, 
height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { for blockchainID, requests := range atomicOps { - valueBytes, err := a.codec.Marshal(atomic.CodecVersion, requests) + valueBytes, err := a.codec.Marshal(CodecVersion, requests) if err != nil { // highly unlikely but possible if atomic.Element // has a change that is unsupported by the codec @@ -251,7 +251,7 @@ func (a *atomicTrie) LastCommitted() (common.Hash, uint64) { // updateLastCommitted adds [height] -> [root] to the index and marks it as the last committed // root/height pair. func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error { - heightBytes := database.PackUInt64(height) + heightBytes := avalanchedatabase.PackUInt64(height) // now save the trie hash against the height it was committed at if err := a.metadataDB.Put(heightBytes, root[:]); err != nil { @@ -297,7 +297,7 @@ func (a *atomicTrie) Root(height uint64) (common.Hash, error) { // getRoot is a helper function to return the committed atomic trie root hash at [height] // from [metadataDB]. 
-func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { +func getRoot(metadataDB avalanchedatabase.Database, height uint64) (common.Hash, error) { if height == 0 { // if root is queried at height == 0, return the empty root hash // this may occur if peers ask for the most recent state summary @@ -305,10 +305,10 @@ func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { return types.EmptyRootHash, nil } - heightBytes := database.PackUInt64(height) + heightBytes := avalanchedatabase.PackUInt64(height) hash, err := metadataDB.Get(heightBytes) switch { - case err == database.ErrNotFound: + case err == avalanchedatabase.ErrNotFound: return common.Hash{}, nil case err != nil: return common.Hash{}, err diff --git a/plugin/evm/atomic_trie_iterator.go b/plugin/evm/atomic/atomic_trie_iterator.go similarity index 99% rename from plugin/evm/atomic_trie_iterator.go rename to plugin/evm/atomic/atomic_trie_iterator.go index 2bdf90b581..20be76416e 100644 --- a/plugin/evm/atomic_trie_iterator.go +++ b/plugin/evm/atomic/atomic_trie_iterator.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "encoding/binary" diff --git a/plugin/evm/atomic_trie_iterator_test.go b/plugin/evm/atomic/atomic_trie_iterator_test.go similarity index 74% rename from plugin/evm/atomic_trie_iterator_test.go rename to plugin/evm/atomic/atomic_trie_iterator_test.go index 21ec9913f9..d2810f14eb 100644 --- a/plugin/evm/atomic_trie_iterator_test.go +++ b/plugin/evm/atomic/atomic_trie_iterator_test.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "testing" @@ -10,23 +10,17 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/coreth/plugin/evm/atomic" + avalancheutils "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func testSharedMemory() avalancheatomic.SharedMemory { - m := avalancheatomic.NewMemory(memdb.New()) - return m.NewSharedMemory(testCChainID) -} - func TestIteratorCanIterate(t *testing.T) { lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) assert.NoError(t, err) // create state with multiple transactions @@ -35,9 +29,11 @@ func TestIteratorCanIterate(t *testing.T) { operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) + snowCtx := utils.TestSnowContext() + // create an atomic trie // on create it will initialize all the transactions from the above atomic repository - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 100) + atomicBackend, err := NewAtomicBackend(db, snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 100) assert.NoError(t, err) atomicTrie1 := atomicBackend.AtomicTrie() @@ -46,11 +42,11 @@ func TestIteratorCanIterate(t *testing.T) { assert.NotEqual(t, common.Hash{}, lastCommittedHash1) assert.EqualValues(t, 1000, lastCommittedHeight1) - verifyOperations(t, atomicTrie1, codec, lastCommittedHash1, 1, 1000, operationsMap) + verifyOperations(t, atomicTrie1, testTxCodec, 
lastCommittedHash1, 1, 1000, operationsMap) // iterate on a new atomic trie to make sure there is no resident state affecting the data and the // iterator - atomicBackend2, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 100) + atomicBackend2, err := NewAtomicBackend(db, snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 100) assert.NoError(t, err) atomicTrie2 := atomicBackend2.AtomicTrie() lastCommittedHash2, lastCommittedHeight2 := atomicTrie2.LastCommitted() @@ -58,15 +54,14 @@ func TestIteratorCanIterate(t *testing.T) { assert.NotEqual(t, common.Hash{}, lastCommittedHash2) assert.EqualValues(t, 1000, lastCommittedHeight2) - verifyOperations(t, atomicTrie2, codec, lastCommittedHash1, 1, 1000, operationsMap) + verifyOperations(t, atomicTrie2, testTxCodec, lastCommittedHash1, 1, 1000, operationsMap) } func TestIteratorHandlesInvalidData(t *testing.T) { require := require.New(t) lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) require.NoError(err) // create state with multiple transactions @@ -77,8 +72,10 @@ func TestIteratorHandlesInvalidData(t *testing.T) { // create an atomic trie // on create it will initialize all the transactions from the above atomic repository + snowCtx := utils.TestSnowContext() + commitInterval := uint64(100) - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(db, snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, commitInterval) require.NoError(err) atomicTrie := atomicBackend.AtomicTrie() @@ -87,13 +84,13 @@ func TestIteratorHandlesInvalidData(t *testing.T) { require.NotEqual(common.Hash{}, lastCommittedHash) require.EqualValues(1000, lastCommittedHeight) - 
verifyOperations(t, atomicTrie, codec, lastCommittedHash, 1, 1000, operationsMap) + verifyOperations(t, atomicTrie, testTxCodec, lastCommittedHash, 1, 1000, operationsMap) // Add a random key-value pair to the atomic trie in order to test that the iterator correctly // handles an error when it runs into an unexpected key-value pair in the trie. atomicTrieSnapshot, err := atomicTrie.OpenTrie(lastCommittedHash) require.NoError(err) - require.NoError(atomicTrieSnapshot.Update(utils.RandomBytes(50), utils.RandomBytes(50))) + require.NoError(atomicTrieSnapshot.Update(avalancheutils.RandomBytes(50), avalancheutils.RandomBytes(50))) nextRoot, nodes, err := atomicTrieSnapshot.Commit(false) require.NoError(err) diff --git a/plugin/evm/atomic_trie_test.go b/plugin/evm/atomic/atomic_trie_test.go similarity index 76% rename from plugin/evm/atomic_trie_test.go rename to plugin/evm/atomic/atomic_trie_test.go index 2a82964e94..9e29c6aa83 100644 --- a/plugin/evm/atomic_trie_test.go +++ b/plugin/evm/atomic/atomic_trie_test.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "encoding/binary" @@ -19,21 +19,13 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" ) const testCommitInterval = 100 -func mustAtomicOps(tx *atomic.Tx) map[ids.ID]*avalancheatomic.Requests { - id, reqs, err := tx.AtomicOps() - if err != nil { - panic(err) - } - return map[ids.ID]*avalancheatomic.Requests{id: reqs} -} - // indexAtomicTxs updates [tr] with entries in [atomicOps] at height by creating // a new snapshot, calculating a new root, and calling InsertTrie followed // by AcceptTrie on the new root. 
@@ -139,8 +131,7 @@ func TestAtomicTrieInitialize(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec - repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, test.lastAcceptedHeight) if err != nil { t.Fatal(err) } @@ -148,7 +139,7 @@ func TestAtomicTrieInitialize(t *testing.T) { writeTxs(t, repo, 1, test.lastAcceptedHeight+1, test.numTxsPerBlock, nil, operationsMap) // Construct the atomic trie for the first time - atomicBackend1, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + atomicBackend1, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -161,10 +152,10 @@ func TestAtomicTrieInitialize(t *testing.T) { } // Verify the operations up to the expected commit height - verifyOperations(t, atomicTrie1, codec, rootHash1, 1, test.expectedCommitHeight, operationsMap) + verifyOperations(t, atomicTrie1, testTxCodec, rootHash1, 1, test.expectedCommitHeight, operationsMap) // Construct the atomic trie again (on the same database) and ensure the last accepted root is correct. - atomicBackend2, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + atomicBackend2, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -173,7 +164,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // Construct the atomic trie again (on an empty database) and ensure that it produces the same hash. 
atomicBackend3, err := NewAtomicBackend( - versiondb.New(memdb.New()), testSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, + versiondb.New(memdb.New()), utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, ) if err != nil { t.Fatal(err) @@ -188,7 +179,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // during the initialization phase will cause an invalid root when indexing continues. nextCommitHeight := nearestCommitHeight(test.lastAcceptedHeight+test.commitInterval, test.commitInterval) for i := test.lastAcceptedHeight + 1; i <= nextCommitHeight; i++ { - txs := atomic.NewTestTxs(test.numTxsPerBlock(i)) + txs := NewTestTxs(test.numTxsPerBlock(i)) if err := repo.Write(i, txs); err != nil { t.Fatal(err) } @@ -207,11 +198,11 @@ func TestAtomicTrieInitialize(t *testing.T) { assert.NotEqual(t, common.Hash{}, updatedRoot) // Verify the operations up to the new expected commit height - verifyOperations(t, atomicTrie1, codec, updatedRoot, 1, updatedLastCommitHeight, operationsMap) + verifyOperations(t, atomicTrie1, testTxCodec, updatedRoot, 1, updatedLastCommitHeight, operationsMap) // Generate a new atomic trie to compare the root against. 
atomicBackend4, err := NewAtomicBackend( - versiondb.New(memdb.New()), testSharedMemory(), nil, repo, nextCommitHeight, common.Hash{}, test.commitInterval, + versiondb.New(memdb.New()), utils.TestSnowContext().SharedMemory, nil, repo, nextCommitHeight, common.Hash{}, test.commitInterval, ) if err != nil { t.Fatal(err) @@ -228,14 +219,13 @@ func TestAtomicTrieInitialize(t *testing.T) { func TestIndexerInitializesOnlyOnce(t *testing.T) { lastAcceptedHeight := uint64(25) db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval*/) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval*/) assert.NoError(t, err) atomicTrie := atomicBackend.AtomicTrie() @@ -247,11 +237,11 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { // re-initialize the atomic trie since initialize is not supposed to run again the height // at the trie should still be the old height with the old commit hash without any changes. // This scenario is not realistic, but is used to test potential double initialization behavior. 
- err = repo.Write(15, []*atomic.Tx{atomic.GenerateTestExportTx()}) + err = repo.Write(15, []*Tx{GenerateTestExportTx()}) assert.NoError(t, err) // Re-initialize the atomic trie - atomicBackend, err = NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval */) + atomicBackend, err = NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval */) assert.NoError(t, err) atomicTrie = atomicBackend.AtomicTrie() @@ -262,11 +252,11 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { func newTestAtomicTrie(t *testing.T) AtomicTrie { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, atomic.TestTxCodec, 0) + repo, err := NewAtomicTxRepository(db, testTxCodec, 0) if err != nil { t.Fatal(err) } - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, 0, common.Hash{}, testCommitInterval) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, testCommitInterval) if err != nil { t.Fatal(err) } @@ -282,8 +272,9 @@ func TestIndexerWriteAndRead(t *testing.T) { // process 305 blocks so that we get three commits (100, 200, 300) for height := uint64(1); height <= testCommitInterval*3+5; /*=305*/ height++ { - atomicRequests := mustAtomicOps(atomic.GenerateTestImportTx()) - err := indexAtomicTxs(atomicTrie, height, atomicRequests) + atomicRequests, err := ConvertToAtomicOps(GenerateTestImportTx()) + assert.NoError(t, err) + err = indexAtomicTxs(atomicTrie, height, atomicRequests) assert.NoError(t, err) if height%testCommitInterval == 0 { lastCommittedBlockHash, lastCommittedBlockHeight = atomicTrie.LastCommitted() @@ -313,11 +304,11 @@ func TestAtomicOpsAreNotTxOrderDependent(t *testing.T) { atomicTrie2 := newTestAtomicTrie(t) for height := uint64(0); height <= testCommitInterval; /*=205*/ height++ { - tx1 := atomic.GenerateTestImportTx() - tx2 := 
atomic.GenerateTestImportTx() - atomicRequests1, err := mergeAtomicOps([]*atomic.Tx{tx1, tx2}) + tx1 := GenerateTestImportTx() + tx2 := GenerateTestImportTx() + atomicRequests1, err := mergeAtomicOps([]*Tx{tx1, tx2}) assert.NoError(t, err) - atomicRequests2, err := mergeAtomicOps([]*atomic.Tx{tx2, tx1}) + atomicRequests2, err := mergeAtomicOps([]*Tx{tx2, tx1}) assert.NoError(t, err) err = indexAtomicTxs(atomicTrie1, height, atomicRequests1) @@ -339,8 +330,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { commitInterval := uint64(10) expectedCommitHeight := uint64(100) db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) if err != nil { t.Fatal(err) } @@ -353,7 +343,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { 14: {}, } // Construct the atomic trie for the first time - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), bonusBlocks, repo, lastAcceptedHeight, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, bonusBlocks, repo, lastAcceptedHeight, common.Hash{}, commitInterval) if err != nil { t.Fatal(err) } @@ -364,14 +354,16 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { assert.NotEqual(t, common.Hash{}, rootHash) // Verify the operations are as expected - verifyOperations(t, atomicTrie, codec, rootHash, 1, expectedCommitHeight, operationsMap) + verifyOperations(t, atomicTrie, testTxCodec, rootHash, 1, expectedCommitHeight, operationsMap) } func TestIndexingNilShouldNotImpactTrie(t *testing.T) { // operations to index ops := make([]map[ids.ID]*avalancheatomic.Requests, 0) for i := 0; i <= testCommitInterval; i++ { - ops = append(ops, mustAtomicOps(atomic.GenerateTestImportTx())) + atomicOps, err := ConvertToAtomicOps(GenerateTestImportTx()) + assert.NoError(t, err) + ops = append(ops, atomicOps) } 
// without nils @@ -411,75 +403,6 @@ func TestIndexingNilShouldNotImpactTrie(t *testing.T) { assert.Equal(t, root1, root2) } -type sharedMemories struct { - thisChain avalancheatomic.SharedMemory - peerChain avalancheatomic.SharedMemory - thisChainID ids.ID - peerChainID ids.ID -} - -func (s *sharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*avalancheatomic.Requests) error { - for _, reqs := range ops { - puts := make(map[ids.ID]*avalancheatomic.Requests) - puts[s.thisChainID] = &avalancheatomic.Requests{} - for _, key := range reqs.RemoveRequests { - val := []byte{0x1} - puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &avalancheatomic.Element{Key: key, Value: val}) - } - if err := s.peerChain.Apply(puts); err != nil { - return err - } - } - return nil -} - -func (s *sharedMemories) assertOpsApplied(t *testing.T, ops map[ids.ID]*avalancheatomic.Requests) { - t.Helper() - for _, reqs := range ops { - // should be able to get put requests - for _, elem := range reqs.PutRequests { - val, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) - if err != nil { - t.Fatalf("error finding puts in peerChainMemory: %s", err) - } - assert.Equal(t, elem.Value, val[0]) - } - - // should not be able to get remove requests - for _, key := range reqs.RemoveRequests { - _, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) - assert.EqualError(t, err, "not found") - } - } -} - -func (s *sharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*avalancheatomic.Requests) { - t.Helper() - for _, reqs := range ops { - // should not be able to get put requests - for _, elem := range reqs.PutRequests { - _, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) - assert.EqualError(t, err, "not found") - } - - // should be able to get remove requests (these were previously added as puts on peerChain) - for _, key := range reqs.RemoveRequests { - val, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) - assert.NoError(t, err) - 
assert.Equal(t, []byte{0x1}, val[0]) - } - } -} - -func newSharedMemories(atomicMemory *avalancheatomic.Memory, thisChainID, peerChainID ids.ID) *sharedMemories { - return &sharedMemories{ - thisChain: atomicMemory.NewSharedMemory(thisChainID), - peerChain: atomicMemory.NewSharedMemory(peerChainID), - thisChainID: thisChainID, - peerChainID: peerChainID, - } -} - func TestApplyToSharedMemory(t *testing.T) { type test struct { commitInterval, lastAcceptedHeight uint64 @@ -511,9 +434,9 @@ func TestApplyToSharedMemory(t *testing.T) { commitInterval: 10, lastAcceptedHeight: 25, setMarker: func(a *atomicBackend) error { - cursor := make([]byte, wrappers.LongLen+len(atomic.TestBlockchainID[:])) + cursor := make([]byte, wrappers.LongLen+len(TestBlockchainID[:])) binary.BigEndian.PutUint64(cursor, 10) - copy(cursor[wrappers.LongLen:], atomic.TestBlockchainID[:]) + copy(cursor[wrappers.LongLen:], TestBlockchainID[:]) return a.metadataDB.Put(appliedSharedMemoryCursorKey, cursor) }, expectOpsApplied: func(height uint64) bool { return height > 10 && height <= 20 }, @@ -527,15 +450,14 @@ func TestApplyToSharedMemory(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec - repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, test.lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, test.lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository m := avalancheatomic.NewMemory(db) - sharedMemories := newSharedMemories(m, testCChainID, atomic.TestBlockchainID) + sharedMemories := NewSharedMemories(m, ids.GenerateTestID(), TestBlockchainID) backend, err := NewAtomicBackend(db, sharedMemories.thisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) atomicTrie := 
backend.AtomicTrie().(*atomicTrie) @@ -558,7 +480,7 @@ func TestApplyToSharedMemory(t *testing.T) { // assert ops were applied as expected for height, ops := range operationsMap { if test.expectOpsApplied(height) { - sharedMemories.assertOpsApplied(t, ops) + sharedMemories.AssertOpsApplied(t, ops) } else { sharedMemories.assertOpsNotApplied(t, ops) } @@ -577,7 +499,7 @@ func TestApplyToSharedMemory(t *testing.T) { // assert they are as they were prior to reinitializing for height, ops := range operationsMap { if test.expectOpsApplied(height) { - sharedMemories.assertOpsApplied(t, ops) + sharedMemories.AssertOpsApplied(t, ops) } else { sharedMemories.assertOpsNotApplied(t, ops) } @@ -593,13 +515,12 @@ func TestApplyToSharedMemory(t *testing.T) { func BenchmarkAtomicTrieInit(b *testing.B) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) lastAcceptedHeight := uint64(25000) // add 25000 * 3 = 75000 transactions - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) @@ -611,7 +532,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - sharedMemory := testSharedMemory() + sharedMemory := utils.TestSnowContext().SharedMemory atomicBackend, err := NewAtomicBackend(db, sharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) assert.NoError(b, err) atomicTrie = atomicBackend.AtomicTrie() @@ -623,22 +544,21 @@ func BenchmarkAtomicTrieInit(b *testing.B) { b.StopTimer() // Verify operations - verifyOperations(b, atomicTrie, codec, hash, 1, lastAcceptedHeight, operationsMap) + verifyOperations(b, atomicTrie, testTxCodec, hash, 1, lastAcceptedHeight, operationsMap) } func BenchmarkAtomicTrieIterate(b *testing.B) { db := 
versiondb.New(memdb.New()) - codec := atomic.TestTxCodec operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) lastAcceptedHeight := uint64(25_000) // add 25000 * 3 = 75000 transactions - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 5000) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) assert.NoError(b, err) atomicTrie := atomicBackend.AtomicTrie() @@ -707,11 +627,10 @@ func BenchmarkApplyToSharedMemory(b *testing.B) { func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks uint64) { db := versiondb.New(disk) - codec := atomic.TestTxCodec - sharedMemory := testSharedMemory() + sharedMemory := utils.TestSnowContext().SharedMemory lastAcceptedHeight := blocks - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) assert.NoError(b, err) backend, err := NewAtomicBackend(db, sharedMemory, nil, repo, 0, common.Hash{}, 5000) @@ -720,7 +639,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u } trie := backend.AtomicTrie() for height := uint64(1); height <= lastAcceptedHeight; height++ { - txs := atomic.NewTestTxs(constTxsPerHeight(3)(height)) + txs := NewTestTxs(constTxsPerHeight(3)(height)) ops, err := mergeAtomicOps(txs) assert.NoError(b, err) assert.NoError(b, indexAtomicTxs(trie, height, ops)) @@ -733,7 +652,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - backend.(*atomicBackend).sharedMemory = testSharedMemory() + 
backend.(*atomicBackend).sharedMemory = utils.TestSnowContext().SharedMemory assert.NoError(b, backend.MarkApplyToSharedMemoryCursor(0)) assert.NoError(b, db.Commit()) assert.NoError(b, backend.ApplyToSharedMemory(lastAcceptedHeight)) diff --git a/plugin/evm/atomic_tx_repository.go b/plugin/evm/atomic/atomic_tx_repository.go similarity index 91% rename from plugin/evm/atomic_tx_repository.go rename to plugin/evm/atomic/atomic_tx_repository.go index d1074f60f2..d451ce9d86 100644 --- a/plugin/evm/atomic_tx_repository.go +++ b/plugin/evm/atomic/atomic_tx_repository.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "encoding/binary" @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/plugin/evm/atomic" ) const ( @@ -40,10 +39,10 @@ var ( // atomic transactions type AtomicTxRepository interface { GetIndexHeight() (uint64, error) - GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) - GetByHeight(height uint64) ([]*atomic.Tx, error) - Write(height uint64, txs []*atomic.Tx) error - WriteBonus(height uint64, txs []*atomic.Tx) error + GetByTxID(txID ids.ID) (*Tx, uint64, error) + GetByHeight(height uint64) ([]*Tx, error) + Write(height uint64, txs []*Tx) error + WriteBonus(height uint64, txs []*Tx) error IterateByHeight(start uint64) database.Iterator Codec() codec.Manager @@ -137,7 +136,7 @@ func (a *atomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) er // Get the tx iter is pointing to, len(txs) == 1 is expected here. 
txBytes := iterValue[wrappers.LongLen+wrappers.IntLen:] - tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) + tx, err := ExtractAtomicTx(txBytes, a.codec) if err != nil { return err } @@ -199,10 +198,10 @@ func (a *atomicTxRepository) GetIndexHeight() (uint64, error) { return indexHeight, nil } -// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*atomic.Tx] object +// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*Tx] object // if an entry is found, and returns it with the block height the atomic tx it // represents was accepted on, along with an optional error. -func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { +func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { indexedTxBytes, err := a.acceptedAtomicTxDB.Get(txID[:]) if err != nil { return nil, 0, err @@ -216,7 +215,7 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) packer := wrappers.Packer{Bytes: indexedTxBytes} height := packer.UnpackLong() txBytes := packer.UnpackBytes() - tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) + tx, err := ExtractAtomicTx(txBytes, a.codec) if err != nil { return nil, 0, err } @@ -230,40 +229,40 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) // no atomic transactions in the block accepted at [height]. 
// If [height] is greater than the last accepted height, then this will always return // [database.ErrNotFound] -func (a *atomicTxRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { +func (a *atomicTxRepository) GetByHeight(height uint64) ([]*Tx, error) { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.getByHeightBytes(heightBytes) } -func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { +func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*Tx, error) { txsBytes, err := a.acceptedAtomicTxByHeightDB.Get(heightBytes) if err != nil { return nil, err } - return atomic.ExtractAtomicTxsBatch(txsBytes, a.codec) + return ExtractAtomicTxsBatch(txsBytes, a.codec) } // Write updates indexes maintained on atomic txs, so they can be queried // by txID or height. This method must be called only once per height, // and [txs] must include all atomic txs for the block accepted at the // corresponding height. -func (a *atomicTxRepository) Write(height uint64, txs []*atomic.Tx) error { +func (a *atomicTxRepository) Write(height uint64, txs []*Tx) error { return a.write(height, txs, false) } // WriteBonus is similar to Write, except the [txID] => [height] is not // overwritten if already exists. -func (a *atomicTxRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { +func (a *atomicTxRepository) WriteBonus(height uint64, txs []*Tx) error { return a.write(height, txs, true) } -func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { +func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. 
- copyTxs := make([]*atomic.Tx, len(txs)) + copyTxs := make([]*Tx, len(txs)) copy(copyTxs, txs) utils.Sort(copyTxs) txs = copyTxs @@ -301,8 +300,8 @@ func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) // indexTxByID writes [tx] into the [acceptedAtomicTxDB] stored as // [height] + [tx bytes] -func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { - txBytes, err := a.codec.Marshal(atomic.CodecVersion, tx) +func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *Tx) error { + txBytes, err := a.codec.Marshal(CodecVersion, tx) if err != nil { return err } @@ -321,8 +320,8 @@ func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) erro } // indexTxsAtHeight adds [height] -> [txs] to the [acceptedAtomicTxByHeightDB] -func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { - txsBytes, err := a.codec.Marshal(atomic.CodecVersion, txs) +func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*Tx) error { + txsBytes, err := a.codec.Marshal(CodecVersion, txs) if err != nil { return err } @@ -336,7 +335,7 @@ func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic. // [tx] to the slice of transactions stored there. // This function is used while initializing the atomic repository to re-index the atomic transactions // by txID into the height -> txs index. 
-func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { +func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *Tx) error { txs, err := a.getByHeightBytes(heightBytes) if err != nil && err != database.ErrNotFound { return err diff --git a/plugin/evm/atomic_tx_repository_test.go b/plugin/evm/atomic/atomic_tx_repository_test.go similarity index 85% rename from plugin/evm/atomic_tx_repository_test.go rename to plugin/evm/atomic/atomic_tx_repository_test.go index 224f8fa726..fcde4f01dd 100644 --- a/plugin/evm/atomic_tx_repository_test.go +++ b/plugin/evm/atomic/atomic_tx_repository_test.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "encoding/binary" @@ -12,7 +12,6 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/avalanchego/codec" @@ -28,13 +27,13 @@ import ( // addTxs writes [txsPerHeight] txs for heights ranging in [fromHeight, toHeight) directly to [acceptedAtomicTxDB], // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
-func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { +func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { for height := fromHeight; height < toHeight; height++ { - txs := make([]*atomic.Tx, 0, txsPerHeight) + txs := make([]*Tx, 0, txsPerHeight) for i := 0; i < txsPerHeight; i++ { - tx := atomic.NewTestTx() + tx := NewTestTx() txs = append(txs, tx) - txBytes, err := codec.Marshal(atomic.CodecVersion, tx) + txBytes, err := codec.Marshal(CodecVersion, tx) assert.NoError(t, err) // Write atomic transactions to the [acceptedAtomicTxDB] @@ -71,10 +70,10 @@ func constTxsPerHeight(txCount int) func(uint64) int { // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight uint64, - txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, + txsPerHeight func(height uint64) int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, ) { for height := fromHeight; height < toHeight; height++ { - txs := atomic.NewTestTxs(txsPerHeight(height)) + txs := NewTestTxs(txsPerHeight(height)) if err := repo.Write(height, txs); err != nil { t.Fatal(err) } @@ -96,7 +95,7 @@ func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight } // verifyTxs asserts [repo] can find all txs in [txMap] by height and txID -func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*atomic.Tx) { +func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) { // We should be able to fetch indexed txs by height: for height, expectedTxs := range txMap { txs, err := repo.GetByHeight(height) @@ -183,12 +182,11 @@ func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec - repo, err := NewAtomicTxRepository(db, codec, 0) + repo, err := NewAtomicTxRepository(db, testTxCodec, 0) if err != nil { t.Fatal(err) } - txMap := make(map[uint64][]*atomic.Tx) + txMap := make(map[uint64][]*Tx) writeTxs(t, repo, 1, 100, constTxsPerHeight(1), txMap, nil) verifyTxs(t, repo, txMap) @@ -196,12 +194,11 @@ func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec - repo, err := NewAtomicTxRepository(db, codec, 0) + repo, err := NewAtomicTxRepository(db, testTxCodec, 0) if err != nil { t.Fatal(err) } - txMap := make(map[uint64][]*atomic.Tx) + txMap := make(map[uint64][]*Tx) writeTxs(t, 
repo, 1, 100, constTxsPerHeight(10), txMap, nil) verifyTxs(t, repo, txMap) @@ -209,18 +206,17 @@ func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { func TestAtomicRepositoryPreAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*atomic.Tx) - addTxs(t, codec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) + txMap := make(map[uint64][]*Tx) + addTxs(t, testTxCodec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) if err := db.Commit(); err != nil { t.Fatal(err) } // Ensure the atomic repository can correctly migrate the transactions // from the old accepted atomic tx DB to add the height index. - repo, err := NewAtomicTxRepository(db, codec, 100) + repo, err := NewAtomicTxRepository(db, testTxCodec, 100) if err != nil { t.Fatal(err) } @@ -234,19 +230,18 @@ func TestAtomicRepositoryPreAP5Migration(t *testing.T) { func TestAtomicRepositoryPostAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*atomic.Tx) - addTxs(t, codec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) - addTxs(t, codec, acceptedAtomicTxDB, 100, 200, 10, txMap, nil) + txMap := make(map[uint64][]*Tx) + addTxs(t, testTxCodec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) + addTxs(t, testTxCodec, acceptedAtomicTxDB, 100, 200, 10, txMap, nil) if err := db.Commit(); err != nil { t.Fatal(err) } // Ensure the atomic repository can correctly migrate the transactions // from the old accepted atomic tx DB to add the height index. 
- repo, err := NewAtomicTxRepository(db, codec, 200) + repo, err := NewAtomicTxRepository(db, testTxCodec, 200) if err != nil { t.Fatal(err) } @@ -259,16 +254,15 @@ func TestAtomicRepositoryPostAP5Migration(t *testing.T) { func benchAtomicRepositoryIndex10_000(b *testing.B, maxHeight uint64, txsPerHeight int) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*atomic.Tx) + txMap := make(map[uint64][]*Tx) - addTxs(b, codec, acceptedAtomicTxDB, 0, maxHeight, txsPerHeight, txMap, nil) + addTxs(b, testTxCodec, acceptedAtomicTxDB, 0, maxHeight, txsPerHeight, txMap, nil) if err := db.Commit(); err != nil { b.Fatal(err) } - repo, err := NewAtomicTxRepository(db, codec, maxHeight) + repo, err := NewAtomicTxRepository(db, testTxCodec, maxHeight) if err != nil { b.Fatal(err) } diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index 4938ddc53e..e63152d273 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -180,7 +180,7 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { // SemanticVerify this transaction is valid. func (utx *UnsignedExportTx) SemanticVerify( - backend *Backend, + backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int, diff --git a/plugin/evm/atomic/import_tx.go b/plugin/evm/atomic/import_tx.go index 9213299d8c..5b608ca2f5 100644 --- a/plugin/evm/atomic/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -187,7 +187,7 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { // SemanticVerify this transaction is valid. 
func (utx *UnsignedImportTx) SemanticVerify( - backend *Backend, + backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int, @@ -441,7 +441,7 @@ func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state StateDB) // or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is // accepted, then nil will be returned immediately. // If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. -func conflicts(backend *Backend, inputs set.Set[ids.ID], ancestor AtomicBlockContext) error { +func conflicts(backend *VerifierBackend, inputs set.Set[ids.ID], ancestor AtomicBlockContext) error { fetcher := backend.BlockFetcher lastAcceptedBlock := fetcher.LastAcceptedBlockInternal() lastAcceptedHeight := lastAcceptedBlock.Height() diff --git a/plugin/evm/atomic/test_shared_memories.go b/plugin/evm/atomic/test_shared_memories.go new file mode 100644 index 0000000000..2f92639a5c --- /dev/null +++ b/plugin/evm/atomic/test_shared_memories.go @@ -0,0 +1,79 @@ +package atomic + +import ( + "testing" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" + "github.com/stretchr/testify/assert" +) + +type SharedMemories struct { + thisChain atomic.SharedMemory + peerChain atomic.SharedMemory + thisChainID ids.ID + peerChainID ids.ID +} + +func (s *SharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.Requests) error { + for _, reqs := range ops { + puts := make(map[ids.ID]*atomic.Requests) + puts[s.thisChainID] = &atomic.Requests{} + for _, key := range reqs.RemoveRequests { + val := []byte{0x1} + puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &atomic.Element{Key: key, Value: val}) + } + if err := s.peerChain.Apply(puts); err != nil { + return err + } + } + return nil +} + +func (s *SharedMemories) AssertOpsApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { + t.Helper() + for _, reqs := range ops { 
+ // should be able to get put requests + for _, elem := range reqs.PutRequests { + val, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) + if err != nil { + t.Fatalf("error finding puts in peerChainMemory: %s", err) + } + assert.Equal(t, elem.Value, val[0]) + } + + // should not be able to get remove requests + for _, key := range reqs.RemoveRequests { + _, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) + assert.EqualError(t, err, "not found") + } + } +} + +func (s *SharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { + t.Helper() + for _, reqs := range ops { + // should not be able to get put requests + for _, elem := range reqs.PutRequests { + _, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) + assert.EqualError(t, err, "not found") + } + + // should be able to get remove requests (these were previously added as puts on peerChain) + for _, key := range reqs.RemoveRequests { + val, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) + assert.NoError(t, err) + assert.Equal(t, []byte{0x1}, val[0]) + } + } +} + +// TODO: once tests are moved to atomic package, unexport this function +func NewSharedMemories(atomicMemory *atomic.Memory, thisChainID, peerChainID ids.ID) *SharedMemories { + return &SharedMemories{ + thisChain: atomicMemory.NewSharedMemory(thisChainID), + peerChain: atomicMemory.NewSharedMemory(peerChainID), + thisChainID: thisChainID, + peerChainID: peerChainID, + } +} diff --git a/plugin/evm/atomic/test_tx.go b/plugin/evm/atomic/test_tx.go index 71437ea41e..0c2121a6b8 100644 --- a/plugin/evm/atomic/test_tx.go +++ b/plugin/evm/atomic/test_tx.go @@ -19,16 +19,18 @@ import ( "github.com/ava-labs/coreth/params" ) -var TestTxCodec codec.Manager +const testCodecVersion = 0 + +var testTxCodec codec.Manager func init() { - TestTxCodec = codec.NewDefaultManager() + testTxCodec = codec.NewDefaultManager() c := linearcodec.NewDefault() errs := wrappers.Errs{} errs.Add( c.RegisterType(&TestUnsignedTx{}), - 
TestTxCodec.RegisterCodec(CodecVersion, c), + testTxCodec.RegisterCodec(testCodecVersion, c), ) if errs.Errored() { @@ -82,7 +84,7 @@ func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SemanticVerify(backend *Backend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error { +func (t *TestUnsignedTx) SemanticVerify(backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error { return t.SemanticVerifyV } diff --git a/plugin/evm/atomic/tx.go b/plugin/evm/atomic/tx.go index a911402dea..5c44b61937 100644 --- a/plugin/evm/atomic/tx.go +++ b/plugin/evm/atomic/tx.go @@ -127,7 +127,7 @@ type UnsignedTx interface { SignedBytes() []byte } -type Backend struct { +type VerifierBackend struct { Ctx *snow.Context Fx fx.Fx Rules params.Rules @@ -170,7 +170,7 @@ type UnsignedAtomicTx interface { Verify(ctx *snow.Context, rules params.Rules) error // Attempts to verify this transaction with the provided state. // SemanticVerify this transaction is valid. - SemanticVerify(backend *Backend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error + SemanticVerify(backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error // AtomicOps returns the blockchainID and set of atomic requests that // must be applied to shared memory for this transaction to be accepted. // The set of atomic requests must be returned in a consistent order. diff --git a/plugin/evm/atomic/utils.go b/plugin/evm/atomic/utils.go new file mode 100644 index 0000000000..e599b75201 --- /dev/null +++ b/plugin/evm/atomic/utils.go @@ -0,0 +1,17 @@ +// (c) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" +) + +func ConvertToAtomicOps(tx *Tx) (map[ids.ID]*avalancheatomic.Requests, error) { + id, reqs, err := tx.AtomicOps() + if err != nil { + return nil, err + } + return map[ids.ID]*avalancheatomic.Requests{id: reqs}, nil +} diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 9a2de32601..d139b640a0 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -158,7 +158,7 @@ func (b *Block) Accept(context.Context) error { return fmt.Errorf("chain could not accept %s: %w", b.ID(), err) } - if err := vm.acceptedBlockDB.Put(lastAcceptedKey, b.id[:]); err != nil { + if err := vm.PutLastAcceptedID(b.id); err != nil { return fmt.Errorf("failed to put %s as the last accepted block: %w", b.ID(), err) } @@ -428,3 +428,7 @@ func (b *Block) Bytes() []byte { } func (b *Block) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID()) } + +func (b *Block) GetEthBlock() *types.Block { + return b.ethBlock +} diff --git a/plugin/evm/database.go b/plugin/evm/database/wrapped_database.go similarity index 54% rename from plugin/evm/database.go rename to plugin/evm/database/wrapped_database.go index 479c995ba3..9421e514a8 100644 --- a/plugin/evm/database.go +++ b/plugin/evm/database/wrapped_database.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package database import ( "errors" @@ -11,25 +11,37 @@ import ( ) var ( - _ ethdb.KeyValueStore = &Database{} + _ ethdb.KeyValueStore = ðDbWrapper{} ErrSnapshotNotSupported = errors.New("snapshot is not supported") ) -// Database implements ethdb.Database -type Database struct{ database.Database } +// ethDbWrapper implements ethdb.Database +type ethDbWrapper struct { + database.Database +} + +func WrapDatabase(db database.Database) ethdb.KeyValueStore { + return ethDbWrapper{db} +} // Stat implements ethdb.Database -func (db Database) Stat(string) (string, error) { return "", database.ErrNotFound } +func (db ethDbWrapper) Stat(string) (string, error) { + return "", database.ErrNotFound +} // NewBatch implements ethdb.Database -func (db Database) NewBatch() ethdb.Batch { return Batch{db.Database.NewBatch()} } +func (db ethDbWrapper) NewBatch() ethdb.Batch { + return wrappedBatch{db.Database.NewBatch()} +} // NewBatchWithSize implements ethdb.Database // TODO: propagate size through avalanchego Database interface -func (db Database) NewBatchWithSize(size int) ethdb.Batch { return Batch{db.Database.NewBatch()} } +func (db ethDbWrapper) NewBatchWithSize(size int) ethdb.Batch { + return wrappedBatch{db.Database.NewBatch()} +} -func (db Database) NewSnapshot() (ethdb.Snapshot, error) { +func (db ethDbWrapper) NewSnapshot() (ethdb.Snapshot, error) { return nil, ErrSnapshotNotSupported } @@ -37,7 +49,7 @@ func (db Database) NewSnapshot() (ethdb.Snapshot, error) { // // Note: This method assumes that the prefix is NOT part of the start, so there's // no need for the caller to prepend the prefix to the start. -func (db Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { +func (db ethDbWrapper) NewIterator(prefix []byte, start []byte) ethdb.Iterator { // avalanchego's database implementation assumes that the prefix is part of the // start, so it is added here (if it is provided). 
 if len(prefix) > 0 { @@ -50,15 +62,21 @@ func (db Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { } // NewIteratorWithStart implements ethdb.Database -func (db Database) NewIteratorWithStart(start []byte) ethdb.Iterator { +func (db ethDbWrapper) NewIteratorWithStart(start []byte) ethdb.Iterator { return db.Database.NewIteratorWithStart(start) } -// Batch implements ethdb.Batch -type Batch struct{ database.Batch } +// wrappedBatch implements ethdb.Batch +type wrappedBatch struct { + database.Batch +} // ValueSize implements ethdb.Batch -func (batch Batch) ValueSize() int { return batch.Batch.Size() } +func (batch wrappedBatch) ValueSize() int { + return batch.Batch.Size() +} // Replay implements ethdb.Batch -func (batch Batch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) } +func (batch wrappedBatch) Replay(w ethdb.KeyValueWriter) error { + return batch.Batch.Replay(w) +} diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 36b74ab45a..1c5507d218 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -155,7 +156,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount / 2, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, }, @@ -172,7 +173,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, }, @@ -189,7 +190,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount + 1, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, }, @@ -263,7 +264,7 @@ func 
TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, }, @@ -286,7 +287,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 1, }, }, @@ -309,7 +310,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 1, }, }, @@ -912,7 +913,7 @@ func TestExportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - backend := &atomic.Backend{ + backend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: test.rules, @@ -1087,27 +1088,28 @@ func TestExportTxAccept(t *testing.T) { func TestExportTxVerify(t *testing.T) { var exportAmount uint64 = 10000000 + ctx := utils.TestSnowContext() exportTx := &atomic.UnsignedExportTx{ - NetworkID: testNetworkID, - BlockchainID: testCChainID, - DestinationChain: testXChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.CChainID, + DestinationChain: ctx.XChainID, Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, - AssetID: testAvaxAssetID, + AssetID: ctx.AVAXAssetID, Nonce: 0, }, { Address: testEthAddrs[2], Amount: exportAmount, - AssetID: testAvaxAssetID, + AssetID: ctx.AVAXAssetID, Nonce: 0, }, }, ExportedOutputs: []*avax.TransferableOutput{ { - Asset: avax.Asset{ID: testAvaxAssetID}, + Asset: avax.Asset{ID: ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: exportAmount, OutputOwners: secp256k1fx.OutputOwners{ @@ -1118,7 +1120,7 @@ func TestExportTxVerify(t *testing.T) { }, }, { - Asset: avax.Asset{ID: testAvaxAssetID}, + Asset: avax.Asset{ID: ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: exportAmount, OutputOwners: secp256k1fx.OutputOwners{ @@ -1138,8 +1140,6 @@ func TestExportTxVerify(t *testing.T) { emptySigners := make([][]*secp256k1.PrivateKey, 2) 
atomic.SortEVMInputsAndSigners(exportTx.Ins, emptySigners) - ctx := NewContext() - tests := map[string]atomicTxVerifyTest{ "nil tx": { generate: func(t *testing.T) atomic.UnsignedAtomicTx { @@ -1261,7 +1261,7 @@ func TestExportTxVerify(t *testing.T) { { Address: testEthAddrs[0], Amount: 0, - AssetID: testAvaxAssetID, + AssetID: ctx.AVAXAssetID, Nonce: 0, }, } @@ -1779,7 +1779,7 @@ func TestNewExportTx(t *testing.T) { exportTx := tx.UnsignedAtomicTx - backend := &atomic.Backend{ + backend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: vm.currentRules(), @@ -1987,7 +1987,7 @@ func TestNewExportTxMulticoin(t *testing.T) { } exportTx := tx.UnsignedAtomicTx - backend := &atomic.Backend{ + backend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: vm.currentRules(), diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index 3195fd7dae..da8a5c9db6 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" @@ -66,7 +67,7 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.M } func TestImportTxVerify(t *testing.T) { - ctx := NewContext() + ctx := utils.TestSnowContext() var importAmount uint64 = 10000000 txID := ids.GenerateTestID() @@ -316,7 +317,7 @@ func TestImportTxVerify(t *testing.T) { { Address: testEthAddrs[0], Amount: 0, - AssetID: testAvaxAssetID, + AssetID: ctx.AVAXAssetID, }, } return &tx diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/syncervm_client.go index 0b704d6233..4649e33952 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/syncervm_client.go @@ -1,6 +1,6 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
- +// TODO: move to separate package package evm import ( @@ -8,7 +8,10 @@ import ( "fmt" "sync" + syncclient "github.com/ava-labs/coreth/sync/client" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" @@ -16,51 +19,60 @@ import ( "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/message" - syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) -const ( - // State sync fetches [parentsToGet] parents of the block it syncs to. - // The last 256 block hashes are necessary to support the BLOCKHASH opcode. - parentsToGet = 256 +// StateSyncParentsToFetch is the number of the block parents the state syncs to. +// The last 256 block hashes are necessary to support the BLOCKHASH opcode. 
+const StateSyncParentsToFetch = 256 + +var ( + metadataPrefix = []byte("metadata") + stateSyncSummaryKey = []byte("stateSyncSummary") ) -var stateSyncSummaryKey = []byte("stateSyncSummary") +type BlockAcceptor interface { + PutLastAcceptedID(ids.ID) error +} + +type EthBlockWrapper interface { + GetEthBlock() *types.Block +} -// stateSyncClientConfig defines the options and dependencies needed to construct a StateSyncerClient -type stateSyncClientConfig struct { - enabled bool - skipResume bool +// StateSyncClientConfig defines the options and dependencies needed to construct a StateSyncClient +type StateSyncClientConfig struct { + Enabled bool + SkipResume bool // Specifies the number of blocks behind the latest state summary that the chain must be // in order to prefer performing state sync over falling back to the normal bootstrapping // algorithm. - stateSyncMinBlocks uint64 - stateSyncRequestSize uint16 // number of key/value pairs to ask peers for per request + StateSyncMinBlocks uint64 + StateSyncRequestSize uint16 // number of key/value pairs to ask peers for per request - lastAcceptedHeight uint64 + LastAcceptedHeight uint64 - chain *eth.Ethereum - state *chain.State - chaindb ethdb.Database - metadataDB database.Database - acceptedBlockDB database.Database - db *versiondb.Database - atomicBackend AtomicBackend + Chain *eth.Ethereum + State *chain.State + ChaindDB ethdb.Database + Acceptor BlockAcceptor + DB *versiondb.Database + AtomicBackend atomic.AtomicBackend - client syncclient.Client + Client syncclient.Client - toEngine chan<- commonEng.Message + ToEngine chan<- commonEng.Message } type stateSyncerClient struct { - *stateSyncClientConfig + *StateSyncClientConfig resumableSummary message.SyncSummary @@ -70,11 +82,13 @@ type stateSyncerClient struct { // State Sync results syncSummary message.SyncSummary stateSyncErr error + metadataDB database.Database } -func NewStateSyncClient(config *stateSyncClientConfig) StateSyncClient { +func 
NewStateSyncClient(config *StateSyncClientConfig) StateSyncClient { return &stateSyncerClient{ - stateSyncClientConfig: config, + StateSyncClientConfig: config, + metadataDB: prefixdb.New(metadataPrefix, config.DB), } } @@ -101,14 +115,14 @@ type Syncer interface { // StateSyncEnabled returns [client.enabled], which is set in the chain's config file. func (client *stateSyncerClient) StateSyncEnabled(context.Context) (bool, error) { - return client.enabled, nil + return client.Enabled, nil } // GetOngoingSyncStateSummary returns a state summary that was previously started // and not finished, and sets [resumableSummary] if one was found. // Returns [database.ErrNotFound] if no ongoing summary is found or if [client.skipResume] is true. func (client *stateSyncerClient) GetOngoingSyncStateSummary(context.Context) (block.StateSummary, error) { - if client.skipResume { + if client.SkipResume { return nil, database.ErrNotFound } @@ -130,7 +144,7 @@ func (client *stateSyncerClient) ClearOngoingSummary() error { if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { return fmt.Errorf("failed to clear ongoing summary: %w", err) } - if err := client.db.Commit(); err != nil { + if err := client.DB.Commit(); err != nil { return fmt.Errorf("failed to commit db while clearing ongoing summary: %w", err) } @@ -145,7 +159,7 @@ func (client *stateSyncerClient) ParseStateSummary(_ context.Context, summaryByt // stateSync blockingly performs the state sync for the EVM state and the atomic state // to [client.syncSummary]. returns an error if one occurred. 
func (client *stateSyncerClient) stateSync(ctx context.Context) error { - if err := client.syncBlocks(ctx, client.syncSummary.BlockHash, client.syncSummary.BlockNumber, parentsToGet); err != nil { + if err := client.syncBlocks(ctx, client.syncSummary.BlockHash, client.syncSummary.BlockNumber, StateSyncParentsToFetch); err != nil { return err } @@ -166,10 +180,10 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // Skip syncing if the blockchain is not significantly ahead of local state, // since bootstrapping would be faster. // (Also ensures we don't sync to a height prior to local state.) - if client.lastAcceptedHeight+client.stateSyncMinBlocks > proposedSummary.Height() { + if client.LastAcceptedHeight+client.StateSyncMinBlocks > proposedSummary.Height() { log.Info( "last accepted too close to most recent syncable block, skipping state sync", - "lastAccepted", client.lastAcceptedHeight, + "lastAccepted", client.LastAcceptedHeight, "syncableHeight", proposedSummary.Height(), ) return block.StateSyncSkipped, nil @@ -181,11 +195,11 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // sync marker will be wiped, so we do not accidentally resume progress from an incorrect version // of the snapshot. (if switching between versions that come before this change and back this could // lead to the snapshot not being cleaned up correctly) - <-snapshot.WipeSnapshot(client.chaindb, true) + <-snapshot.WipeSnapshot(client.ChaindDB, true) // Reset the snapshot generator here so that when state sync completes, snapshots will not attempt to read an // invalid generator. // Note: this must be called after WipeSnapshot is called so that we do not invalidate a partially generated snapshot. 
- snapshot.ResetSnapshotGeneration(client.chaindb) + snapshot.ResetSnapshotGeneration(client.ChaindDB) } client.syncSummary = proposedSummary @@ -195,7 +209,7 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS if err := client.metadataDB.Put(stateSyncSummaryKey, proposedSummary.Bytes()); err != nil { return block.StateSyncSkipped, fmt.Errorf("failed to write state sync summary key to disk: %w", err) } - if err := client.db.Commit(); err != nil { + if err := client.DB.Commit(); err != nil { return block.StateSyncSkipped, fmt.Errorf("failed to commit db: %w", err) } @@ -218,7 +232,7 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // this error will be propagated to the engine when it calls // vm.SetState(snow.Bootstrapping) log.Info("stateSync completed, notifying engine", "err", client.stateSyncErr) - client.toEngine <- commonEng.StateSyncDone + client.ToEngine <- commonEng.StateSyncDone }() return block.StateSyncStatic, nil } @@ -235,7 +249,7 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common // first, check for blocks already available on disk so we don't // request them from peers. for parentsToGet >= 0 { - blk := rawdb.ReadBlock(client.chaindb, nextHash, nextHeight) + blk := rawdb.ReadBlock(client.ChaindDB, nextHash, nextHeight) if blk != nil { // block exists nextHash = blk.ParentHash() @@ -250,12 +264,12 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common // get any blocks we couldn't find on disk from peers and write // them to disk. 
- batch := client.chaindb.NewBatch() + batch := client.ChaindDB.NewBatch() for i := parentsToGet - 1; i >= 0 && (nextHash != common.Hash{}); { if err := ctx.Err(); err != nil { return err } - blocks, err := client.client.GetBlocks(ctx, nextHash, nextHeight, parentsPerRequest) + blocks, err := client.Client.GetBlocks(ctx, nextHash, nextHeight, parentsPerRequest) if err != nil { log.Error("could not get blocks from peer", "err", err, "nextHash", nextHash, "remaining", i+1) return err @@ -276,7 +290,7 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error { log.Info("atomic tx: sync starting", "root", client.syncSummary.AtomicRoot) - atomicSyncer, err := client.atomicBackend.Syncer(client.client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber, client.stateSyncRequestSize) + atomicSyncer, err := client.AtomicBackend.Syncer(client.Client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber, client.StateSyncRequestSize) if err != nil { return err } @@ -291,13 +305,13 @@ func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error { func (client *stateSyncerClient) syncStateTrie(ctx context.Context) error { log.Info("state sync: sync starting", "root", client.syncSummary.BlockRoot) evmSyncer, err := statesync.NewStateSyncer(&statesync.StateSyncerConfig{ - Client: client.client, + Client: client.Client, Root: client.syncSummary.BlockRoot, BatchSize: ethdb.IdealBatchSize, - DB: client.chaindb, + DB: client.ChaindDB, MaxOutstandingCodeHashes: statesync.DefaultMaxOutstandingCodeHashes, NumCodeFetchingWorkers: statesync.DefaultNumCodeFetchingWorkers, - RequestSize: client.stateSyncRequestSize, + RequestSize: client.StateSyncRequestSize, }) if err != nil { return err @@ -321,7 +335,7 @@ func (client *stateSyncerClient) Shutdown() error { // finishSync is responsible for updating disk and memory pointers so the VM is prepared // for 
bootstrapping. Executes any shared memory operations from the atomic trie to shared memory. func (client *stateSyncerClient) finishSync() error { - stateBlock, err := client.state.GetBlock(context.TODO(), ids.ID(client.syncSummary.BlockHash)) + stateBlock, err := client.State.GetBlock(context.TODO(), ids.ID(client.syncSummary.BlockHash)) if err != nil { return fmt.Errorf("could not get block by hash from client state: %s", client.syncSummary.BlockHash) } @@ -330,12 +344,14 @@ func (client *stateSyncerClient) finishSync() error { if !ok { return fmt.Errorf("could not convert block(%T) to *chain.BlockWrapper", wrapper) } - evmBlock, ok := wrapper.Block.(*Block) + wrappedBlock := wrapper.Block + + evmBlockGetter, ok := wrappedBlock.(EthBlockWrapper) if !ok { - return fmt.Errorf("could not convert block(%T) to evm.Block", stateBlock) + return fmt.Errorf("could not convert block(%T) to evm.EthBlockWrapper", stateBlock) } - block := evmBlock.ethBlock + block := evmBlockGetter.GetEthBlock() if block.Hash() != client.syncSummary.BlockHash { return fmt.Errorf("attempted to set last summary block to unexpected block hash: (%s != %s)", block.Hash(), client.syncSummary.BlockHash) @@ -354,9 +370,9 @@ func (client *stateSyncerClient) finishSync() error { // by [params.BloomBitsBlocks]. 
parentHeight := block.NumberU64() - 1 parentHash := block.ParentHash() - client.chain.BloomIndexer().AddCheckpoint(parentHeight/params.BloomBitsBlocks, parentHash) + client.Chain.BloomIndexer().AddCheckpoint(parentHeight/params.BloomBitsBlocks, parentHash) - if err := client.chain.BlockChain().ResetToStateSyncedBlock(block); err != nil { + if err := client.Chain.BlockChain().ResetToStateSyncedBlock(block); err != nil { return err } @@ -364,7 +380,7 @@ func (client *stateSyncerClient) finishSync() error { return fmt.Errorf("error updating vm markers, height=%d, hash=%s, err=%w", block.NumberU64(), block.Hash(), err) } - if err := client.state.SetLastAcceptedBlock(evmBlock); err != nil { + if err := client.State.SetLastAcceptedBlock(wrappedBlock); err != nil { return err } @@ -374,7 +390,7 @@ func (client *stateSyncerClient) finishSync() error { // ApplyToSharedMemory does this, and even if the VM is stopped // (gracefully or ungracefully), since MarkApplyToSharedMemoryCursor // is called, VM will resume ApplyToSharedMemory on Initialize. - return client.atomicBackend.ApplyToSharedMemory(block.NumberU64()) + return client.AtomicBackend.ApplyToSharedMemory(block.NumberU64()) } // updateVMMarkers updates the following markers in the VM's database @@ -387,17 +403,21 @@ func (client *stateSyncerClient) updateVMMarkers() error { // Mark the previously last accepted block for the shared memory cursor, so that we will execute shared // memory operations from the previously last accepted block to [vm.syncSummary] when ApplyToSharedMemory // is called. 
- if err := client.atomicBackend.MarkApplyToSharedMemoryCursor(client.lastAcceptedHeight); err != nil { + if err := client.AtomicBackend.MarkApplyToSharedMemoryCursor(client.LastAcceptedHeight); err != nil { + return err + } + client.AtomicBackend.SetLastAccepted(client.syncSummary.BlockHash) + id, err := ids.ToID(client.syncSummary.BlockHash.Bytes()) + if err != nil { return err } - client.atomicBackend.SetLastAccepted(client.syncSummary.BlockHash) - if err := client.acceptedBlockDB.Put(lastAcceptedKey, client.syncSummary.BlockHash[:]); err != nil { + if err := client.Acceptor.PutLastAcceptedID(id); err != nil { return err } if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { return err } - return client.db.Commit() + return client.DB.Commit() } // Error returns a non-nil error if one occurred during the sync. diff --git a/plugin/evm/syncervm_server.go b/plugin/evm/syncervm_server.go index 3bf051bf87..f434f4ae7d 100644 --- a/plugin/evm/syncervm_server.go +++ b/plugin/evm/syncervm_server.go @@ -1,6 +1,6 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. - +// TODO: move to separate package package evm import ( @@ -11,14 +11,15 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -type stateSyncServerConfig struct { +type StateSyncServerConfig struct { Chain *core.BlockChain - AtomicTrie AtomicTrie + AtomicTrie atomic.AtomicTrie // SyncableInterval is the interval at which blocks are eligible to provide syncable block summaries. 
SyncableInterval uint64 @@ -26,7 +27,7 @@ type stateSyncServerConfig struct { type stateSyncServer struct { chain *core.BlockChain - atomicTrie AtomicTrie + atomicTrie atomic.AtomicTrie syncableInterval uint64 } @@ -36,7 +37,7 @@ type StateSyncServer interface { GetStateSummary(context.Context, uint64) (block.StateSummary, error) } -func NewStateSyncServer(config *stateSyncServerConfig) StateSyncServer { +func NewStateSyncServer(config *StateSyncServerConfig) StateSyncServer { return &stateSyncServer{ chain: config.Chain, atomicTrie: config.AtomicTrie, diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index bd7f993cb5..10f55031ba 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/require" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database" + avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -36,6 +36,7 @@ import ( "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/predicate" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" @@ -54,7 +55,7 @@ func TestSkipStateSync(t *testing.T) { stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync syncMode: block.StateSyncSkipped, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) testSyncerVM(t, vmSetup, test) } @@ -66,14 +67,14 @@ func TestStateSyncFromScratch(t *testing.T) { stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, } - vmSetup := 
createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) testSyncerVM(t, vmSetup, test) } func TestStateSyncFromScratchExceedParent(t *testing.T) { rand.Seed(1) - numToGen := parentsToGet + uint64(32) + numToGen := StateSyncParentsToFetch + uint64(32) test := syncTest{ syncableInterval: numToGen, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync @@ -120,7 +121,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) @@ -271,7 +272,7 @@ func TestVMShutdownWhileSyncing(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup = createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup = createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) } @@ -284,8 +285,13 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s testShortIDAddrs[0]: importAmount, } ) + + // override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index] + // creates a commit at the height [syncableInterval]. This is necessary to support + // fetching a state summary. + config := fmt.Sprintf(`{"commit-interval": %d}`, test.syncableInterval) _, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs( - t, true, "", "", "", alloc, + t, true, "", config, "", alloc, ) t.Cleanup(func() { log.Info("Shutting down server VM") @@ -328,17 +334,17 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s } }, nil) - // override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index] - // creates a commit at the height [syncableInterval]. 
This is necessary to support - // fetching a state summary. - serverAtomicTrie := serverVM.atomicTrie.(*atomicTrie) - serverAtomicTrie.commitInterval = test.syncableInterval - require.NoError(serverAtomicTrie.commit(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) + serverAtomicTrie := serverVM.atomicTrie + require.True(serverAtomicTrie.AcceptTrie(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) require.NoError(serverVM.db.Commit()) - serverSharedMemories := newSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) - serverSharedMemories.assertOpsApplied(t, mustAtomicOps(importTx)) - serverSharedMemories.assertOpsApplied(t, mustAtomicOps(exportTx)) + serverSharedMemories := atomic.NewSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) + importOps, err := atomic.ConvertToAtomicOps(importTx) + require.NoError(err) + exportOps, err := atomic.ConvertToAtomicOps(exportTx) + require.NoError(err) + serverSharedMemories.AssertOpsApplied(t, importOps) + serverSharedMemories.AssertOpsApplied(t, exportOps) // make some accounts trieDB := triedb.NewDatabase(serverVM.chaindb, nil) @@ -359,7 +365,8 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval // initialise [syncerVM] with blank genesis state - stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d}`, test.stateSyncMinBlocks, 4) + // we also override [syncerVM]'s commit interval so the atomic trie works correctly. 
+ stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d, "commit-interval": %d}`, test.stateSyncMinBlocks, 4, test.syncableInterval) syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( t, false, "", stateSyncEnabledJSON, "", alloc, ) @@ -372,9 +379,6 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(err) require.True(enabled) - // override [syncerVM]'s commit interval so the atomic trie works correctly. - syncerVM.atomicTrie.(*atomicTrie).commitInterval = test.syncableInterval - // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { if test.responseIntercept == nil { @@ -430,7 +434,7 @@ type syncVMSetup struct { fundedAccounts map[*keystore.Key]*types.StateAccount syncerVM *VM - syncerDB database.Database + syncerDB avalanchedatabase.Database syncerEngineChan <-chan commonEng.Message syncerAtomicMemory *avalancheatomic.Memory shutdownOnceSyncerVM *shutdownOnceVM @@ -491,7 +495,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { if test.expectedErr != nil { require.ErrorIs(err, test.expectedErr) // Note we re-open the database here to avoid a closed error when the test is for a shutdown VM. 
- chaindb := Database{prefixdb.NewNested(ethDBPrefix, syncerVM.db)} + chaindb := database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, syncerVM.db)) assertSyncPerformedHeights(t, chaindb, map[uint64]struct{}{}) return } @@ -558,10 +562,12 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { require.True(syncerVM.bootstrapped.Get()) // check atomic memory was synced properly - syncerSharedMemories := newSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) + syncerSharedMemories := atomic.NewSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) for _, tx := range includedAtomicTxs { - syncerSharedMemories.assertOpsApplied(t, mustAtomicOps(tx)) + ops, err := atomic.ConvertToAtomicOps(tx) + require.NoError(err) + syncerSharedMemories.AssertOpsApplied(t, ops) } // Generate blocks after we have entered normal consensus as well diff --git a/plugin/evm/tx_heap.go b/plugin/evm/tx_heap.go deleted file mode 100644 index c6562fd9b0..0000000000 --- a/plugin/evm/tx_heap.go +++ /dev/null @@ -1,164 +0,0 @@ -// (c) 2020-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "container/heap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/plugin/evm/atomic" -) - -// txEntry is used to track the [gasPrice] transactions pay to be included in -// the mempool. 
-type txEntry struct { - id ids.ID - gasPrice uint64 - tx *atomic.Tx - index int -} - -// internalTxHeap is used to track pending atomic transactions by [gasPrice] -type internalTxHeap struct { - isMinHeap bool - items []*txEntry - lookup map[ids.ID]*txEntry -} - -func newInternalTxHeap(items int, isMinHeap bool) *internalTxHeap { - return &internalTxHeap{ - isMinHeap: isMinHeap, - items: make([]*txEntry, 0, items), - lookup: map[ids.ID]*txEntry{}, - } -} - -func (th internalTxHeap) Len() int { return len(th.items) } - -func (th internalTxHeap) Less(i, j int) bool { - if th.isMinHeap { - return th.items[i].gasPrice < th.items[j].gasPrice - } - return th.items[i].gasPrice > th.items[j].gasPrice -} - -func (th internalTxHeap) Swap(i, j int) { - th.items[i], th.items[j] = th.items[j], th.items[i] - th.items[i].index = i - th.items[j].index = j -} - -func (th *internalTxHeap) Push(x interface{}) { - entry := x.(*txEntry) - if th.Has(entry.id) { - return - } - th.items = append(th.items, entry) - th.lookup[entry.id] = entry -} - -func (th *internalTxHeap) Pop() interface{} { - n := len(th.items) - item := th.items[n-1] - th.items[n-1] = nil // avoid memory leak - th.items = th.items[0 : n-1] - delete(th.lookup, item.id) - return item -} - -func (th *internalTxHeap) Get(id ids.ID) (*txEntry, bool) { - entry, ok := th.lookup[id] - if !ok { - return nil, false - } - return entry, true -} - -func (th *internalTxHeap) Has(id ids.ID) bool { - _, has := th.Get(id) - return has -} - -type txHeap struct { - maxHeap *internalTxHeap - minHeap *internalTxHeap -} - -func newTxHeap(maxSize int) *txHeap { - return &txHeap{ - maxHeap: newInternalTxHeap(maxSize, false), - minHeap: newInternalTxHeap(maxSize, true), - } -} - -func (th *txHeap) Push(tx *atomic.Tx, gasPrice uint64) { - txID := tx.ID() - oldLen := th.Len() - heap.Push(th.maxHeap, &txEntry{ - id: txID, - gasPrice: gasPrice, - tx: tx, - index: oldLen, - }) - heap.Push(th.minHeap, &txEntry{ - id: txID, - gasPrice: gasPrice, - 
tx: tx, - index: oldLen, - }) -} - -// Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMax() (*atomic.Tx, uint64) { - txEntry := th.maxHeap.items[0] - return txEntry.tx, txEntry.gasPrice -} - -// Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMin() (*atomic.Tx, uint64) { - txEntry := th.minHeap.items[0] - return txEntry.tx, txEntry.gasPrice -} - -// Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMax() *atomic.Tx { - return th.Remove(th.maxHeap.items[0].id) -} - -// Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMin() *atomic.Tx { - return th.Remove(th.minHeap.items[0].id) -} - -func (th *txHeap) Remove(id ids.ID) *atomic.Tx { - maxEntry, ok := th.maxHeap.Get(id) - if !ok { - return nil - } - heap.Remove(th.maxHeap, maxEntry.index) - - minEntry, ok := th.minHeap.Get(id) - if !ok { - // This should never happen, as that would mean the heaps are out of - // sync. - return nil - } - return heap.Remove(th.minHeap, minEntry.index).(*txEntry).tx -} - -func (th *txHeap) Len() int { - return th.maxHeap.Len() -} - -func (th *txHeap) Get(id ids.ID) (*atomic.Tx, bool) { - txEntry, ok := th.maxHeap.Get(id) - if !ok { - return nil, false - } - return txEntry.tx, true -} - -func (th *txHeap) Has(id ids.ID) bool { - return th.maxHeap.Has(id) -} diff --git a/plugin/evm/tx_heap_test.go b/plugin/evm/tx_heap_test.go deleted file mode 100644 index a054b7362e..0000000000 --- a/plugin/evm/tx_heap_test.go +++ /dev/null @@ -1,143 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package evm - -import ( - "testing" - - "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/stretchr/testify/assert" -) - -func TestTxHeap(t *testing.T) { - var ( - tx0 = &atomic.Tx{ - UnsignedAtomicTx: &atomic.UnsignedImportTx{ - NetworkID: 0, - }, - } - tx0Bytes = []byte{0} - - tx1 = &atomic.Tx{ - UnsignedAtomicTx: &atomic.UnsignedImportTx{ - NetworkID: 1, - }, - } - tx1Bytes = []byte{1} - - tx2 = &atomic.Tx{ - UnsignedAtomicTx: &atomic.UnsignedImportTx{ - NetworkID: 2, - }, - } - tx2Bytes = []byte{2} - ) - tx0.Initialize(tx0Bytes, tx0Bytes) - tx1.Initialize(tx1Bytes, tx1Bytes) - tx2.Initialize(tx2Bytes, tx2Bytes) - - id0 := tx0.ID() - id1 := tx1.ID() - id2 := tx2.ID() - - t.Run("add/remove single entry", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - assert := assert.New(t) - h.Push(tx0, 5) - assert.True(h.Has(id0)) - gTx0, gHas0 := h.Get(id0) - assert.Equal(tx0, gTx0) - assert.True(gHas0) - h.Remove(id0) - assert.False(h.Has(id0)) - assert.Zero(h.Len()) - h.Push(tx0, 5) - assert.True(h.Has(id0)) - assert.Equal(1, h.Len()) - }) - - t.Run("add other items", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - assert := assert.New(t) - h.Push(tx1, 10) - assert.True(h.Has(id1)) - gTx1, gHas1 := h.Get(id1) - assert.Equal(tx1, gTx1) - assert.True(gHas1) - - h.Push(tx2, 2) - assert.True(h.Has(id2)) - gTx2, gHas2 := h.Get(id2) - assert.Equal(tx2, gTx2) - assert.True(gHas2) - - assert.Equal(id1, h.PopMax().ID()) - assert.Equal(id2, h.PopMax().ID()) - - assert.False(h.Has(id0)) - gTx0, gHas0 := h.Get(id0) - assert.Nil(gTx0) - assert.False(gHas0) - - assert.False(h.Has(id1)) - gTx1, gHas1 = h.Get(id1) - assert.Nil(gTx1) - assert.False(gHas1) - - assert.False(h.Has(id2)) - gTx2, gHas2 = h.Get(id2) - assert.Nil(gTx2) - assert.False(gHas2) - }) - - verifyRemovalOrder := func(t *testing.T, h *txHeap) { - t.Helper() - - assert := assert.New(t) - assert.Equal(id2, h.PopMin().ID()) - assert.True(h.Has(id0)) - 
assert.True(h.Has(id1)) - assert.False(h.Has(id2)) - assert.Equal(id0, h.PopMin().ID()) - assert.False(h.Has(id0)) - assert.True(h.Has(id1)) - assert.False(h.Has(id2)) - assert.Equal(id1, h.PopMin().ID()) - assert.False(h.Has(id0)) - assert.False(h.Has(id1)) - assert.False(h.Has(id2)) - } - - t.Run("drop", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - h.Push(tx0, 5) - h.Push(tx1, 10) - h.Push(tx2, 2) - verifyRemovalOrder(t, h) - }) - t.Run("drop (alt order)", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - h.Push(tx0, 5) - h.Push(tx2, 2) - h.Push(tx1, 10) - verifyRemovalOrder(t, h) - }) - t.Run("drop (alt order 2)", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - h.Push(tx2, 2) - h.Push(tx0, 5) - h.Push(tx1, 10) - verifyRemovalOrder(t, h) - }) -} diff --git a/plugin/evm/tx_test.go b/plugin/evm/tx_test.go index a710a3c9e1..9bef967e68 100644 --- a/plugin/evm/tx_test.go +++ b/plugin/evm/tx_test.go @@ -116,7 +116,7 @@ func executeTxTest(t *testing.T, test atomicTxTest) { } lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) - backend := &atomic.Backend{ + backend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 6900c26ea0..e2e53ca124 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -42,6 +42,7 @@ import ( "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/triedb/hashdb" @@ -75,7 +76,7 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/database" + avalanchedatabase "github.com/ava-labs/avalanchego/database" 
"github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -106,6 +107,7 @@ var ( _ block.StateSyncableVM = &VM{} _ statesyncclient.EthBlockParser = &VM{} _ secp256k1fx.VM = &VM{} + _ BlockAcceptor = &VM{} ) const ( @@ -152,13 +154,8 @@ var ( // Set last accepted key to be longer than the keys used to store accepted block IDs. lastAcceptedKey = []byte("last_accepted_key") acceptedPrefix = []byte("snowman_accepted") - metadataPrefix = []byte("metadata") warpPrefix = []byte("warp") ethDBPrefix = []byte("ethdb") - - // Prefixes for atomic trie - atomicTrieDBPrefix = []byte("atomicTrieDB") - atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") ) var ( @@ -228,19 +225,16 @@ type VM struct { // [db] is the VM's current database managed by ChainState db *versiondb.Database - // metadataDB is used to store one off keys. - metadataDB database.Database - // [chaindb] is the database supplied to the Ethereum backend chaindb ethdb.Database // [acceptedBlockDB] is the database to store the last accepted // block. - acceptedBlockDB database.Database + acceptedBlockDB avalanchedatabase.Database // [warpDB] is used to store warp message signatures // set to a prefixDB with the prefix [warpPrefix] - warpDB database.Database + warpDB avalanchedatabase.Database toEngine chan<- commonEng.Message @@ -249,11 +243,11 @@ type VM struct { // [atomicTxRepository] maintains two indexes on accepted atomic txs. // - txID to accepted atomic tx // - block height to list of atomic txs accepted on block at that height - atomicTxRepository AtomicTxRepository + atomicTxRepository atomic.AtomicTxRepository // [atomicTrie] maintains a merkle forest of [height]=>[atomic txs]. 
- atomicTrie AtomicTrie + atomicTrie atomic.AtomicTrie // [atomicBackend] abstracts verification and processing of atomic transactions - atomicBackend AtomicBackend + atomicBackend atomic.AtomicBackend builder *blockBuilder @@ -329,7 +323,7 @@ func (vm *VM) GetActivationTime() time.Time { func (vm *VM) Initialize( _ context.Context, chainCtx *snow.Context, - db database.Database, + db avalanchedatabase.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, @@ -560,7 +554,7 @@ func (vm *VM) Initialize( // clear warpdb on initialization if config enabled if vm.config.PruneWarpDB { - if err := database.Clear(vm.warpDB, ethdb.IdealBatchSize); err != nil { + if err := avalanchedatabase.Clear(vm.warpDB, ethdb.IdealBatchSize); err != nil { return fmt.Errorf("failed to prune warpDB: %w", err) } } @@ -592,11 +586,11 @@ func (vm *VM) Initialize( } // initialize atomic repository - vm.atomicTxRepository, err = NewAtomicTxRepository(vm.db, atomic.Codec, lastAcceptedHeight) + vm.atomicTxRepository, err = atomic.NewAtomicTxRepository(vm.db, atomic.Codec, lastAcceptedHeight) if err != nil { return fmt.Errorf("failed to create atomic repository: %w", err) } - vm.atomicBackend, err = NewAtomicBackend( + vm.atomicBackend, err = atomic.NewAtomicBackend( vm.db, vm.ctx.SharedMemory, bonusBlockHeights, vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash, vm.config.CommitInterval, @@ -623,7 +617,7 @@ func (vm *VM) Initialize( vm.setAppRequestHandlers() - vm.StateSyncServer = NewStateSyncServer(&stateSyncServerConfig{ + vm.StateSyncServer = NewStateSyncServer(&StateSyncServerConfig{ Chain: vm.blockChain, AtomicTrie: vm.atomicTrie, SyncableInterval: vm.config.StateSyncCommitInterval, @@ -703,10 +697,10 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } - vm.StateSyncClient = NewStateSyncClient(&stateSyncClientConfig{ - chain: vm.eth, - state: vm.State, - client: statesyncclient.NewClient( + vm.StateSyncClient = 
NewStateSyncClient(&StateSyncClientConfig{ + Chain: vm.eth, + State: vm.State, + Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.client, Codec: vm.networkCodec, @@ -715,17 +709,16 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { BlockParser: vm, }, ), - enabled: stateSyncEnabled, - skipResume: vm.config.StateSyncSkipResume, - stateSyncMinBlocks: vm.config.StateSyncMinBlocks, - stateSyncRequestSize: vm.config.StateSyncRequestSize, - lastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around - chaindb: vm.chaindb, - metadataDB: vm.metadataDB, - acceptedBlockDB: vm.acceptedBlockDB, - db: vm.db, - atomicBackend: vm.atomicBackend, - toEngine: vm.toEngine, + Enabled: stateSyncEnabled, + SkipResume: vm.config.StateSyncSkipResume, + StateSyncMinBlocks: vm.config.StateSyncMinBlocks, + StateSyncRequestSize: vm.config.StateSyncRequestSize, + LastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around + ChaindDB: vm.chaindb, + DB: vm.db, + AtomicBackend: vm.atomicBackend, + ToEngine: vm.toEngine, + Acceptor: vm, }) // If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume @@ -1347,10 +1340,10 @@ func (vm *VM) ParseEthBlock(b []byte) (*types.Block, error) { // by ChainState. func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { ethBlock := vm.blockChain.GetBlockByHash(common.Hash(id)) - // If [ethBlock] is nil, return [database.ErrNotFound] here + // If [ethBlock] is nil, return [avalanchedatabase.ErrNotFound] here // so that the miss is considered cacheable. if ethBlock == nil { - return nil, database.ErrNotFound + return nil, avalanchedatabase.ErrNotFound } // Note: the status of block is set by ChainState return vm.newBlock(ethBlock) @@ -1372,7 +1365,7 @@ func (vm *VM) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowman.Block if acceptedBlkID != blkID { // The provided block is not accepted. 
- return nil, database.ErrNotFound + return nil, avalanchedatabase.ErrNotFound } return blk, nil } @@ -1398,17 +1391,17 @@ func (vm *VM) VerifyHeightIndex(context.Context) error { // GetBlockIDAtHeight returns the canonical block at [height]. // Note: the engine assumes that if a block is not found at [height], then -// [database.ErrNotFound] will be returned. This indicates that the VM has state +// [avalanchedatabase.ErrNotFound] will be returned. This indicates that the VM has state // synced and does not have all historical blocks available. func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { lastAcceptedBlock := vm.LastAcceptedBlock() if lastAcceptedBlock.Height() < height { - return ids.ID{}, database.ErrNotFound + return ids.ID{}, avalanchedatabase.ErrNotFound } hash := vm.blockChain.GetCanonicalHash(height) if hash == (common.Hash{}) { - return ids.ID{}, database.ErrNotFound + return ids.ID{}, avalanchedatabase.ErrNotFound } return ids.ID(hash), nil } @@ -1485,14 +1478,13 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { } // initializeDBs initializes the databases used by the VM. -// coreth always uses the avalanchego provided database. +// coreth always uses the avalanchego provided database. +func (vm *VM) initializeDBs(db avalanchedatabase.Database) error { // Use NewNested rather than New so that the structure of the database // remains the same regardless of the provided baseDB type. 
- vm.chaindb = rawdb.NewDatabase(Database{prefixdb.NewNested(ethDBPrefix, db)}) + vm.chaindb = rawdb.NewDatabase(database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, db))) vm.db = versiondb.New(db) vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db) - vm.metadataDB = prefixdb.New(metadataPrefix, vm.db) // Note warpDB is not part of versiondb because it is not necessary // that warp signatures are committed to the database atomically with // the last accepted block. @@ -1527,7 +1519,7 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { if tx, height, err := vm.atomicTxRepository.GetByTxID(txID); err == nil { return tx, atomic.Accepted, height, nil - } else if err != database.ErrNotFound { + } else if err != avalanchedatabase.ErrNotFound { return nil, atomic.Unknown, 0, err } tx, dropped, found := vm.mempool.GetTx(txID) @@ -1618,7 +1610,7 @@ func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, if !ok { return fmt.Errorf("parent block %s had unexpected type %T", parentIntf.ID(), parentIntf) } - atomicBackend := &atomic.Backend{ + atomicBackend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, @@ -1657,7 +1649,7 @@ func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.I // Ensure each tx in [txs] doesn't conflict with any other atomic tx in // a processing ancestor block. inputs := set.Set[ids.ID]{} - atomicBackend := &atomic.Backend{ + atomicBackend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, @@ -1773,7 +1765,7 @@ func (vm *VM) readLastAccepted() (common.Hash, uint64, error) { // initialize state with the genesis block. 
lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) switch { - case lastAcceptedErr == database.ErrNotFound: + case lastAcceptedErr == avalanchedatabase.ErrNotFound: // If there is nothing in the database, return the genesis block hash and height return vm.genesisHash, 0, nil case lastAcceptedErr != nil: @@ -1888,3 +1880,7 @@ func (vm *VM) newExportTx( return tx, nil } + +func (vm *VM) PutLastAcceptedID(ID ids.ID) error { + return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) +} diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 56b56cf93c..3a2c3a0de9 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -39,10 +39,8 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/cb58" - "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" @@ -56,7 +54,6 @@ import ( commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" - constantsEng "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" @@ -65,19 +62,15 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" accountKeystore "github.com/ava-labs/coreth/accounts/keystore" ) var ( testNetworkID uint32 = 10 - testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} - testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} nonExistentID = ids.ID{'F'} testKeys []*secp256k1.PrivateKey testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] 
testShortIDAddrs []ids.ShortID - testAvaxAssetID = ids.ID{1, 2, 3} username = "Johns" password = "CjasdjhiPeirbSenfeI13" // #nosec G101 @@ -198,41 +191,6 @@ func BuildGenesisTest(t *testing.T, genesisJSON string) []byte { return genesisBytes } -func NewContext() *snow.Context { - ctx := utils.TestSnowContext() - ctx.NodeID = ids.GenerateTestNodeID() - ctx.NetworkID = testNetworkID - ctx.ChainID = testCChainID - ctx.AVAXAssetID = testAvaxAssetID - ctx.XChainID = testXChainID - ctx.SharedMemory = testSharedMemory() - aliaser := ctx.BCLookup.(ids.Aliaser) - _ = aliaser.Alias(testCChainID, "C") - _ = aliaser.Alias(testCChainID, testCChainID.String()) - _ = aliaser.Alias(testXChainID, "X") - _ = aliaser.Alias(testXChainID, testXChainID.String()) - ctx.ValidatorState = &validatorstest.State{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constantsEng.PlatformChainID: constantsEng.PrimaryNetworkID, - testXChainID: constantsEng.PrimaryNetworkID, - testCChainID: constantsEng.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errors.New("unknown chain") - } - return subnetID, nil - }, - } - blsSecretKey, err := bls.NewSigner() - if err != nil { - panic(err) - } - ctx.WarpSigner = avalancheWarp.NewSigner(blsSecretKey, ctx.NetworkID, ctx.ChainID) - ctx.PublicKey = blsSecretKey.PublicKey() - return ctx -} - // setupGenesis sets up the genesis // If [genesisJSON] is empty, defaults to using [genesisJSONLatest] func setupGenesis( @@ -248,7 +206,7 @@ func setupGenesis( genesisJSON = genesisJSONLatest } genesisBytes := BuildGenesisTest(t, genesisJSON) - ctx := NewContext() + ctx := utils.TestSnowContext() baseDB := memdb.New() @@ -807,7 +765,7 @@ func TestBuildEthTxBlock(t *testing.T) { restartedVM := &VM{} if err := restartedVM.Initialize( context.Background(), - NewContext(), + utils.TestSnowContext(), dbManager, []byte(genesisJSONApricotPhase2), []byte(""), @@ -1509,6 +1467,16 @@ func 
TestConflictingTransitiveAncestryWithGap(t *testing.T) { } } +type wrappedBackend struct { + atomic.AtomicBackend + registeredBonusBlocks map[uint64]common.Hash +} + +func (w *wrappedBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool { + hash, ok := w.registeredBonusBlocks[blockHeight] + return ok && blockHash.Cmp(hash) == 0 +} + func TestBonusBlocksTxs(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") @@ -1566,7 +1534,11 @@ func TestBonusBlocksTxs(t *testing.T) { } // Make [blk] a bonus block. - vm.atomicBackend.(*atomicBackend).bonusBlocks = map[uint64]ids.ID{blk.Height(): blk.ID()} + wrappedBackend := &wrappedBackend{ + AtomicBackend: vm.atomicBackend, + registeredBonusBlocks: map[uint64]common.Hash{blk.Height(): common.Hash(blk.ID())}, + } + vm.atomicBackend = wrappedBackend // Remove the UTXOs from shared memory, so that non-bonus blocks will fail verification if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil { diff --git a/utils/snow.go b/utils/snow.go index 36b9b7b7fb..b8557e249b 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -8,6 +8,8 @@ import ( "errors" "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" @@ -19,9 +21,9 @@ import ( ) var ( - testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} - testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} - testChainID = ids.ID{'t', 'e', 's', 't', 'c', 'h', 'a', 'i', 'n'} + testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} + testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} + TestAvaxAssetID = ids.ID{1, 2, 3} ) func TestSnowContext() *snow.Context { @@ -31,19 +33,30 @@ func TestSnowContext() *snow.Context 
{ } pk := sk.PublicKey() networkID := constants.UnitTestID - chainID := testChainID + chainID := testCChainID + + aliaser := ids.NewAliaser() + _ = aliaser.Alias(testCChainID, "C") + _ = aliaser.Alias(testCChainID, testCChainID.String()) + _ = aliaser.Alias(testXChainID, "X") + _ = aliaser.Alias(testXChainID, testXChainID.String()) + + m := atomic.NewMemory(memdb.New()) + sm := m.NewSharedMemory(testCChainID) ctx := &snow.Context{ NetworkID: networkID, SubnetID: ids.Empty, ChainID: chainID, + AVAXAssetID: TestAvaxAssetID, NodeID: ids.GenerateTestNodeID(), + SharedMemory: sm, XChainID: testXChainID, CChainID: testCChainID, PublicKey: pk, WarpSigner: warp.NewSigner(sk, networkID, chainID), Log: logging.NoLog{}, - BCLookup: ids.NewAliaser(), + BCLookup: aliaser, Metrics: metrics.NewPrefixGatherer(), ChainDataDir: "", ValidatorState: NewTestValidatorState(),