diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go
index a9038a0f7a..685b034d14 100644
--- a/accounts/abi/bind/util_test.go
+++ b/accounts/abi/bind/util_test.go
@@ -125,10 +125,15 @@ func TestWaitDeployedCornerCases(t *testing.T) {
// Create a transaction to an account.
code := "6060604052600a8060106000396000f360606040526008565b00"
tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
- tx, _ = types.SignTx(tx, types.LatestSigner(params.TestChainConfig), testKey)
+ tx, err := types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey)
+ if err != nil {
+ t.Fatalf("Failed to sign transaction: %s", err)
+ }
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- backend.Client().SendTransaction(ctx, tx)
+ if err := backend.Client().SendTransaction(ctx, tx); err != nil {
+ t.Fatalf("Failed to send transaction: %s", err)
+ }
backend.Commit(true)
notContractCreation := errors.New("tx is not contract creation")
if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != notContractCreation.Error() {
diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index dc00530e91..822ea8fe22 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -16,8 +16,8 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/core/block_validator.go b/core/block_validator.go
index a75eeb01a1..2c0fdceaf3 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -34,7 +34,7 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/libevm/trie"
)
// BlockValidator is responsible for validating block headers, uncles and
diff --git a/core/blockchain.go b/core/blockchain.go
index 30214c68ad..1f94fe845d 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -48,8 +48,6 @@ import (
"github.com/ava-labs/coreth/internal/version"
"github.com/ava-labs/coreth/metrics"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
@@ -58,6 +56,8 @@ import (
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/event"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
var (
@@ -177,18 +177,19 @@ type CacheConfig struct {
// triedbConfig derives the configures for trie database.
func (c *CacheConfig) triedbConfig() *triedb.Config {
config := &triedb.Config{Preimages: c.Preimages}
- if c.StateScheme == rawdb.HashScheme {
- config.HashDB = &hashdb.Config{
+ if c.StateScheme == rawdb.HashScheme || c.StateScheme == "" {
+ config.DBOverride = hashdb.Config{
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
StatsPrefix: trieCleanCacheStatsNamespace,
- }
+ ReferenceRoot: true, // Automatically reference root nodes when an update is made
+ }.BackendConstructor
}
if c.StateScheme == rawdb.PathScheme {
- config.PathDB = &pathdb.Config{
+ config.DBOverride = pathdb.Config{
StateHistory: c.StateHistory,
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024,
- }
+ }.BackendConstructor
}
return config
}
@@ -1125,8 +1126,8 @@ func (bc *BlockChain) newTip(block *types.Block) bool {
// canonical chain.
// writeBlockAndSetHead expects to be the last verification step during InsertBlock
// since it creates a reference that will only be cleaned up by Accept/Reject.
-func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
- if err := bc.writeBlockWithState(block, receipts, state); err != nil {
+func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, parentRoot common.Hash, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
+ if err := bc.writeBlockWithState(block, parentRoot, receipts, state); err != nil {
return err
}
@@ -1143,7 +1144,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
-func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
+func (bc *BlockChain) writeBlockWithState(block *types.Block, parentRoot common.Hash, receipts []*types.Receipt, state *state.StateDB) error {
// Irrelevant of the canonical status, write the block itself to the database.
//
// Note all the components of block(hash->number map, header, body, receipts)
@@ -1157,14 +1158,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
// Commit all cached state changes into underlying memory database.
- // If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
- // diff layer for the block.
var err error
- if bc.snaps == nil {
- _, err = state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), true)
- } else {
- _, err = state.CommitWithSnap(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
- }
+ _, err = bc.commitWithSnap(block, parentRoot, state)
if err != nil {
return err
}
@@ -1367,7 +1362,7 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
// will be cleaned up in Accept/Reject so we need to ensure an error cannot occur
// later in verification, since that would cause the referenced root to never be dereferenced.
wstart := time.Now()
- if err := bc.writeBlockAndSetHead(block, receipts, logs, statedb); err != nil {
+ if err := bc.writeBlockAndSetHead(block, parent.Root, receipts, logs, statedb); err != nil {
return err
}
// Update the metrics touched during block commit
@@ -1667,7 +1662,7 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block)
if snap == nil {
return common.Hash{}, fmt.Errorf("failed to get snapshot for parent root: %s", parentRoot)
}
- statedb, err = state.NewWithSnapshot(parentRoot, bc.stateCache, snap)
+ statedb, err = state.New(parentRoot, bc.stateCache, bc.snaps)
}
if err != nil {
return common.Hash{}, fmt.Errorf("could not fetch state for (%s: %d): %v", parent.Hash().Hex(), parent.NumberU64(), err)
@@ -1692,12 +1687,28 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block)
log.Debug("Processed block", "block", current.Hash(), "number", current.NumberU64())
// Commit all cached state changes into underlying memory database.
- // If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
- // diff layer for the block.
- if bc.snaps == nil {
- return statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), false)
+ return bc.commitWithSnap(current, parentRoot, statedb)
+}
+
+func (bc *BlockChain) commitWithSnap(
+ current *types.Block, parentRoot common.Hash, statedb *state.StateDB,
+) (common.Hash, error) {
+ // blockHashes must be passed through Commit since snapshots are based on the
+ // block hash.
+ blockHashes := snapshot.WithBlockHashes(current.Hash(), current.ParentHash())
+ root, err := statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), blockHashes)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Upstream does not perform a snapshot update if the root is the same as the
+ // parent root; however, here the snapshots are based on the block hash, so
+ // this update is necessary. Note that blockHashes are passed here as well.
+ if bc.snaps != nil && root == parentRoot {
+ if err := bc.snaps.Update(root, parentRoot, nil, nil, nil, blockHashes); err != nil {
+ return common.Hash{}, err
+ }
}
- return statedb.CommitWithSnap(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
+ return root, nil
}
// initSnapshot instantiates a Snapshot instance and adds it to [bc]
@@ -1838,7 +1849,6 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error
// Flatten snapshot if initialized, holding a reference to the state root until the next block
// is processed.
if err := bc.flattenSnapshot(func() error {
- triedb.Reference(root, common.Hash{})
if previousRoot != (common.Hash{}) {
triedb.Dereference(previousRoot)
}
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index 335ef2fcb8..96199417ee 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -33,10 +33,10 @@ import (
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/event"
+ "github.com/ava-labs/libevm/triedb"
)
// CurrentHeader retrieves the current head header of the canonical chain. The
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index e8c812f261..4f29d198ce 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -38,10 +38,10 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/require"
)
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 52b6351922..9a2d833cd7 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -37,10 +37,10 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -298,7 +298,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
// Write state changes to db
- root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), false)
+ root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 7ff11def9d..06b482388d 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -34,10 +34,10 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/triedb"
)
func ExampleGenerateChain() {
diff --git a/core/extstate/statedb.go b/core/extstate/statedb.go
index 86687acca8..1c81601064 100644
--- a/core/extstate/statedb.go
+++ b/core/extstate/statedb.go
@@ -15,8 +15,9 @@ import (
type VmStateDB interface {
vm.StateDB
+ Logs() []*types.Log
+
GetTxHash() common.Hash
- GetLogData() (topics [][]common.Hash, data [][]byte)
GetBalanceMultiCoin(common.Address, common.Hash) *big.Int
AddBalanceMultiCoin(common.Address, common.Hash, *big.Int)
SubBalanceMultiCoin(common.Address, common.Hash, *big.Int)
@@ -36,6 +37,16 @@ func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, d
s.VmStateDB.Prepare(rules, sender, coinbase, dst, precompiles, list)
}
+// GetLogData returns the underlying topics and data from each log included in the StateDB.
+// It is intended as a test helper function.
+func (s *StateDB) GetLogData() (topics [][]common.Hash, data [][]byte) {
+ for _, log := range s.Logs() {
+ topics = append(topics, log.Topics)
+ data = append(data, common.CopyBytes(log.Data))
+ }
+ return topics, data
+}
+
// GetPredicateStorageSlots returns the storage slots associated with the address, index pair.
// A list of access tuples can be included within transaction types post EIP-2930. The address
// is declared directly on the access tuple and the index is the i'th occurrence of an access
diff --git a/core/gen_genesis.go b/core/gen_genesis.go
index 8d1aee7c0f..c3ffd954b3 100644
--- a/core/gen_genesis.go
+++ b/core/gen_genesis.go
@@ -19,21 +19,21 @@ var _ = (*genesisSpecMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
- Config *params.ChainConfig `json:"config"`
- Nonce math.HexOrDecimal64 `json:"nonce"`
- Timestamp math.HexOrDecimal64 `json:"timestamp"`
- ExtraData hexutil.Bytes `json:"extraData"`
- GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
- Mixhash common.Hash `json:"mixHash"`
- Coinbase common.Address `json:"coinbase"`
- Alloc map[common.UnprefixedAddress]types.GenesisAccount `json:"alloc" gencodec:"required"`
- Number math.HexOrDecimal64 `json:"number"`
- GasUsed math.HexOrDecimal64 `json:"gasUsed"`
- ParentHash common.Hash `json:"parentHash"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
- ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
- BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
+ Config *params.ChainConfig `json:"config"`
+ Nonce math.HexOrDecimal64 `json:"nonce"`
+ Timestamp math.HexOrDecimal64 `json:"timestamp"`
+ ExtraData hexutil.Bytes `json:"extraData"`
+ GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
+ Mixhash common.Hash `json:"mixHash"`
+ Coinbase common.Address `json:"coinbase"`
+ Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
+ Number math.HexOrDecimal64 `json:"number"`
+ GasUsed math.HexOrDecimal64 `json:"gasUsed"`
+ ParentHash common.Hash `json:"parentHash"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
}
var enc Genesis
enc.Config = g.Config
@@ -45,7 +45,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.Mixhash = g.Mixhash
enc.Coinbase = g.Coinbase
if g.Alloc != nil {
- enc.Alloc = make(map[common.UnprefixedAddress]types.GenesisAccount, len(g.Alloc))
+ enc.Alloc = make(map[common.UnprefixedAddress]types.Account, len(g.Alloc))
for k, v := range g.Alloc {
enc.Alloc[common.UnprefixedAddress(k)] = v
}
@@ -62,21 +62,21 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (g *Genesis) UnmarshalJSON(input []byte) error {
type Genesis struct {
- Config *params.ChainConfig `json:"config"`
- Nonce *math.HexOrDecimal64 `json:"nonce"`
- Timestamp *math.HexOrDecimal64 `json:"timestamp"`
- ExtraData *hexutil.Bytes `json:"extraData"`
- GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
- Mixhash *common.Hash `json:"mixHash"`
- Coinbase *common.Address `json:"coinbase"`
- Alloc map[common.UnprefixedAddress]types.GenesisAccount `json:"alloc" gencodec:"required"`
- Number *math.HexOrDecimal64 `json:"number"`
- GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
- ParentHash *common.Hash `json:"parentHash"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
- ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
- BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
+ Config *params.ChainConfig `json:"config"`
+ Nonce *math.HexOrDecimal64 `json:"nonce"`
+ Timestamp *math.HexOrDecimal64 `json:"timestamp"`
+ ExtraData *hexutil.Bytes `json:"extraData"`
+ GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
+ Mixhash *common.Hash `json:"mixHash"`
+ Coinbase *common.Address `json:"coinbase"`
+ Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
+ Number *math.HexOrDecimal64 `json:"number"`
+ GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
+ ParentHash *common.Hash `json:"parentHash"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
diff --git a/core/genesis.go b/core/genesis.go
index 9b0ed37946..ae3dca01cb 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -37,14 +37,14 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/hexutil"
"github.com/ava-labs/libevm/common/math"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -53,7 +53,7 @@ import (
var errGenesisNoConfig = errors.New("genesis has no chain configuration")
// Deprecated: use types.GenesisAccount instead.
-type GenesisAccount = types.GenesisAccount
+type GenesisAccount = types.Account
// Deprecated: use types.GenesisAlloc instead.
type GenesisAlloc = types.GenesisAlloc
@@ -219,8 +219,8 @@ func (g *Genesis) trieConfig() *triedb.Config {
return nil
}
return &triedb.Config{
- PathDB: pathdb.Defaults,
- IsVerkle: true,
+ DBOverride: pathdb.Defaults.BackendConstructor,
+ IsVerkle: true,
}
}
@@ -258,11 +258,6 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *triedb.Database) *types.Blo
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
- if account.MCBalance != nil {
- for coinID, value := range account.MCBalance {
- statedb.AddBalanceMultiCoin(addr, coinID, value)
- }
- }
}
root := statedb.IntermediateRoot(false)
head.Root = root
@@ -299,7 +294,7 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *triedb.Database) *types.Blo
}
}
- statedb.Commit(0, false, false)
+ statedb.Commit(0, false)
// Commit newly generated states into disk if it's not empty.
if root != types.EmptyRootHash {
if err := triedb.Commit(root, true); err != nil {
diff --git a/core/genesis_extra_test.go b/core/genesis_extra_test.go
index 91e7a47176..7889d6668d 100644
--- a/core/genesis_extra_test.go
+++ b/core/genesis_extra_test.go
@@ -34,9 +34,9 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/require"
)
diff --git a/core/genesis_test.go b/core/genesis_test.go
index ae3ed5de70..94d3435f7d 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -38,13 +38,13 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/precompile/contracts/warp"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
"github.com/davecgh/go-spew/spew"
"github.com/stretchr/testify/require"
)
@@ -285,7 +285,7 @@ func newDbConfig(scheme string) *triedb.Config {
if scheme == rawdb.HashScheme {
return triedb.HashDefaults
}
- return &triedb.Config{PathDB: pathdb.Defaults}
+ return &triedb.Config{DBOverride: pathdb.Defaults.BackendConstructor}
}
func TestVerkleGenesisCommit(t *testing.T) {
@@ -325,7 +325,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
}
db := rawdb.NewMemoryDatabase()
- triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults})
+ triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, DBOverride: pathdb.Defaults.BackendConstructor})
block := genesis.MustCommit(db, triedb)
if !bytes.Equal(block.Root().Bytes(), expected) {
t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
diff --git a/core/state/access_list.go b/core/state/access_list.go
deleted file mode 100644
index 88bddc1ff3..0000000000
--- a/core/state/access_list.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// (c) 2019-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "github.com/ava-labs/libevm/common"
-)
-
-type accessList struct {
- addresses map[common.Address]int
- slots []map[common.Hash]struct{}
-}
-
-// ContainsAddress returns true if the address is in the access list.
-func (al *accessList) ContainsAddress(address common.Address) bool {
- _, ok := al.addresses[address]
- return ok
-}
-
-// Contains checks if a slot within an account is present in the access list, returning
-// separate flags for the presence of the account and the slot respectively.
-func (al *accessList) Contains(address common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
- idx, ok := al.addresses[address]
- if !ok {
- // no such address (and hence zero slots)
- return false, false
- }
- if idx == -1 {
- // address yes, but no slots
- return true, false
- }
- _, slotPresent = al.slots[idx][slot]
- return true, slotPresent
-}
-
-// newAccessList creates a new accessList.
-func newAccessList() *accessList {
- return &accessList{
- addresses: make(map[common.Address]int),
- }
-}
-
-// Copy creates an independent copy of an accessList.
-func (a *accessList) Copy() *accessList {
- cp := newAccessList()
- for k, v := range a.addresses {
- cp.addresses[k] = v
- }
- cp.slots = make([]map[common.Hash]struct{}, len(a.slots))
- for i, slotMap := range a.slots {
- newSlotmap := make(map[common.Hash]struct{}, len(slotMap))
- for k := range slotMap {
- newSlotmap[k] = struct{}{}
- }
- cp.slots[i] = newSlotmap
- }
- return cp
-}
-
-// AddAddress adds an address to the access list, and returns 'true' if the operation
-// caused a change (addr was not previously in the list).
-func (al *accessList) AddAddress(address common.Address) bool {
- if _, present := al.addresses[address]; present {
- return false
- }
- al.addresses[address] = -1
- return true
-}
-
-// AddSlot adds the specified (addr, slot) combo to the access list.
-// Return values are:
-// - address added
-// - slot added
-// For any 'true' value returned, a corresponding journal entry must be made.
-func (al *accessList) AddSlot(address common.Address, slot common.Hash) (addrChange bool, slotChange bool) {
- idx, addrPresent := al.addresses[address]
- if !addrPresent || idx == -1 {
- // Address not present, or addr present but no slots there
- al.addresses[address] = len(al.slots)
- slotmap := map[common.Hash]struct{}{slot: {}}
- al.slots = append(al.slots, slotmap)
- return !addrPresent, true
- }
- // There is already an (address,slot) mapping
- slotmap := al.slots[idx]
- if _, ok := slotmap[slot]; !ok {
- slotmap[slot] = struct{}{}
- // Journal add slot change
- return false, true
- }
- // No changes required
- return false, false
-}
-
-// DeleteSlot removes an (address, slot)-tuple from the access list.
-// This operation needs to be performed in the same order as the addition happened.
-// This method is meant to be used by the journal, which maintains ordering of
-// operations.
-func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) {
- idx, addrOk := al.addresses[address]
- // There are two ways this can fail
- if !addrOk {
- panic("reverting slot change, address not present in list")
- }
- slotmap := al.slots[idx]
- delete(slotmap, slot)
- // If that was the last (first) slot, remove it
- // Since additions and rollbacks are always performed in order,
- // we can delete the item without worrying about screwing up later indices
- if len(slotmap) == 0 {
- al.slots = al.slots[:idx]
- al.addresses[address] = -1
- }
-}
-
-// DeleteAddress removes an address from the access list. This operation
-// needs to be performed in the same order as the addition happened.
-// This method is meant to be used by the journal, which maintains ordering of
-// operations.
-func (al *accessList) DeleteAddress(address common.Address) {
- delete(al.addresses, address)
-}
diff --git a/core/state/database.go b/core/state/database.go
index 1f9fd40f13..b810bf2c3d 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -31,15 +31,14 @@ import (
"fmt"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/utils"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/lru"
+ ethstate "github.com/ava-labs/libevm/core/state"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/utils"
+ "github.com/ava-labs/libevm/triedb"
"github.com/crate-crypto/go-ipa/banderwagon"
)
@@ -82,74 +81,7 @@ type Database interface {
}
// Trie is a Ethereum Merkle Patricia trie.
-type Trie interface {
- // GetKey returns the sha3 preimage of a hashed key that was previously used
- // to store a value.
- //
- // TODO(fjl): remove this when StateTrie is removed
- GetKey([]byte) []byte
-
- // GetAccount abstracts an account read from the trie. It retrieves the
- // account blob from the trie with provided account address and decodes it
- // with associated decoding algorithm. If the specified account is not in
- // the trie, nil will be returned. If the trie is corrupted(e.g. some nodes
- // are missing or the account blob is incorrect for decoding), an error will
- // be returned.
- GetAccount(address common.Address) (*types.StateAccount, error)
-
- // GetStorage returns the value for key stored in the trie. The value bytes
- // must not be modified by the caller. If a node was not found in the database,
- // a trie.MissingNodeError is returned.
- GetStorage(addr common.Address, key []byte) ([]byte, error)
-
- // UpdateAccount abstracts an account write to the trie. It encodes the
- // provided account object with associated algorithm and then updates it
- // in the trie with provided address.
- UpdateAccount(address common.Address, account *types.StateAccount) error
-
- // UpdateStorage associates key with value in the trie. If value has length zero,
- // any existing value is deleted from the trie. The value bytes must not be modified
- // by the caller while they are stored in the trie. If a node was not found in the
- // database, a trie.MissingNodeError is returned.
- UpdateStorage(addr common.Address, key, value []byte) error
-
- // DeleteAccount abstracts an account deletion from the trie.
- DeleteAccount(address common.Address) error
-
- // DeleteStorage removes any existing value for key from the trie. If a node
- // was not found in the database, a trie.MissingNodeError is returned.
- DeleteStorage(addr common.Address, key []byte) error
-
- // UpdateContractCode abstracts code write to the trie. It is expected
- // to be moved to the stateWriter interface when the latter is ready.
- UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error
-
- // Hash returns the root hash of the trie. It does not write to the database and
- // can be used even if the trie doesn't have one.
- Hash() common.Hash
-
- // Commit collects all dirty nodes in the trie and replace them with the
- // corresponding node hash. All collected nodes(including dirty leaves if
- // collectLeaf is true) will be encapsulated into a nodeset for return.
- // The returned nodeset can be nil if the trie is clean(nothing to commit).
- // Once the trie is committed, it's not usable anymore. A new trie must
- // be created with new root and updated trie database for following usage
- Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
-
- // NodeIterator returns an iterator that returns nodes of the trie. Iteration
- // starts at the key after the given start key. And error will be returned
- // if fails to create node iterator.
- NodeIterator(startKey []byte) (trie.NodeIterator, error)
-
- // Prove constructs a Merkle proof for key. The result contains all encoded nodes
- // on the path to the value at key. The value itself is also included in the last
- // node and can be retrieved by verifying the proof.
- //
- // If the trie does not contain a value for key, the returned proof contains all
- // nodes of the longest existing prefix of the key (at least the root), ending
- // with the node that proves the absence of the key.
- Prove(key []byte, proofDb ethdb.KeyValueWriter) error
-}
+type Trie = ethstate.Trie
// NewDatabase creates a backing store for state. The returned database is safe for
// concurrent use, but does not retain any recent trie nodes in memory. To keep some
diff --git a/core/state/dump.go b/core/state/dump.go
index a8239b616d..deac606017 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -27,222 +27,12 @@
package state
import (
- "encoding/json"
- "fmt"
- "time"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/common/hexutil"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/rlp"
+ ethstate "github.com/ava-labs/libevm/core/state"
)
-// DumpConfig is a set of options to control what portions of the state will be
-// iterated and collected.
-type DumpConfig struct {
- SkipCode bool
- SkipStorage bool
- OnlyWithAddresses bool
- Start []byte
- Max uint64
-}
-
-// DumpCollector interface which the state trie calls during iteration
-type DumpCollector interface {
- // OnRoot is called with the state root
- OnRoot(common.Hash)
- // OnAccount is called once for each account in the trie
- OnAccount(*common.Address, DumpAccount)
-}
-
-// DumpAccount represents an account in the state.
-type DumpAccount struct {
- Balance string `json:"balance"`
- Nonce uint64 `json:"nonce"`
- Root hexutil.Bytes `json:"root"`
- CodeHash hexutil.Bytes `json:"codeHash"`
- Code hexutil.Bytes `json:"code,omitempty"`
- IsMultiCoin bool `json:"isMultiCoin"`
- Storage map[common.Hash]string `json:"storage,omitempty"`
- Address *common.Address `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode
- AddressHash hexutil.Bytes `json:"key,omitempty"` // If we don't have address, we can output the key
-
-}
-
-// Dump represents the full dump in a collected format, as one large map.
-type Dump struct {
- Root string `json:"root"`
- Accounts map[string]DumpAccount `json:"accounts"`
- // Next can be set to represent that this dump is only partial, and Next
- // is where an iterator should be positioned in order to continue the dump.
- Next []byte `json:"next,omitempty"` // nil if no more accounts
-}
-
-// OnRoot implements DumpCollector interface
-func (d *Dump) OnRoot(root common.Hash) {
- d.Root = fmt.Sprintf("%x", root)
-}
-
-// OnAccount implements DumpCollector interface
-func (d *Dump) OnAccount(addr *common.Address, account DumpAccount) {
- if addr == nil {
- d.Accounts[fmt.Sprintf("pre(%s)", account.AddressHash)] = account
- }
- if addr != nil {
- d.Accounts[(*addr).String()] = account
- }
-}
-
-// iterativeDump is a DumpCollector-implementation which dumps output line-by-line iteratively.
-type iterativeDump struct {
- *json.Encoder
-}
-
-// OnAccount implements DumpCollector interface
-func (d iterativeDump) OnAccount(addr *common.Address, account DumpAccount) {
- dumpAccount := &DumpAccount{
- Balance: account.Balance,
- Nonce: account.Nonce,
- Root: account.Root,
- CodeHash: account.CodeHash,
- IsMultiCoin: account.IsMultiCoin,
- Code: account.Code,
- Storage: account.Storage,
- AddressHash: account.AddressHash,
- Address: addr,
- }
- d.Encode(dumpAccount)
-}
-
-// OnRoot implements DumpCollector interface
-func (d iterativeDump) OnRoot(root common.Hash) {
- d.Encode(struct {
- Root common.Hash `json:"root"`
- }{root})
-}
-
-// DumpToCollector iterates the state according to the given options and inserts
-// the items into a collector for aggregation or serialization.
-func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte) {
- // Sanitize the input to allow nil configs
- if conf == nil {
- conf = new(DumpConfig)
- }
- var (
- missingPreimages int
- accounts uint64
- start = time.Now()
- logged = time.Now()
- )
- log.Info("Trie dumping started", "root", s.trie.Hash())
- c.OnRoot(s.trie.Hash())
-
- trieIt, err := s.trie.NodeIterator(conf.Start)
- if err != nil {
- log.Error("Trie dumping error", "err", err)
- return nil
- }
- it := trie.NewIterator(trieIt)
- for it.Next() {
- var data types.StateAccount
- if err := rlp.DecodeBytes(it.Value, &data); err != nil {
- panic(err)
- }
- var (
- account = DumpAccount{
- Balance: data.Balance.String(),
- Nonce: data.Nonce,
- Root: data.Root[:],
- CodeHash: data.CodeHash,
- IsMultiCoin: data.IsMultiCoin,
- AddressHash: it.Key,
- }
- address *common.Address
- addr common.Address
- addrBytes = s.trie.GetKey(it.Key)
- )
- if addrBytes == nil {
- missingPreimages++
- if conf.OnlyWithAddresses {
- continue
- }
- } else {
- addr = common.BytesToAddress(addrBytes)
- address = &addr
- account.Address = address
- }
- obj := newObject(s, addr, &data)
- if !conf.SkipCode {
- account.Code = obj.Code()
- }
- if !conf.SkipStorage {
- account.Storage = make(map[common.Hash]string)
- tr, err := obj.getTrie()
- if err != nil {
- log.Error("Failed to load storage trie", "err", err)
- continue
- }
- trieIt, err := tr.NodeIterator(nil)
- if err != nil {
- log.Error("Failed to create trie iterator", "err", err)
- continue
- }
- storageIt := trie.NewIterator(trieIt)
- for storageIt.Next() {
- _, content, _, err := rlp.Split(storageIt.Value)
- if err != nil {
- log.Error("Failed to decode the value returned by iterator", "error", err)
- continue
- }
- account.Storage[common.BytesToHash(s.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content)
- }
- }
- c.OnAccount(address, account)
- accounts++
- if time.Since(logged) > 8*time.Second {
- log.Info("Trie dumping in progress", "at", it.Key, "accounts", accounts,
- "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- if conf.Max > 0 && accounts >= conf.Max {
- if it.Next() {
- nextKey = it.Key
- }
- break
- }
- }
- if missingPreimages > 0 {
- log.Warn("Dump incomplete due to missing preimages", "missing", missingPreimages)
- }
- log.Info("Trie dumping complete", "accounts", accounts,
- "elapsed", common.PrettyDuration(time.Since(start)))
-
- return nextKey
-}
-
-// RawDump returns the state. If the processing is aborted e.g. due to options
-// reaching Max, the `Next` key is set on the returned Dump.
-func (s *StateDB) RawDump(opts *DumpConfig) Dump {
- dump := &Dump{
- Accounts: make(map[string]DumpAccount),
- }
- dump.Next = s.DumpToCollector(dump, opts)
- return *dump
-}
-
-// Dump returns a JSON string representing the entire state as a single json-object
-func (s *StateDB) Dump(opts *DumpConfig) []byte {
- dump := s.RawDump(opts)
- json, err := json.MarshalIndent(dump, "", " ")
- if err != nil {
- log.Error("Error dumping state", "err", err)
- }
- return json
-}
-
-// IterativeDump dumps out accounts as json-objects, delimited by linebreaks on stdout
-func (s *StateDB) IterativeDump(opts *DumpConfig, output *json.Encoder) {
- s.DumpToCollector(iterativeDump{output}, opts)
-}
+type (
+ DumpConfig = ethstate.DumpConfig
+ DumpCollector = ethstate.DumpCollector
+ DumpAccount = ethstate.DumpAccount
+ Dump = ethstate.Dump
+)
diff --git a/core/state/iterator.go b/core/state/iterator.go
deleted file mode 100644
index 409a08f148..0000000000
--- a/core/state/iterator.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// nodeIterator is an iterator to traverse the entire state trie post-order,
-// including all of the contract code and contract state tries. Preimage is
-// required in order to resolve the contract address.
-type nodeIterator struct {
- state *StateDB // State being iterated
-
- stateIt trie.NodeIterator // Primary iterator for the global state trie
- dataIt trie.NodeIterator // Secondary iterator for the data trie of a contract
-
- accountHash common.Hash // Hash of the node containing the account
- codeHash common.Hash // Hash of the contract source code
- code []byte // Source code associated with a contract
-
- Hash common.Hash // Hash of the current entry being iterated (nil if not standalone)
- Parent common.Hash // Hash of the first full ancestor node (nil if current is the root)
-
- Error error // Failure set in case of an internal error in the iterator
-}
-
-// newNodeIterator creates an post-order state node iterator.
-func newNodeIterator(state *StateDB) *nodeIterator {
- return &nodeIterator{
- state: state,
- }
-}
-
-// Next moves the iterator to the next node, returning whether there are any
-// further nodes. In case of an internal error this method returns false and
-// sets the Error field to the encountered failure.
-func (it *nodeIterator) Next() bool {
- // If the iterator failed previously, don't do anything
- if it.Error != nil {
- return false
- }
- // Otherwise step forward with the iterator and report any errors
- if err := it.step(); err != nil {
- it.Error = err
- return false
- }
- return it.retrieve()
-}
-
-// step moves the iterator to the next entry of the state trie.
-func (it *nodeIterator) step() error {
- // Abort if we reached the end of the iteration
- if it.state == nil {
- return nil
- }
- // Initialize the iterator if we've just started
- var err error
- if it.stateIt == nil {
- it.stateIt, err = it.state.trie.NodeIterator(nil)
- if err != nil {
- return err
- }
- }
- // If we had data nodes previously, we surely have at least state nodes
- if it.dataIt != nil {
- if cont := it.dataIt.Next(true); !cont {
- if it.dataIt.Error() != nil {
- return it.dataIt.Error()
- }
- it.dataIt = nil
- }
- return nil
- }
- // If we had source code previously, discard that
- if it.code != nil {
- it.code = nil
- return nil
- }
- // Step to the next state trie node, terminating if we're out of nodes
- if cont := it.stateIt.Next(true); !cont {
- if it.stateIt.Error() != nil {
- return it.stateIt.Error()
- }
- it.state, it.stateIt = nil, nil
- return nil
- }
- // If the state trie node is an internal entry, leave as is
- if !it.stateIt.Leaf() {
- return nil
- }
- // Otherwise we've reached an account node, initiate data iteration
- var account types.StateAccount
- if err := rlp.DecodeBytes(it.stateIt.LeafBlob(), &account); err != nil {
- return err
- }
- // Lookup the preimage of account hash
- preimage := it.state.trie.GetKey(it.stateIt.LeafKey())
- if preimage == nil {
- return errors.New("account address is not available")
- }
- address := common.BytesToAddress(preimage)
-
- // Traverse the storage slots belong to the account
- dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root, it.state.trie)
- if err != nil {
- return err
- }
- it.dataIt, err = dataTrie.NodeIterator(nil)
- if err != nil {
- return err
- }
- if !it.dataIt.Next(true) {
- it.dataIt = nil
- }
- if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
- it.codeHash = common.BytesToHash(account.CodeHash)
- it.code, err = it.state.db.ContractCode(address, common.BytesToHash(account.CodeHash))
- if err != nil {
- return fmt.Errorf("code %x: %v", account.CodeHash, err)
- }
- }
- it.accountHash = it.stateIt.Parent()
- return nil
-}
-
-// retrieve pulls and caches the current state entry the iterator is traversing.
-// The method returns whether there are any more data left for inspection.
-func (it *nodeIterator) retrieve() bool {
- // Clear out any previously set values
- it.Hash = common.Hash{}
-
- // If the iteration's done, return no available data
- if it.state == nil {
- return false
- }
- // Otherwise retrieve the current entry
- switch {
- case it.dataIt != nil:
- it.Hash, it.Parent = it.dataIt.Hash(), it.dataIt.Parent()
- if it.Parent == (common.Hash{}) {
- it.Parent = it.accountHash
- }
- case it.code != nil:
- it.Hash, it.Parent = it.codeHash, it.accountHash
- case it.stateIt != nil:
- it.Hash, it.Parent = it.stateIt.Hash(), it.stateIt.Parent()
- }
- return true
-}
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
deleted file mode 100644
index 1f79618f9f..0000000000
--- a/core/state/iterator_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// (c) 2023, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
-)
-
-// Tests that the node iterator indeed walks over the entire database contents.
-func TestNodeIteratorCoverage(t *testing.T) {
- testNodeIteratorCoverage(t, rawdb.HashScheme)
- testNodeIteratorCoverage(t, rawdb.PathScheme)
-}
-
-func testNodeIteratorCoverage(t *testing.T, scheme string) {
- // Create some arbitrary test state to iterate
- db, sdb, ndb, root, _ := makeTestState(scheme)
- ndb.Commit(root, false)
-
- state, err := New(root, sdb, nil)
- if err != nil {
- t.Fatalf("failed to create state trie at %x: %v", root, err)
- }
- // Gather all the node hashes found by the iterator
- hashes := make(map[common.Hash]struct{})
- for it := newNodeIterator(state); it.Next(); {
- if it.Hash != (common.Hash{}) {
- hashes[it.Hash] = struct{}{}
- }
- }
- // Check in-disk nodes
- var (
- seenNodes = make(map[common.Hash]struct{})
- seenCodes = make(map[common.Hash]struct{})
- )
- it := db.NewIterator(nil, nil)
- for it.Next() {
- ok, hash := isTrieNode(scheme, it.Key(), it.Value())
- if !ok {
- continue
- }
- seenNodes[hash] = struct{}{}
- }
- it.Release()
-
- // Check in-disk codes
- it = db.NewIterator(nil, nil)
- for it.Next() {
- ok, hash := rawdb.IsCodeKey(it.Key())
- if !ok {
- continue
- }
- if _, ok := hashes[common.BytesToHash(hash)]; !ok {
- t.Errorf("state entry not reported %x", it.Key())
- }
- seenCodes[common.BytesToHash(hash)] = struct{}{}
- }
- it.Release()
-
- // Cross check the iterated hashes and the database/nodepool content
- for hash := range hashes {
- _, ok := seenNodes[hash]
- if !ok {
- _, ok = seenCodes[hash]
- }
- if !ok {
- t.Errorf("failed to retrieve reported node %x", hash)
- }
- }
-}
-
-// isTrieNode is a helper function which reports if the provided
-// database entry belongs to a trie node or not.
-func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) {
- if scheme == rawdb.HashScheme {
- if rawdb.IsLegacyTrieNode(key, val) {
- return true, common.BytesToHash(key)
- }
- } else {
- ok := rawdb.IsAccountTrieNode(key)
- if ok {
- return true, crypto.Keccak256Hash(val)
- }
- ok = rawdb.IsStorageTrieNode(key)
- if ok {
- return true, crypto.Keccak256Hash(val)
- }
- }
- return false, common.Hash{}
-}
diff --git a/core/state/journal.go b/core/state/journal.go
deleted file mode 100644
index 4a3dedf8e4..0000000000
--- a/core/state/journal.go
+++ /dev/null
@@ -1,321 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "github.com/ava-labs/libevm/common"
- "github.com/holiman/uint256"
-)
-
-// journalEntry is a modification entry in the state change journal that can be
-// reverted on demand.
-type journalEntry interface {
- // revert undoes the changes introduced by this journal entry.
- revert(*StateDB)
-
- // dirtied returns the Ethereum address modified by this journal entry.
- dirtied() *common.Address
-}
-
-// journal contains the list of state modifications applied since the last state
-// commit. These are tracked to be able to be reverted in the case of an execution
-// exception or request for reversal.
-type journal struct {
- entries []journalEntry // Current changes tracked by the journal
- dirties map[common.Address]int // Dirty accounts and the number of changes
-}
-
-// newJournal creates a new initialized journal.
-func newJournal() *journal {
- return &journal{
- dirties: make(map[common.Address]int),
- }
-}
-
-// append inserts a new modification entry to the end of the change journal.
-func (j *journal) append(entry journalEntry) {
- j.entries = append(j.entries, entry)
- if addr := entry.dirtied(); addr != nil {
- j.dirties[*addr]++
- }
-}
-
-// revert undoes a batch of journalled modifications along with any reverted
-// dirty handling too.
-func (j *journal) revert(statedb *StateDB, snapshot int) {
- for i := len(j.entries) - 1; i >= snapshot; i-- {
- // Undo the changes made by the operation
- j.entries[i].revert(statedb)
-
- // Drop any dirty tracking induced by the change
- if addr := j.entries[i].dirtied(); addr != nil {
- if j.dirties[*addr]--; j.dirties[*addr] == 0 {
- delete(j.dirties, *addr)
- }
- }
- }
- j.entries = j.entries[:snapshot]
-}
-
-// dirty explicitly sets an address to dirty, even if the change entries would
-// otherwise suggest it as clean. This method is an ugly hack to handle the RIPEMD
-// precompile consensus exception.
-func (j *journal) dirty(addr common.Address) {
- j.dirties[addr]++
-}
-
-// length returns the current number of entries in the journal.
-func (j *journal) length() int {
- return len(j.entries)
-}
-
-type (
- // Changes to the account trie.
- createObjectChange struct {
- account *common.Address
- }
- resetObjectChange struct {
- account *common.Address
- prev *stateObject
- prevdestruct bool
- prevAccount []byte
- prevStorage map[common.Hash][]byte
-
- prevAccountOriginExist bool
- prevAccountOrigin []byte
- prevStorageOrigin map[common.Hash][]byte
- }
- selfDestructChange struct {
- account *common.Address
- prev bool // whether account had already self-destructed
- prevbalance *uint256.Int
- }
-
- // Changes to individual accounts.
- balanceChange struct {
- account *common.Address
- prev *uint256.Int
- }
- multiCoinEnable struct {
- account *common.Address
- }
- nonceChange struct {
- account *common.Address
- prev uint64
- }
- storageChange struct {
- account *common.Address
- key, prevalue common.Hash
- }
- codeChange struct {
- account *common.Address
- prevcode, prevhash []byte
- }
-
- // Changes to other state values.
- refundChange struct {
- prev uint64
- }
- addLogChange struct {
- txhash common.Hash
- }
- addPreimageChange struct {
- hash common.Hash
- }
- touchChange struct {
- account *common.Address
- }
- // Changes to the access list
- accessListAddAccountChange struct {
- address *common.Address
- }
- accessListAddSlotChange struct {
- address *common.Address
- slot *common.Hash
- }
-
- transientStorageChange struct {
- account *common.Address
- key, prevalue common.Hash
- }
-)
-
-func (ch createObjectChange) revert(s *StateDB) {
- delete(s.stateObjects, *ch.account)
- delete(s.stateObjectsDirty, *ch.account)
-}
-
-func (ch createObjectChange) dirtied() *common.Address {
- return ch.account
-}
-
-func (ch resetObjectChange) revert(s *StateDB) {
- s.setStateObject(ch.prev)
- if !ch.prevdestruct {
- delete(s.stateObjectsDestruct, ch.prev.address)
- }
- if ch.prevAccount != nil {
- s.accounts[ch.prev.addrHash] = ch.prevAccount
- }
- if ch.prevStorage != nil {
- s.storages[ch.prev.addrHash] = ch.prevStorage
- }
- if ch.prevAccountOriginExist {
- s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin
- }
- if ch.prevStorageOrigin != nil {
- s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin
- }
-}
-
-func (ch resetObjectChange) dirtied() *common.Address {
- return ch.account
-}
-
-func (ch selfDestructChange) revert(s *StateDB) {
- obj := s.getStateObject(*ch.account)
- if obj != nil {
- obj.selfDestructed = ch.prev
- obj.setBalance(ch.prevbalance)
- }
-}
-
-func (ch selfDestructChange) dirtied() *common.Address {
- return ch.account
-}
-
-var ripemd = common.HexToAddress("0000000000000000000000000000000000000003")
-
-func (ch touchChange) revert(s *StateDB) {
-}
-
-func (ch touchChange) dirtied() *common.Address {
- return ch.account
-}
-
-func (ch balanceChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setBalance(ch.prev)
-}
-
-func (ch balanceChange) dirtied() *common.Address {
- return ch.account
-}
-
-func (ch multiCoinEnable) revert(s *StateDB) {
- s.getStateObject(*ch.account).data.IsMultiCoin = false
-}
-
-func (ch multiCoinEnable) dirtied() *common.Address {
- return ch.account
-}
-
-func (ch nonceChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setNonce(ch.prev)
-}
-
-func (ch nonceChange) dirtied() *common.Address {
- return ch.account
-}
-
-func (ch codeChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode)
-}
-
-func (ch codeChange) dirtied() *common.Address {
- return ch.account
-}
-
-func (ch storageChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setState(ch.key, ch.prevalue)
-}
-
-func (ch storageChange) dirtied() *common.Address {
- return ch.account
-}
-
-func (ch transientStorageChange) revert(s *StateDB) {
- s.setTransientState(*ch.account, ch.key, ch.prevalue)
-}
-
-func (ch transientStorageChange) dirtied() *common.Address {
- return nil
-}
-
-func (ch refundChange) revert(s *StateDB) {
- s.refund = ch.prev
-}
-
-func (ch refundChange) dirtied() *common.Address {
- return nil
-}
-
-func (ch addLogChange) revert(s *StateDB) {
- logs := s.logs[ch.txhash]
- if len(logs) == 1 {
- delete(s.logs, ch.txhash)
- } else {
- s.logs[ch.txhash] = logs[:len(logs)-1]
- }
- s.logSize--
-}
-
-func (ch addLogChange) dirtied() *common.Address {
- return nil
-}
-
-func (ch addPreimageChange) revert(s *StateDB) {
- delete(s.preimages, ch.hash)
-}
-
-func (ch addPreimageChange) dirtied() *common.Address {
- return nil
-}
-
-func (ch accessListAddAccountChange) revert(s *StateDB) {
- /*
- One important invariant here, is that whenever a (addr, slot) is added, if the
- addr is not already present, the add causes two journal entries:
- - one for the address,
- - one for the (address,slot)
- Therefore, when unrolling the change, we can always blindly delete the
- (addr) at this point, since no storage adds can remain when come upon
- a single (addr) change.
- */
- s.accessList.DeleteAddress(*ch.address)
-}
-
-func (ch accessListAddAccountChange) dirtied() *common.Address {
- return nil
-}
-
-func (ch accessListAddSlotChange) revert(s *StateDB) {
- s.accessList.DeleteSlot(*ch.address, *ch.slot)
-}
-
-func (ch accessListAddSlotChange) dirtied() *common.Address {
- return nil
-}
diff --git a/core/state/metrics.go b/core/state/metrics.go
deleted file mode 100644
index 5e2f060c3a..0000000000
--- a/core/state/metrics.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// (c) 2019-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import "github.com/ava-labs/coreth/metrics"
-
-var (
- accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
- storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
- accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
- storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
- accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil)
- storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
- accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
- storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
-
- slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil)
- slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil)
- slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil)
- slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
- slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
- slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil)
-)
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 79ce9f3815..659c31195e 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -40,12 +40,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
const (
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index efe115a952..ea2b8f7c79 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -37,11 +37,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
)
// trieKV represents a trie key-value pair
@@ -92,7 +92,7 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd
rawdb.WriteCode(dst, codeHash, code)
}
// Then migrate all storage trie nodes into the tmp db.
- storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{}, false)
+ storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{})
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index 684b740d85..87fc3f4e2f 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -33,11 +33,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/triedb"
)
// diskLayer is a low level persistent snapshot built on top of a key-value store.
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index dadd54ef07..1f76dbf1e0 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -121,7 +121,7 @@ func TestDiskMerge(t *testing.T) {
base.Storage(conNukeCache, conNukeCacheSlot)
// Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
+ if err := snaps.UpdateWithBlockHashes(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
accDelNoCache: {},
accDelCache: {},
conNukeNoCache: {},
@@ -341,7 +341,7 @@ func TestDiskPartialMerge(t *testing.T) {
assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
// Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
+ if err := snaps.UpdateWithBlockHashes(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
accDelNoCache: {},
accDelCache: {},
conNukeNoCache: {},
@@ -460,7 +460,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
dl := snaps.disklayer()
dl.genMarker = genMarker
// Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, nil, map[common.Hash][]byte{
+ if err := snaps.UpdateWithBlockHashes(diffBlockHash, diffRoot, baseBlockHash, nil, map[common.Hash][]byte{
accTwo: accTwo[:],
}, nil); err != nil {
t.Fatalf("failed to update snapshot tree: %v", err)
@@ -478,7 +478,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
}
// Test scenario 2, the disk layer is fully generated
// Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffTwoBlockHash, diffTwoRoot, diffBlockHash, nil, map[common.Hash][]byte{
+ if err := snaps.UpdateWithBlockHashes(diffTwoBlockHash, diffTwoRoot, diffBlockHash, nil, map[common.Hash][]byte{
accThree: accThree.Bytes(),
}, map[common.Hash]map[common.Hash][]byte{
accThree: {accThreeSlot: accThreeSlot.Bytes()},
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 7305750025..1021cd1ed6 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -33,13 +33,13 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
const (
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 31a1f5271f..fe1b33f17b 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -34,15 +34,15 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
@@ -176,9 +176,9 @@ func newHelper(scheme string) *testHelper {
diskdb := rawdb.NewMemoryDatabase()
config := &triedb.Config{}
if scheme == rawdb.PathScheme {
- config.PathDB = &pathdb.Config{} // disable caching
+ config.DBOverride = pathdb.Config{}.BackendConstructor // disable caching
} else {
- config.HashDB = &hashdb.Config{} // disable caching
+ config.DBOverride = hashdb.Config{}.BackendConstructor // disable caching
}
triedb := triedb.NewDatabase(diskdb, config)
accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb)
diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go
index 7cc5dee33f..addae92deb 100644
--- a/core/state/snapshot/iterator.go
+++ b/core/state/snapshot/iterator.go
@@ -33,6 +33,7 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/libevm/common"
+ ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot"
"github.com/ava-labs/libevm/ethdb"
)
@@ -69,13 +70,7 @@ type AccountIterator interface {
// StorageIterator is an iterator to step over the specific storage in a snapshot,
// which may or may not be composed of multiple layers.
-type StorageIterator interface {
- Iterator
-
- // Slot returns the storage slot the iterator is currently at. An error will
- // be returned if the iterator becomes invalid
- Slot() []byte
-}
+type StorageIterator = ethsnapshot.StorageIterator
// diffAccountIterator is an account iterator that steps over the accounts (both
// live and deleted) contained within a single diff layer. Higher order iterators
diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go
index f668e4df8b..b2862407b2 100644
--- a/core/state/snapshot/iterator_test.go
+++ b/core/state/snapshot/iterator_test.go
@@ -222,13 +222,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
// Create a snapshot tree with a single empty disk layer with the specified root and block hash
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Verify the single and multi-layer iterators
@@ -263,13 +263,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"),
nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"),
nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"),
nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
// Verify the single and multi-layer iterators
@@ -279,7 +279,7 @@ func TestStorageIteratorTraversal(t *testing.T) {
verifyIterator(t, 3, diffIter, verifyNothing)
verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
- it, _ := snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ := snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 6, it, verifyStorage)
it.Release()
@@ -296,7 +296,7 @@ func TestStorageIteratorTraversal(t *testing.T) {
}
verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 6, it, verifyStorage)
it.Release()
}
@@ -342,14 +342,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
}
}
// Assemble a stack of snapshots from the account layers
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, a, nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, b, nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, c, nil)
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil, d, nil)
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), nil, e, nil)
- snaps.Update(common.HexToHash("0x07"), common.HexToHash("0xff07"), common.HexToHash("0x06"), nil, f, nil)
- snaps.Update(common.HexToHash("0x08"), common.HexToHash("0xff08"), common.HexToHash("0x07"), nil, g, nil)
- snaps.Update(common.HexToHash("0x09"), common.HexToHash("0xff09"), common.HexToHash("0x08"), nil, h, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, a, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, b, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, c, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil, d, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), nil, e, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x07"), common.HexToHash("0xff07"), common.HexToHash("0x06"), nil, f, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x08"), common.HexToHash("0xff08"), common.HexToHash("0x07"), nil, g, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x09"), common.HexToHash("0xff09"), common.HexToHash("0x08"), nil, h, nil)
it, _ := snaps.AccountIterator(common.HexToHash("0xff09"), common.Hash{}, false)
head := snaps.Snapshot(common.HexToHash("0xff09"))
@@ -437,16 +437,16 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
}
}
// Assemble a stack of snapshots from the account layers
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
- snaps.Update(common.HexToHash("0x07"), common.HexToHash("0xff07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
- snaps.Update(common.HexToHash("0x08"), common.HexToHash("0xff08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
- snaps.Update(common.HexToHash("0x09"), common.HexToHash("0xff09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
-
- it, _ := snaps.StorageIterator(common.HexToHash("0xff09"), common.HexToHash("0xaa"), common.Hash{}, false)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x07"), common.HexToHash("0xff07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x08"), common.HexToHash("0xff08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x09"), common.HexToHash("0xff09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
+
+ it, _ := snaps.StorageIterator(common.HexToHash("0xff09"), common.HexToHash("0xaa"), common.Hash{})
head := snaps.Snapshot(common.HexToHash("0xff09"))
for it.Next() {
hash := it.Hash()
@@ -474,7 +474,7 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
}
}
- it, _ = snaps.StorageIterator(common.HexToHash("0xff09"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff09"), common.HexToHash("0xaa"), common.Hash{})
for it.Next() {
hash := it.Hash()
want, err := head.Storage(common.HexToHash("0xaa"), hash)
@@ -503,7 +503,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
// Build up a large stack of snapshots
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
for i := 1; i < 128; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
}
// Iterate the entire stack and ensure everything is hit only once
head := snaps.Snapshot(common.HexToHash("0xff80"))
@@ -543,13 +543,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Create a stack of diffs on top
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Create an iterator and flatten the data from underneath it
@@ -568,13 +568,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
func TestAccountIteratorSeek(t *testing.T) {
// Create a snapshot stack with some initial data
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Account set is now
@@ -623,13 +623,13 @@ func TestStorageIteratorSeek(t *testing.T) {
// Create a snapshot stack with some initial data
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
// Account set is now
@@ -637,35 +637,35 @@ func TestStorageIteratorSeek(t *testing.T) {
// 03: 01, 02, 03, 05 (, 05), 06
// 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08
// Construct various iterators and ensure their traversal is correct
- it, _ := snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x01"), false)
+ it, _ := snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
defer it.Release()
verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05
- it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x02"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x02"))
defer it.Release()
verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05
- it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x5"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x5"))
defer it.Release()
verifyIterator(t, 1, it, verifyStorage) // expected: 05
- it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x6"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x6"))
defer it.Release()
verifyIterator(t, 0, it, verifyStorage) // expected: nothing
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x01"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
defer it.Release()
verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x05"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x05"))
defer it.Release()
verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x08"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x08"))
defer it.Release()
verifyIterator(t, 1, it, verifyStorage) // expected: 08
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x09"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x09"))
defer it.Release()
verifyIterator(t, 0, it, verifyStorage) // expected: nothing
}
@@ -677,17 +677,17 @@ func TestAccountIteratorDeletions(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"),
nil, randomAccountSet("0x11", "0x22", "0x33"), nil)
deleted := common.HexToHash("0x22")
destructed := map[common.Hash]struct{}{
deleted: {},
}
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"),
destructed, randomAccountSet("0x11", "0x33"), nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"),
nil, randomAccountSet("0x33", "0x44", "0x55"), nil)
// The output should be 11,33,44,55
@@ -714,19 +714,19 @@ func TestStorageIteratorDeletions(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
// The output should be 02,04,05,06
- it, _ := snaps.StorageIterator(common.HexToHash("0xff03"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ := snaps.StorageIterator(common.HexToHash("0xff03"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 4, it, verifyStorage)
it.Release()
// The output should be 04,05,06
- it, _ = snaps.StorageIterator(common.HexToHash("0xff03"), common.HexToHash("0xaa"), common.HexToHash("0x03"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff03"), common.HexToHash("0xaa"), common.HexToHash("0x03"))
verifyIterator(t, 3, it, verifyStorage)
it.Release()
@@ -734,24 +734,24 @@ func TestStorageIteratorDeletions(t *testing.T) {
destructed := map[common.Hash]struct{}{
common.HexToHash("0xaa"): {},
}
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), destructed, nil, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), destructed, nil, nil)
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 0, it, verifyStorage)
it.Release()
// Re-insert the slots of the same account
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
// The output should be 07,08,09
- it, _ = snaps.StorageIterator(common.HexToHash("0xff05"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff05"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 3, it, verifyStorage)
it.Release()
// Destruct the whole storage but re-create the account in the same layer
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
- it, _ = snaps.StorageIterator(common.HexToHash("0xff06"), common.HexToHash("0xaa"), common.Hash{}, false)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff06"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
it.Release()
@@ -783,7 +783,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
// Build up a large stack of snapshots
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
for i := 1; i <= 100; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
}
// We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results.
@@ -869,9 +869,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
}
// Build up a large stack of snapshots
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
for i := 2; i <= 100; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
}
// We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results.
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 374de3b71d..f793ffb832 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -33,11 +33,11 @@ import (
"time"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/triedb"
)
// journalGenerator is a disk layer entry containing the generator progress marker.
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index a60e28eacf..b025498e0b 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -35,12 +35,13 @@ import (
"time"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
+ ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/libevm/stateconf"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/triedb"
)
const (
@@ -118,28 +119,7 @@ var (
)
// Snapshot represents the functionality supported by a snapshot storage layer.
-type Snapshot interface {
- // Root returns the root hash for which this snapshot was made.
- Root() common.Hash
-
- // Account directly retrieves the account associated with a particular hash in
- // the snapshot slim data format.
- Account(hash common.Hash) (*types.SlimAccount, error)
-
- // AccountRLP directly retrieves the account RLP associated with a particular
- // hash in the snapshot slim data format.
- AccountRLP(hash common.Hash) ([]byte, error)
-
- // Storage directly retrieves the storage data associated with a particular hash,
- // within a particular account.
- Storage(accountHash, storageHash common.Hash) ([]byte, error)
-
- // AccountIterator creates an account iterator over the account trie given by the provided root hash.
- AccountIterator(seek common.Hash) AccountIterator
-
- // StorageIterator creates a storage iterator over the storage trie given by the provided root hash.
- StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
-}
+type Snapshot = ethsnapshot.Snapshot
// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
@@ -164,6 +144,12 @@ type snapshot interface {
// Stale return whether this layer has become stale (was flattened across) or
// if it's still live.
Stale() bool
+
+ // AccountIterator creates an account iterator over an arbitrary layer.
+ AccountIterator(seek common.Hash) AccountIterator
+
+ // StorageIterator creates a storage iterator over an arbitrary layer.
+ StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
}
// Config includes the configurations for snapshots.
@@ -321,9 +307,44 @@ func (t *Tree) Snapshots(blockHash common.Hash, limits int, nodisk bool) []Snaps
return ret
}
+type blockHashes struct {
+ blockHash common.Hash
+ parentBlockHash common.Hash
+}
+
+func WithBlockHashes(blockHash, parentBlockHash common.Hash) stateconf.SnapshotUpdateOption {
+ return stateconf.WithUpdatePayload(blockHashes{blockHash, parentBlockHash})
+}
+
// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
-func (t *Tree) Update(blockHash, blockRoot, parentBlockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+func (t *Tree) Update(
+ blockRoot common.Hash,
+ parentRoot common.Hash,
+ destructs map[common.Hash]struct{},
+ accounts map[common.Hash][]byte,
+ storage map[common.Hash]map[common.Hash][]byte,
+ opts ...stateconf.SnapshotUpdateOption,
+) error {
+ if len(opts) == 0 {
+ return fmt.Errorf("missing block hashes")
+ }
+
+ payload := stateconf.ExtractUpdatePayload(opts[0])
+ p, ok := payload.(blockHashes)
+ if !ok {
+ return fmt.Errorf("invalid block hashes payload type: %T", payload)
+ }
+
+ return t.UpdateWithBlockHashes(p.blockHash, blockRoot, p.parentBlockHash, destructs, accounts, storage)
+}
+
+func (t *Tree) UpdateWithBlockHashes(
+ blockHash, blockRoot, parentBlockHash common.Hash,
+ destructs map[common.Hash]struct{},
+ accounts map[common.Hash][]byte,
+ storage map[common.Hash]map[common.Hash][]byte,
+) error {
t.lock.Lock()
defer t.lock.Unlock()
@@ -381,6 +402,10 @@ func (t *Tree) verifyIntegrity(base *diskLayer, waitBuild bool) error {
return nil
}
+func (t *Tree) Cap(root common.Hash, layers int) error {
+ return nil // No-op as this code uses Flatten on block accept instead
+}
+
// Flatten flattens the snapshot for [blockHash] into its parent. if its
// parent is not a disk layer, Flatten will return an error.
// Note: a blockHash is used instead of a state root so that the exact state
@@ -823,7 +848,11 @@ func (t *Tree) AccountIterator(root common.Hash, seek common.Hash, force bool) (
// account. The iterator will be move to the specific start position. When [force]
// is true, a new account iterator is created without acquiring the [snapTree]
// lock and without confirming that the snapshot on the disk layer is fully generated.
-func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash, force bool) (StorageIterator, error) {
+func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ return t.StorageIteratorWithForce(root, account, seek, false)
+}
+
+func (t *Tree) StorageIteratorWithForce(root common.Hash, account common.Hash, seek common.Hash, force bool) (StorageIterator, error) {
if !force {
ok, err := t.generating()
if err != nil {
@@ -854,7 +883,7 @@ func (t *Tree) verify(root common.Hash, force bool) error {
defer acctIt.Release()
got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
- storageIt, err := t.StorageIterator(root, accountHash, common.Hash{}, force)
+ storageIt, err := t.StorageIteratorWithForce(root, accountHash, common.Hash{}, force)
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/snapshot/snapshot_ext.go b/core/state/snapshot/snapshot_ext.go
index 829e242d39..8b73f83fcc 100644
--- a/core/state/snapshot/snapshot_ext.go
+++ b/core/state/snapshot/snapshot_ext.go
@@ -23,9 +23,19 @@ func (t *Tree) DiskStorageIterator(account common.Hash, seek common.Hash) Storag
return it
}
+type SnapshotIterable interface {
+ Snapshot
+
+ // AccountIterator creates an account iterator over an arbitrary layer.
+ AccountIterator(seek common.Hash) AccountIterator
+
+ // StorageIterator creates a storage iterator over an arbitrary layer.
+ StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
+}
+
// NewDiskLayer creates a diskLayer for direct access to the contents of the on-disk
// snapshot. Does not perform any validation.
-func NewDiskLayer(diskdb ethdb.KeyValueStore) Snapshot {
+func NewDiskLayer(diskdb ethdb.KeyValueStore) SnapshotIterable {
return &diskLayer{
diskdb: diskdb,
created: time.Now(),
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 45c502f96b..cc98830e13 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -106,7 +106,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(),
}
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
if n := snaps.NumStateLayers(); n != 2 {
@@ -147,10 +147,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(),
}
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
- if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
if n := snaps.NumBlockLayers(); n != 3 {
@@ -196,13 +196,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(),
}
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
- if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
- if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
if n := snaps.NumStateLayers(); n != 4 {
@@ -244,12 +244,12 @@ func TestPostFlattenBasicDataAccess(t *testing.T) {
// Create a starting base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// The lowest difflayer
- snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0xffa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
- snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xffa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
- snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xffb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xa1"), common.HexToHash("0xffa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xa2"), common.HexToHash("0xffa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xb2"), common.HexToHash("0xffb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
- snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xffa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
- snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xffb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xa3"), common.HexToHash("0xffa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xb3"), common.HexToHash("0xffb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
// checkExist verifies if an account exists in a snapshot
checkExist := func(layer Snapshot, key string) error {
@@ -434,10 +434,10 @@ func TestTreeFlattenDoesNotDropPendingLayers(t *testing.T) {
diffBlockAHash := common.Hash{0xee, 0xee, byte(i)}
diffBlockBHash := common.Hash{0xdd, 0xdd, byte(i)}
diffBlockRoot := common.Hash{0xff, 0xff, byte(i)}
- if err := snaps.Update(diffBlockAHash, diffBlockRoot, parentAHash, nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockAHash, diffBlockRoot, parentAHash, nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
- if err := snaps.Update(diffBlockBHash, diffBlockRoot, parentBHash, nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockBHash, diffBlockRoot, parentBHash, nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
@@ -509,7 +509,7 @@ func TestStaleOriginLayer(t *testing.T) {
}
// Create diff layer A containing account 0xa1
- if err := snaps.Update(diffBlockHashA, diffRootA, baseBlockHash, nil, accountsA, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashA, diffRootA, baseBlockHash, nil, accountsA, nil); err != nil {
t.Errorf("failed to create diff layer A: %v", err)
}
// Flatten account 0xa1 to disk
@@ -519,12 +519,12 @@ func TestStaleOriginLayer(t *testing.T) {
}
// Create diff layer B containing account 0xa2
// The bloom filter should contain only 0xa2.
- if err := snaps.Update(diffBlockHashB, diffRootB, diffBlockHashA, nil, accountsB, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashB, diffRootB, diffBlockHashA, nil, accountsB, nil); err != nil {
t.Errorf("failed to create diff layer B: %v", err)
}
// Create diff layer C containing account 0xa3
// The bloom filter should contain 0xa2 and 0xa3
- if err := snaps.Update(diffBlockHashC, diffRootC, diffBlockHashB, nil, accountsC, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashC, diffRootC, diffBlockHashB, nil, accountsC, nil); err != nil {
t.Errorf("failed to create diff layer C: %v", err)
}
@@ -591,16 +591,16 @@ func TestRebloomOnFlatten(t *testing.T) {
}
// Build the tree
- if err := snaps.Update(diffBlockHashA, diffRootA, baseBlockHash, nil, accountsA, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashA, diffRootA, baseBlockHash, nil, accountsA, nil); err != nil {
t.Errorf("failed to create diff layer A: %v", err)
}
- if err := snaps.Update(diffBlockHashB, diffRootB, diffBlockHashA, nil, accountsB, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashB, diffRootB, diffBlockHashA, nil, accountsB, nil); err != nil {
t.Errorf("failed to create diff layer B: %v", err)
}
- if err := snaps.Update(diffBlockHashC, diffRootC, diffBlockHashB, nil, accountsC, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashC, diffRootC, diffBlockHashB, nil, accountsC, nil); err != nil {
t.Errorf("failed to create diff layer C: %v", err)
}
- if err := snaps.Update(diffBlockHashD, diffRootD, diffBlockHashB, nil, accountsD, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashD, diffRootD, diffBlockHashB, nil, accountsD, nil); err != nil {
t.Errorf("failed to create diff layer D: %v", err)
}
@@ -687,9 +687,9 @@ func TestReadStateDuringFlattening(t *testing.T) {
snaps := NewTestTree(rawdb.NewMemoryDatabase(), baseBlockHash, baseRoot)
// 4 layers in total, 3 diff layers and 1 disk layers
- snaps.Update(diffBlockHashA, diffRootA, baseBlockHash, nil, setAccount("0xa1"), nil)
- snaps.Update(diffBlockHashB, diffRootB, diffBlockHashA, nil, setAccount("0xa2"), nil)
- snaps.Update(diffBlockHashC, diffRootC, diffBlockHashB, nil, setAccount("0xa3"), nil)
+ snaps.UpdateWithBlockHashes(diffBlockHashA, diffRootA, baseBlockHash, nil, setAccount("0xa1"), nil)
+ snaps.UpdateWithBlockHashes(diffBlockHashB, diffRootB, diffBlockHashA, nil, setAccount("0xa2"), nil)
+ snaps.UpdateWithBlockHashes(diffBlockHashC, diffRootC, diffBlockHashB, nil, setAccount("0xa3"), nil)
// Obtain the topmost snapshot handler for state accessing
snap := snaps.Snapshot(diffRootC)
diff --git a/core/state/state_object.go b/core/state/state_object.go
deleted file mode 100644
index dd3d966157..0000000000
--- a/core/state/state_object.go
+++ /dev/null
@@ -1,621 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "bytes"
- "fmt"
- "io"
- "math/big"
- "time"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
- "github.com/holiman/uint256"
-)
-
-type Code []byte
-
-func (c Code) String() string {
- return string(c) //strings.Join(Disassemble(c), " ")
-}
-
-type Storage map[common.Hash]common.Hash
-
-func (s Storage) String() (str string) {
- for key, value := range s {
- str += fmt.Sprintf("%X : %X\n", key, value)
- }
- return
-}
-
-func (s Storage) Copy() Storage {
- cpy := make(Storage, len(s))
- for key, value := range s {
- cpy[key] = value
- }
- return cpy
-}
-
-// stateObject represents an Ethereum account which is being modified.
-//
-// The usage pattern is as follows:
-// - First you need to obtain a state object.
-// - Account values as well as storages can be accessed and modified through the object.
-// - Finally, call commit to return the changes of storage trie and update account data.
-type stateObject struct {
- db *StateDB
- address common.Address // address of ethereum account
- addrHash common.Hash // hash of ethereum address of the account
- origin *types.StateAccount // Account original data without any change applied, nil means it was not existent
- data types.StateAccount // Account data with all mutations applied in the scope of block
-
- // Write caches.
- trie Trie // storage trie, which becomes non-nil on first access
- code Code // contract bytecode, which gets set when code is loaded
-
- originStorage Storage // Storage cache of original entries to dedup rewrites
- pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
- dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction
-
- // Cache flags.
- dirtyCode bool // true if the code was updated
-
- // Flag whether the account was marked as self-destructed. The self-destructed account
- // is still accessible in the scope of same transaction.
- selfDestructed bool
-
- // Flag whether the account was marked as deleted. A self-destructed account
- // or an account that is considered as empty will be marked as deleted at
- // the end of transaction and no longer accessible anymore.
- deleted bool
-
- // Flag whether the object was created in the current transaction
- created bool
-}
-
-// empty returns whether the account is considered empty.
-func (s *stateObject) empty() bool {
- return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes()) && !s.data.IsMultiCoin
-}
-
-// newObject creates a state object.
-func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject {
- var (
- origin = acct
- created = acct == nil // true if the account was not existent
- )
- if acct == nil {
- acct = types.NewEmptyStateAccount()
- }
- return &stateObject{
- db: db,
- address: address,
- addrHash: crypto.Keccak256Hash(address[:]),
- origin: origin,
- data: *acct,
- originStorage: make(Storage),
- pendingStorage: make(Storage),
- dirtyStorage: make(Storage),
- created: created,
- }
-}
-
-// EncodeRLP implements rlp.Encoder.
-func (s *stateObject) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, &s.data)
-}
-
-func (s *stateObject) markSelfdestructed() {
- s.selfDestructed = true
-}
-
-func (s *stateObject) touch() {
- s.db.journal.append(touchChange{
- account: &s.address,
- })
- if s.address == ripemd {
- // Explicitly put it in the dirty-cache, which is otherwise generated from
- // flattened journals.
- s.db.journal.dirty(s.address)
- }
-}
-
-// getTrie returns the associated storage trie. The trie will be opened
-// if it's not loaded previously. An error will be returned if trie can't
-// be loaded.
-func (s *stateObject) getTrie() (Trie, error) {
- if s.trie == nil {
- // Try fetching from prefetcher first
- if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil {
- // When the miner is creating the pending state, there is no prefetcher
- s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root)
- }
- if s.trie == nil {
- tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie)
- if err != nil {
- return nil, err
- }
- s.trie = tr
- }
- }
- return s.trie, nil
-}
-
-// GetState retrieves a value from the account storage trie.
-func (s *stateObject) GetState(key common.Hash) common.Hash {
- // If we have a dirty value for this state entry, return it
- value, dirty := s.dirtyStorage[key]
- if dirty {
- return value
- }
- // Otherwise return the entry's original value
- return s.GetCommittedState(key)
-}
-
-// GetCommittedState retrieves a value from the committed account storage trie.
-func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
- // If we have a pending write or clean cached, return that
- if value, pending := s.pendingStorage[key]; pending {
- return value
- }
- if value, cached := s.originStorage[key]; cached {
- return value
- }
- // If the object was destructed in *this* block (and potentially resurrected),
- // the storage has been cleared out, and we should *not* consult the previous
- // database about any storage values. The only possible alternatives are:
- // 1) resurrect happened, and new slot values were set -- those should
- // have been handles via pendingStorage above.
- // 2) we don't have new values, and can deliver empty response back
- if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed {
- return common.Hash{}
- }
- // If no live objects are available, attempt to use snapshots
- var (
- enc []byte
- err error
- value common.Hash
- )
- if s.db.snap != nil {
- start := time.Now()
- enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
- if metrics.EnabledExpensive {
- s.db.SnapshotStorageReads += time.Since(start)
- }
- if len(enc) > 0 {
- _, content, _, err := rlp.Split(enc)
- if err != nil {
- s.db.setError(err)
- }
- value.SetBytes(content)
- }
- }
- // If the snapshot is unavailable or reading from it fails, load from the database.
- if s.db.snap == nil || err != nil {
- start := time.Now()
- tr, err := s.getTrie()
- if err != nil {
- s.db.setError(err)
- return common.Hash{}
- }
- val, err := tr.GetStorage(s.address, key.Bytes())
- if metrics.EnabledExpensive {
- s.db.StorageReads += time.Since(start)
- }
- if err != nil {
- s.db.setError(err)
- return common.Hash{}
- }
- value.SetBytes(val)
- }
- s.originStorage[key] = value
- return value
-}
-
-// SetState updates a value in account storage.
-func (s *stateObject) SetState(key, value common.Hash) {
- // If the new value is the same as old, don't set
- prev := s.GetState(key)
- if prev == value {
- return
- }
- // New value is different, update and journal the change
- s.db.journal.append(storageChange{
- account: &s.address,
- key: key,
- prevalue: prev,
- })
- s.setState(key, value)
-}
-
-func (s *stateObject) setState(key, value common.Hash) {
- s.dirtyStorage[key] = value
-}
-
-// finalise moves all dirty storage slots into the pending area to be hashed or
-// committed later. It is invoked at the end of every transaction.
-func (s *stateObject) finalise(prefetch bool) {
- slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage))
- for key, value := range s.dirtyStorage {
- s.pendingStorage[key] = value
- if value != s.originStorage[key] {
- slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure
- }
- }
- if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
- s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch)
- }
- if len(s.dirtyStorage) > 0 {
- s.dirtyStorage = make(Storage)
- }
-}
-
-// updateTrie is responsible for persisting cached storage changes into the
-// object's storage trie. In case the storage trie is not yet loaded, this
-// function will load the trie automatically. If any issues arise during the
-// loading or updating of the trie, an error will be returned. Furthermore,
-// this function will return the mutated storage trie, or nil if there is no
-// storage change at all.
-func (s *stateObject) updateTrie() (Trie, error) {
- // Make sure all dirty slots are finalized into the pending storage area
- s.finalise(false)
-
- // Short circuit if nothing changed, don't bother with hashing anything
- if len(s.pendingStorage) == 0 {
- return s.trie, nil
- }
- // Track the amount of time wasted on updating the storage trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
- }
- // The snapshot storage map for the object
- var (
- storage map[common.Hash][]byte
- origin map[common.Hash][]byte
- )
- tr, err := s.getTrie()
- if err != nil {
- s.db.setError(err)
- return nil, err
- }
- // Insert all the pending storage updates into the trie
- usedStorage := make([][]byte, 0, len(s.pendingStorage))
- for key, value := range s.pendingStorage {
- // Skip noop changes, persist actual changes
- if value == s.originStorage[key] {
- continue
- }
- prev := s.originStorage[key]
- s.originStorage[key] = value
-
- var encoded []byte // rlp-encoded value to be used by the snapshot
- if (value == common.Hash{}) {
- if err := tr.DeleteStorage(s.address, key[:]); err != nil {
- s.db.setError(err)
- return nil, err
- }
- s.db.StorageDeleted += 1
- } else {
- // Encoding []byte cannot fail, ok to ignore the error.
- trimmed := common.TrimLeftZeroes(value[:])
- encoded, _ = rlp.EncodeToBytes(trimmed)
- if err := tr.UpdateStorage(s.address, key[:], trimmed); err != nil {
- s.db.setError(err)
- return nil, err
- }
- s.db.StorageUpdated += 1
- }
- // Cache the mutated storage slots until commit
- if storage == nil {
- if storage = s.db.storages[s.addrHash]; storage == nil {
- storage = make(map[common.Hash][]byte)
- s.db.storages[s.addrHash] = storage
- }
- }
- khash := crypto.HashData(s.db.hasher, key[:])
- storage[khash] = encoded // encoded will be nil if it's deleted
-
- // Cache the original value of mutated storage slots
- if origin == nil {
- if origin = s.db.storagesOrigin[s.address]; origin == nil {
- origin = make(map[common.Hash][]byte)
- s.db.storagesOrigin[s.address] = origin
- }
- }
- // Track the original value of slot only if it's mutated first time
- if _, ok := origin[khash]; !ok {
- if prev == (common.Hash{}) {
- origin[khash] = nil // nil if it was not present previously
- } else {
- // Encoding []byte cannot fail, ok to ignore the error.
- b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:]))
- origin[khash] = b
- }
- }
- // Cache the items for preloading
- usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
- }
- if s.db.prefetcher != nil {
- s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage)
- }
- s.pendingStorage = make(Storage) // reset pending map
- return tr, nil
-}
-
-// updateRoot flushes all cached storage mutations to trie, recalculating the
-// new storage trie root.
-func (s *stateObject) updateRoot() {
- // Flush cached storage mutations into trie, short circuit if any error
- // is occurred or there is not change in the trie.
- tr, err := s.updateTrie()
- if err != nil || tr == nil {
- return
- }
- // Track the amount of time wasted on hashing the storage trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
- }
- s.data.Root = tr.Hash()
-}
-
-// commit obtains a set of dirty storage trie nodes and updates the account data.
-// The returned set can be nil if nothing to commit. This function assumes all
-// storage mutations have already been flushed into trie by updateRoot.
-func (s *stateObject) commit() (*trienode.NodeSet, error) {
- // Short circuit if trie is not even loaded, don't bother with committing anything
- if s.trie == nil {
- s.origin = s.data.Copy()
- return nil, nil
- }
- // Track the amount of time wasted on committing the storage trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
- }
- // The trie is currently in an open state and could potentially contain
- // cached mutations. Call commit to acquire a set of nodes that have been
- // modified, the set can be nil if nothing to commit.
- root, nodes, err := s.trie.Commit(false)
- if err != nil {
- return nil, err
- }
- s.data.Root = root
-
- // Update original account data after commit
- s.origin = s.data.Copy()
- return nodes, nil
-}
-
-// AddBalance adds amount to s's balance.
-// It is used to add funds to the destination account of a transfer.
-func (s *stateObject) AddBalance(amount *uint256.Int) {
- // EIP161: We must check emptiness for the objects such that the account
- // clearing (0,0,0 objects) can take effect.
- if amount.IsZero() {
- if s.empty() {
- s.touch()
- }
- return
- }
- s.SetBalance(new(uint256.Int).Add(s.Balance(), amount))
-}
-
-// SubBalance removes amount from s's balance.
-// It is used to remove funds from the origin account of a transfer.
-func (s *stateObject) SubBalance(amount *uint256.Int) {
- if amount.IsZero() {
- return
- }
- s.SetBalance(new(uint256.Int).Sub(s.Balance(), amount))
-}
-
-func (s *stateObject) SetBalance(amount *uint256.Int) {
- s.db.journal.append(balanceChange{
- account: &s.address,
- prev: new(uint256.Int).Set(s.data.Balance),
- })
- s.setBalance(amount)
-}
-
-// AddBalanceMultiCoin adds amount of coinID to s's balance.
-// It is used to add multicoin funds to the destination account of a transfer.
-func (s *stateObject) AddBalanceMultiCoin(coinID common.Hash, amount *big.Int, db Database) {
- if amount.Sign() == 0 {
- if s.empty() {
- s.touch()
- }
-
- return
- }
- s.SetBalanceMultiCoin(coinID, new(big.Int).Add(s.BalanceMultiCoin(coinID, db), amount), db)
-}
-
-// SubBalanceMultiCoin removes amount of coinID from s's balance.
-// It is used to remove multicoin funds from the origin account of a transfer.
-func (s *stateObject) SubBalanceMultiCoin(coinID common.Hash, amount *big.Int, db Database) {
- if amount.Sign() == 0 {
- return
- }
- s.SetBalanceMultiCoin(coinID, new(big.Int).Sub(s.BalanceMultiCoin(coinID, db), amount), db)
-}
-
-func (s *stateObject) SetBalanceMultiCoin(coinID common.Hash, amount *big.Int, db Database) {
- s.EnableMultiCoin()
- NormalizeCoinID(&coinID)
- s.SetState(coinID, common.BigToHash(amount))
-}
-
-func (s *stateObject) setBalance(amount *uint256.Int) {
- s.data.Balance = amount
-}
-
-func (s *stateObject) enableMultiCoin() {
- s.data.IsMultiCoin = true
-}
-
-func (s *stateObject) deepCopy(db *StateDB) *stateObject {
- obj := &stateObject{
- db: db,
- address: s.address,
- addrHash: s.addrHash,
- origin: s.origin,
- data: s.data,
- }
- if s.trie != nil {
- obj.trie = db.db.CopyTrie(s.trie)
- }
- obj.code = s.code
- obj.dirtyStorage = s.dirtyStorage.Copy()
- obj.originStorage = s.originStorage.Copy()
- obj.pendingStorage = s.pendingStorage.Copy()
- obj.selfDestructed = s.selfDestructed
- obj.dirtyCode = s.dirtyCode
- obj.deleted = s.deleted
- return obj
-}
-
-//
-// Attribute accessors
-//
-
-// Address returns the address of the contract/account
-func (s *stateObject) Address() common.Address {
- return s.address
-}
-
-// Code returns the contract code associated with this object, if any.
-func (s *stateObject) Code() []byte {
- if s.code != nil {
- return s.code
- }
- if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
- return nil
- }
- code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash()))
- if err != nil {
- s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
- }
- s.code = code
- return code
-}
-
-// CodeSize returns the size of the contract code associated with this object,
-// or zero if none. This method is an almost mirror of Code, but uses a cache
-// inside the database to avoid loading codes seen recently.
-func (s *stateObject) CodeSize() int {
- if s.code != nil {
- return len(s.code)
- }
- if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
- return 0
- }
- size, err := s.db.db.ContractCodeSize(s.address, common.BytesToHash(s.CodeHash()))
- if err != nil {
- s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
- }
- return size
-}
-
-func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
- prevcode := s.Code()
- s.db.journal.append(codeChange{
- account: &s.address,
- prevhash: s.CodeHash(),
- prevcode: prevcode,
- })
- s.setCode(codeHash, code)
-}
-
-func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
- s.code = code
- s.data.CodeHash = codeHash[:]
- s.dirtyCode = true
-}
-
-func (s *stateObject) SetNonce(nonce uint64) {
- s.db.journal.append(nonceChange{
- account: &s.address,
- prev: s.data.Nonce,
- })
- s.setNonce(nonce)
-}
-
-func (s *stateObject) setNonce(nonce uint64) {
- s.data.Nonce = nonce
-}
-
-func (s *stateObject) CodeHash() []byte {
- return s.data.CodeHash
-}
-
-func (s *stateObject) Balance() *uint256.Int {
- return s.data.Balance
-}
-
-// NormalizeCoinID ORs the 0th bit of the first byte in
-// [coinID], which ensures this bit will be 1 and all other
-// bits are left the same.
-// This partitions multicoin storage from normal state storage.
-func NormalizeCoinID(coinID *common.Hash) {
- coinID[0] |= 0x01
-}
-
-// NormalizeStateKey ANDs the 0th bit of the first byte in
-// [key], which ensures this bit will be 0 and all other bits
-// are left the same.
-// This partitions normal state storage from multicoin storage.
-func NormalizeStateKey(key *common.Hash) {
- key[0] &= 0xfe
-}
-
-func (s *stateObject) BalanceMultiCoin(coinID common.Hash, db Database) *big.Int {
- NormalizeCoinID(&coinID)
- return s.GetState(coinID).Big()
-}
-
-func (s *stateObject) EnableMultiCoin() bool {
- if s.data.IsMultiCoin {
- return false
- }
- s.db.journal.append(multiCoinEnable{
- account: &s.address,
- })
- s.enableMultiCoin()
- return true
-}
-
-func (s *stateObject) Nonce() uint64 {
- return s.data.Nonce
-}
-
-func (s *stateObject) Root() common.Hash {
- return s.data.Root
-}
diff --git a/core/state/state_test.go b/core/state/state_test.go
index f5cb0ca363..1b9484514b 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -27,17 +27,12 @@
package state
import (
- "bytes"
- "encoding/json"
"testing"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
- "github.com/holiman/uint256"
)
type stateEnv struct {
@@ -51,103 +46,6 @@ func newStateEnv() *stateEnv {
return &stateEnv{db: db, state: sdb}
}
-func TestDump(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
- tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
- sdb, _ := New(types.EmptyRootHash, tdb, nil)
- s := &stateEnv{db: db, state: sdb}
-
- // generate a few entries
- obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
- obj1.AddBalance(uint256.NewInt(22))
- obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
- obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
- obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02}))
- obj3.SetBalance(uint256.NewInt(44))
-
- // write some of them to the trie
- s.state.updateStateObject(obj1)
- s.state.updateStateObject(obj2)
- root, _ := s.state.Commit(0, false, false)
-
- // check that DumpToCollector contains the state objects that are in trie
- s.state, _ = New(root, tdb, nil)
- got := string(s.state.Dump(nil))
- want := `{
- "root": "1d75ab73e172edb7c3b3c0fd004d9896992fb96b617f6f954641d7618159e5e4",
- "accounts": {
- "0x0000000000000000000000000000000000000001": {
- "balance": "22",
- "nonce": 0,
- "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
- "isMultiCoin": false,
- "address": "0x0000000000000000000000000000000000000001",
- "key": "0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"
- },
- "0x0000000000000000000000000000000000000002": {
- "balance": "44",
- "nonce": 0,
- "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
- "isMultiCoin": false,
- "address": "0x0000000000000000000000000000000000000002",
- "key": "0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"
- },
- "0x0000000000000000000000000000000000000102": {
- "balance": "0",
- "nonce": 0,
- "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "codeHash": "0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3",
- "code": "0x03030303030303",
- "isMultiCoin": false,
- "address": "0x0000000000000000000000000000000000000102",
- "key": "0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"
- }
- }
-}`
- if got != want {
- t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want)
- }
-}
-
-func TestIterativeDump(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
- tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
- sdb, _ := New(types.EmptyRootHash, tdb, nil)
- s := &stateEnv{db: db, state: sdb}
-
- // generate a few entries
- obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
- obj1.AddBalance(uint256.NewInt(22))
- obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
- obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
- obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02}))
- obj3.SetBalance(uint256.NewInt(44))
- obj4 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x00}))
- obj4.AddBalance(uint256.NewInt(1337))
-
- // write some of them to the trie
- s.state.updateStateObject(obj1)
- s.state.updateStateObject(obj2)
- root, _ := s.state.Commit(0, false, false)
- s.state, _ = New(root, tdb, nil)
-
- b := &bytes.Buffer{}
- s.state.IterativeDump(nil, json.NewEncoder(b))
- // check that DumpToCollector contains the state objects that are in trie
- got := b.String()
- want := `{"root":"0x0ffca661efa3b7504ac015083994c94fd7d0d24db60354c717c936afcced762a"}
-{"balance":"22","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000001","key":"0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"}
-{"balance":"1337","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000000","key":"0x5380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"}
-{"balance":"0","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3","code":"0x03030303030303","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000102","key":"0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"}
-{"balance":"44","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000002","key":"0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"}
-`
- if got != want {
- t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want)
- }
-}
-
func TestNull(t *testing.T) {
s := newStateEnv()
address := common.HexToAddress("0x823140710bf13990e4500136726d8b55")
@@ -156,7 +54,7 @@ func TestNull(t *testing.T) {
var value common.Hash
s.state.SetState(address, common.Hash{}, value)
- s.state.Commit(0, false, false)
+ s.state.Commit(0, false)
if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
t.Errorf("expected empty current value, got %x", value)
@@ -205,107 +103,3 @@ func TestSnapshotEmpty(t *testing.T) {
s := newStateEnv()
s.state.RevertToSnapshot(s.state.Snapshot())
}
-
-func TestSnapshot2(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
-
- stateobjaddr0 := common.BytesToAddress([]byte("so0"))
- stateobjaddr1 := common.BytesToAddress([]byte("so1"))
- var storageaddr common.Hash
-
- data0 := common.BytesToHash([]byte{17})
- data1 := common.BytesToHash([]byte{18})
-
- state.SetState(stateobjaddr0, storageaddr, data0)
- state.SetState(stateobjaddr1, storageaddr, data1)
-
- // db, trie are already non-empty values
- so0 := state.getStateObject(stateobjaddr0)
- so0.SetBalance(uint256.NewInt(42))
- so0.SetNonce(43)
- so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'})
- so0.selfDestructed = false
- so0.deleted = false
- state.setStateObject(so0)
-
- root, _ := state.Commit(0, false, false)
- state, _ = New(root, state.db, nil)
-
- // and one with deleted == true
- so1 := state.getStateObject(stateobjaddr1)
- so1.SetBalance(uint256.NewInt(52))
- so1.SetNonce(53)
- so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'})
- so1.selfDestructed = true
- so1.deleted = true
- state.setStateObject(so1)
-
- so1 = state.getStateObject(stateobjaddr1)
- if so1 != nil {
- t.Fatalf("deleted object not nil when getting")
- }
-
- snapshot := state.Snapshot()
- state.RevertToSnapshot(snapshot)
-
- so0Restored := state.getStateObject(stateobjaddr0)
- // Update lazily-loaded values before comparing.
- so0Restored.GetState(storageaddr)
- so0Restored.Code()
- // non-deleted is equal (restored)
- compareStateObjects(so0Restored, so0, t)
-
- // deleted should be nil, both before and after restore of state copy
- so1Restored := state.getStateObject(stateobjaddr1)
- if so1Restored != nil {
- t.Fatalf("deleted object not nil after restoring snapshot: %+v", so1Restored)
- }
-}
-
-func compareStateObjects(so0, so1 *stateObject, t *testing.T) {
- if so0.Address() != so1.Address() {
- t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address)
- }
- if so0.Balance().Cmp(so1.Balance()) != 0 {
- t.Fatalf("Balance mismatch: have %v, want %v", so0.Balance(), so1.Balance())
- }
- if so0.Nonce() != so1.Nonce() {
- t.Fatalf("Nonce mismatch: have %v, want %v", so0.Nonce(), so1.Nonce())
- }
- if so0.data.Root != so1.data.Root {
- t.Errorf("Root mismatch: have %x, want %x", so0.data.Root[:], so1.data.Root[:])
- }
- if !bytes.Equal(so0.CodeHash(), so1.CodeHash()) {
- t.Fatalf("CodeHash mismatch: have %v, want %v", so0.CodeHash(), so1.CodeHash())
- }
- if !bytes.Equal(so0.code, so1.code) {
- t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code)
- }
-
- if len(so1.dirtyStorage) != len(so0.dirtyStorage) {
- t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage))
- }
- for k, v := range so1.dirtyStorage {
- if so0.dirtyStorage[k] != v {
- t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v)
- }
- }
- for k, v := range so0.dirtyStorage {
- if so1.dirtyStorage[k] != v {
- t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v)
- }
- }
- if len(so1.originStorage) != len(so0.originStorage) {
- t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage))
- }
- for k, v := range so1.originStorage {
- if so0.originStorage[k] != v {
- t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v)
- }
- }
- for k, v := range so0.originStorage {
- if so1.originStorage[k] != v {
- t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v)
- }
- }
-}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index e3699be04d..887da56899 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -28,36 +28,15 @@
package state
import (
- "fmt"
"math/big"
- "sort"
- "time"
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/log"
- "github.com/ava-labs/libevm/params"
+ ethstate "github.com/ava-labs/libevm/core/state"
"github.com/holiman/uint256"
)
-const (
- // storageDeleteLimit denotes the highest permissible memory allocation
- // employed for contract storage deletion.
- storageDeleteLimit = 512 * 1024 * 1024
-)
-
-type revision struct {
- id int
- journalIndex int
-}
-
// StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
@@ -70,940 +49,99 @@ type revision struct {
// must be created with new root and updated database for accessing post-
// commit states.
type StateDB struct {
- db Database
- prefetcher *triePrefetcher
- trie Trie
- hasher crypto.KeccakState
- snap snapshot.Snapshot // Nil if snapshot is not available
-
- // originalRoot is the pre-state root, before any changes were made.
- // It will be updated when the Commit is called.
- originalRoot common.Hash
+ *ethstate.StateDB
- // These maps hold the state changes (including the corresponding
- // original value) that occurred in this **block**.
- accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding
- storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format
- accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding
- storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format
-
- // This map holds 'live' objects, which will get modified while processing
- // a state transition.
- stateObjects map[common.Address]*stateObject
- stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
- stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
- stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value
-
- // DB error.
- // State objects are used by the consensus core and VM which are
- // unable to deal with database-level errors. Any error that occurs
- // during a database read is memoized here and will eventually be
- // returned by StateDB.Commit. Notably, this error is also shared
- // by all cached state objects in case the database failure occurs
- // when accessing state of accounts.
- dbErr error
-
- // The refund counter, also used by state transitioning.
- refund uint64
-
- // The tx context and all occurred logs in the scope of transaction.
+ // The tx context
thash common.Hash
txIndex int
- logs map[common.Hash][]*types.Log
- logSize uint
-
- // Preimages occurred seen by VM in the scope of block.
- preimages map[common.Hash][]byte
- // Per-transaction access list
- accessList *accessList
-
- // Transient storage
- transientStorage transientStorage
-
- // Journal of state modifications. This is the backbone of
- // Snapshot and RevertToSnapshot.
- journal *journal
- validRevisions []revision
- nextRevisionId int
-
- // Measurements gathered during execution for debugging purposes
- AccountReads time.Duration
- AccountHashes time.Duration
- AccountUpdates time.Duration
- AccountCommits time.Duration
- StorageReads time.Duration
- StorageHashes time.Duration
- StorageUpdates time.Duration
- StorageCommits time.Duration
- SnapshotAccountReads time.Duration
- SnapshotStorageReads time.Duration
- SnapshotCommits time.Duration
- TrieDBCommits time.Duration
-
- AccountUpdated int
- StorageUpdated int
- AccountDeleted int
- StorageDeleted int
-
- // Testing hooks
- onCommit func(states *triestate.Set) // Hook invoked when commit is performed
+ // Some fields remembered as they are used in tests
+ db Database
+ snaps ethstate.SnapshotTree
}
// New creates a new state from a given trie.
-func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
- var snap snapshot.Snapshot
- if snaps != nil {
- snap = snaps.Snapshot(root)
- }
- return NewWithSnapshot(root, db, snap)
-}
-
-// NewWithSnapshot creates a new state from a given trie with the specified [snap]
-// If [snap] doesn't have the same root as [root], then NewWithSnapshot will return
-// an error. If snap is nil, then no snapshot will be used and CommitWithSnapshot
-// cannot be called on the returned StateDB.
-func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*StateDB, error) {
- tr, err := db.OpenTrie(root)
+func New(root common.Hash, db Database, snaps ethstate.SnapshotTree) (*StateDB, error) {
+ stateDB, err := ethstate.New(root, db, snaps)
if err != nil {
return nil, err
}
- sdb := &StateDB{
- db: db,
- trie: tr,
- originalRoot: root,
- accounts: make(map[common.Hash][]byte),
- storages: make(map[common.Hash]map[common.Hash][]byte),
- accountsOrigin: make(map[common.Address][]byte),
- storagesOrigin: make(map[common.Address]map[common.Hash][]byte),
- stateObjects: make(map[common.Address]*stateObject),
- stateObjectsPending: make(map[common.Address]struct{}),
- stateObjectsDirty: make(map[common.Address]struct{}),
- stateObjectsDestruct: make(map[common.Address]*types.StateAccount),
- logs: make(map[common.Hash][]*types.Log),
- preimages: make(map[common.Hash][]byte),
- journal: newJournal(),
- accessList: newAccessList(),
- transientStorage: newTransientStorage(),
- hasher: crypto.NewKeccakState(),
- }
- if snap != nil {
- if snap.Root() != root {
- return nil, fmt.Errorf("cannot create new statedb for root: %s, using snapshot with mismatched root: %s", root, snap.Root().Hex())
- }
- sdb.snap = snap
- }
- return sdb, nil
-}
-
-// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
-// state trie concurrently while the state is mutated so that when we reach the
-// commit phase, most of the needed data is already hot.
-func (s *StateDB) StartPrefetcher(namespace string, maxConcurrency int) {
- if s.prefetcher != nil {
- s.prefetcher.close()
- s.prefetcher = nil
- }
- if s.snap != nil {
- s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, maxConcurrency)
- }
+ return &StateDB{
+ StateDB: stateDB,
+ db: db,
+ snaps: snaps,
+ }, nil
}
-// StopPrefetcher terminates a running prefetcher and reports any leftover stats
-// from the gathered metrics.
-func (s *StateDB) StopPrefetcher() {
- if s.prefetcher != nil {
- s.prefetcher.close()
- s.prefetcher = nil
- }
+type workerPool struct {
+ *utils.BoundedWorkers
}
-// setError remembers the first non-nil error it is called with.
-func (s *StateDB) setError(err error) {
- if s.dbErr == nil {
- s.dbErr = err
- }
-}
-
-// Error returns the memorized database failure occurred earlier.
-func (s *StateDB) Error() error {
- return s.dbErr
-}
-
-// AddLog adds a log with the specified parameters to the statedb
-// Note: blockNumber is a required argument because StateDB does not
-// know the current block number.
-func (s *StateDB) AddLog(log *types.Log) {
- s.journal.append(addLogChange{txhash: s.thash})
-
- log.TxHash = s.thash
- log.TxIndex = uint(s.txIndex)
- log.Index = s.logSize
- s.logs[s.thash] = append(s.logs[s.thash], log)
- s.logSize++
-}
-
-// GetLogs returns the logs matching the specified transaction hash, and annotates
-// them with the given blockNumber and blockHash.
-func (s *StateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common.Hash) []*types.Log {
- logs := s.logs[hash]
- for _, l := range logs {
- l.BlockNumber = blockNumber
- l.BlockHash = blockHash
- }
- return logs
+func (wp *workerPool) Done() {
+ // Done is guaranteed to only be called after all work is already complete,
+ // so we call Wait for goroutines to finish before returning.
+ wp.BoundedWorkers.Wait()
}
-func (s *StateDB) Logs() []*types.Log {
- var logs []*types.Log
- for _, lgs := range s.logs {
- logs = append(logs, lgs...)
+func withConcurrentWorkers(prefetchers int) ethstate.PrefetcherOption {
+ pool := &workerPool{
+ BoundedWorkers: utils.NewBoundedWorkers(prefetchers),
}
- return logs
+ return ethstate.WithWorkerPools(func() ethstate.WorkerPool { return pool })
}
-// GetLogData returns the underlying topics and data from each log included in the StateDB
-// Test helper function.
-func (s *StateDB) GetLogData() ([][]common.Hash, [][]byte) {
- var logData [][]byte
- var topics [][]common.Hash
- for _, lgs := range s.logs {
- for _, log := range lgs {
- topics = append(topics, log.Topics)
- logData = append(logData, common.CopyBytes(log.Data))
- }
- }
- return topics, logData
-}
-
-// AddPreimage records a SHA3 preimage seen by the VM.
-func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
- if _, ok := s.preimages[hash]; !ok {
- s.journal.append(addPreimageChange{hash: hash})
- pi := make([]byte, len(preimage))
- copy(pi, preimage)
- s.preimages[hash] = pi
- }
-}
-
-// Preimages returns a list of SHA3 preimages that have been submitted.
-func (s *StateDB) Preimages() map[common.Hash][]byte {
- return s.preimages
-}
-
-// AddRefund adds gas to the refund counter
-func (s *StateDB) AddRefund(gas uint64) {
- s.journal.append(refundChange{prev: s.refund})
- s.refund += gas
-}
-
-// SubRefund removes gas from the refund counter.
-// This method will set the refund counter to 0 if the gas is greater than the current refund.
-func (s *StateDB) SubRefund(gas uint64) {
- s.journal.append(refundChange{prev: s.refund})
- if gas > s.refund {
- log.Warn("Setting refund to 0", "currentRefund", s.refund, "gas", gas)
- s.refund = 0
- return
- }
- s.refund -= gas
-}
-
-// Exist reports whether the given account address exists in the state.
-// Notably this also returns true for self-destructed accounts.
-func (s *StateDB) Exist(addr common.Address) bool {
- return s.getStateObject(addr) != nil
-}
-
-// Empty returns whether the state object is either non-existent
-// or empty according to the EIP161 specification (balance = nonce = code = 0)
-func (s *StateDB) Empty(addr common.Address) bool {
- so := s.getStateObject(addr)
- return so == nil || so.empty()
-}
-
-// GetBalance retrieves the balance from the given address or 0 if object not found
-func (s *StateDB) GetBalance(addr common.Address) *uint256.Int {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Balance()
- }
- return common.U2560
+// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
+// state trie concurrently while the state is mutated so that when we reach the
+// commit phase, most of the needed data is already hot.
+func (s *StateDB) StartPrefetcher(namespace string, maxConcurrency int) {
+ s.StateDB.StartPrefetcher(namespace, withConcurrentWorkers(maxConcurrency))
}
// Retrieve the balance from the given address or 0 if object not found
func (s *StateDB) GetBalanceMultiCoin(addr common.Address, coinID common.Hash) *big.Int {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.BalanceMultiCoin(coinID, s.db)
- }
- return new(big.Int).Set(common.Big0)
-}
-
-// GetNonce retrieves the nonce from the given address or 0 if object not found
-func (s *StateDB) GetNonce(addr common.Address) uint64 {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Nonce()
- }
-
- return 0
-}
-
-// GetStorageRoot retrieves the storage root from the given address or empty
-// if object not found.
-func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Root()
- }
- return common.Hash{}
-}
-
-// TxIndex returns the current transaction index set by Prepare.
-func (s *StateDB) TxIndex() int {
- return s.txIndex
-}
-
-func (s *StateDB) GetCode(addr common.Address) []byte {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Code()
- }
- return nil
-}
-
-func (s *StateDB) GetCodeSize(addr common.Address) int {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.CodeSize()
- }
- return 0
-}
-
-func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return common.BytesToHash(stateObject.CodeHash())
- }
- return common.Hash{}
+ NormalizeCoinID(&coinID)
+ return s.StateDB.GetState(addr, coinID).Big()
}
// GetState retrieves a value from the given account's storage trie.
func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- NormalizeStateKey(&hash)
- return stateObject.GetState(hash)
- }
- return common.Hash{}
-}
-
-// GetCommittedState retrieves a value from the given account's committed storage trie.
-func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.GetCommittedState(hash)
- }
- return common.Hash{}
-}
-
-// Database retrieves the low level database supporting the lower level trie ops.
-func (s *StateDB) Database() Database {
- return s.db
-}
-
-func (s *StateDB) HasSelfDestructed(addr common.Address) bool {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.selfDestructed
- }
- return false
-}
-
-/*
- * SETTERS
- */
-
-// AddBalance adds amount to the account associated with addr.
-func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.AddBalance(amount)
- }
-}
-
-// SubBalance subtracts amount from the account associated with addr.
-func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SubBalance(amount)
- }
-}
-
-func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SetBalance(amount)
- }
+ NormalizeStateKey(&hash)
+ return s.StateDB.GetState(addr, hash)
}
// AddBalance adds amount to the account associated with addr.
func (s *StateDB) AddBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.AddBalanceMultiCoin(coinID, amount, s.db)
- }
-}
-
-// SubBalance subtracts amount from the account associated with addr.
-func (s *StateDB) SubBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SubBalanceMultiCoin(coinID, amount, s.db)
- }
-}
-
-func (s *StateDB) SetBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SetBalanceMultiCoin(coinID, amount, s.db)
- }
-}
-
-func (s *StateDB) SetNonce(addr common.Address, nonce uint64) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SetNonce(nonce)
- }
-}
-
-func (s *StateDB) SetCode(addr common.Address, code []byte) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- stateObject.SetCode(crypto.Keccak256Hash(code), code)
- }
-}
-
-func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
- stateObject := s.getOrNewStateObject(addr)
- if stateObject != nil {
- NormalizeStateKey(&key)
- stateObject.SetState(key, value)
- }
-}
-
-// SetStorage replaces the entire storage for the specified account with given
-// storage. This function should only be used for debugging and the mutations
-// must be discarded afterwards.
-func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
- // SetStorage needs to wipe existing storage. We achieve this by pretending
- // that the account self-destructed earlier in this block, by flagging
- // it in stateObjectsDestruct. The effect of doing so is that storage lookups
- // will not hit disk, since it is assumed that the disk-data is belonging
- // to a previous incarnation of the object.
- //
- // TODO(rjl493456442) this function should only be supported by 'unwritable'
- // state and all mutations made should all be discarded afterwards.
- if _, ok := s.stateObjectsDestruct[addr]; !ok {
- s.stateObjectsDestruct[addr] = nil
- }
- stateObject := s.getOrNewStateObject(addr)
- for k, v := range storage {
- stateObject.SetState(k, v)
- }
-}
-
-// SelfDestruct marks the given account as selfdestructed.
-// This clears the account balance.
-//
-// The account's state object is still available until the state is committed,
-// getStateObject will return a non-nil account after SelfDestruct.
-func (s *StateDB) SelfDestruct(addr common.Address) {
- stateObject := s.getStateObject(addr)
- if stateObject == nil {
+ if amount.Sign() == 0 {
+		s.AddBalance(addr, new(uint256.Int)) // adding zero still touches the account
return
}
- s.journal.append(selfDestructChange{
- account: &addr,
- prev: stateObject.selfDestructed,
- prevbalance: new(uint256.Int).Set(stateObject.Balance()),
- })
- stateObject.markSelfdestructed()
- stateObject.data.Balance = new(uint256.Int)
-}
-
-func (s *StateDB) Selfdestruct6780(addr common.Address) {
- stateObject := s.getStateObject(addr)
- if stateObject == nil {
- return
- }
-
- if stateObject.created {
- s.SelfDestruct(addr)
+ if !ethstate.GetExtra(s.StateDB, types.IsMultiCoinPayloads, addr) {
+ ethstate.SetExtra(s.StateDB, types.IsMultiCoinPayloads, addr, true)
}
+ newAmount := new(big.Int).Add(s.GetBalanceMultiCoin(addr, coinID), amount)
+ NormalizeCoinID(&coinID)
+ s.StateDB.SetState(addr, coinID, common.BigToHash(newAmount))
}
-// SetTransientState sets transient storage for a given account. It
-// adds the change to the journal so that it can be rolled back
-// to its previous value if there is a revert.
-func (s *StateDB) SetTransientState(addr common.Address, key, value common.Hash) {
- prev := s.GetTransientState(addr, key)
- if prev == value {
+// SubBalanceMultiCoin subtracts amount of coinID from the account associated with addr.
+func (s *StateDB) SubBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) {
+ if amount.Sign() == 0 {
return
}
- s.journal.append(transientStorageChange{
- account: &addr,
- key: key,
- prevalue: prev,
- })
- s.setTransientState(addr, key, value)
-}
-
-// setTransientState is a lower level setter for transient storage. It
-// is called during a revert to prevent modifications to the journal.
-func (s *StateDB) setTransientState(addr common.Address, key, value common.Hash) {
- s.transientStorage.Set(addr, key, value)
-}
-
-// GetTransientState gets transient storage for a given account.
-func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common.Hash {
- return s.transientStorage.Get(addr, key)
-}
-
-//
-// Setting, updating & deleting state object methods.
-//
-
-// updateStateObject writes the given object to the trie.
-func (s *StateDB) updateStateObject(obj *stateObject) {
- // Track the amount of time wasted on updating the account from the trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
- }
- // Encode the account and update the account trie
- addr := obj.Address()
- if err := s.trie.UpdateAccount(addr, &obj.data); err != nil {
- s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
- }
- if obj.dirtyCode {
- s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
- }
- // Cache the data until commit. Note, this update mechanism is not symmetric
- // to the deletion, because whereas it is enough to track account updates
- // at commit time, deletions need tracking at transaction boundary level to
- // ensure we capture state clearing.
- s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data)
-
- // Track the original value of mutated account, nil means it was not present.
- // Skip if it has been tracked (because updateStateObject may be called
- // multiple times in a block).
- if _, ok := s.accountsOrigin[obj.address]; !ok {
- if obj.origin == nil {
- s.accountsOrigin[obj.address] = nil
- } else {
- s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin)
- }
+	// Note: setting the IsMultiCoin (extras) flag here should be redundant,
+	// since this call is always preceded by a call to AddBalanceMultiCoin,
+	// which already sets the flag. TODO: consider removing this block.
+ if !ethstate.GetExtra(s.StateDB, types.IsMultiCoinPayloads, addr) {
+ ethstate.SetExtra(s.StateDB, types.IsMultiCoinPayloads, addr, true)
}
+ newAmount := new(big.Int).Sub(s.GetBalanceMultiCoin(addr, coinID), amount)
+ NormalizeCoinID(&coinID)
+ s.StateDB.SetState(addr, coinID, common.BigToHash(newAmount))
}
-// deleteStateObject removes the given object from the state trie.
-func (s *StateDB) deleteStateObject(obj *stateObject) {
- // Track the amount of time wasted on deleting the account from the trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
- }
- // Delete the account from the trie
- addr := obj.Address()
- if err := s.trie.DeleteAccount(addr); err != nil {
- s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
- }
-}
-
-// getStateObject retrieves a state object given by the address, returning nil if
-// the object is not found or was deleted in this execution context. If you need
-// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
-func (s *StateDB) getStateObject(addr common.Address) *stateObject {
- if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
- return obj
- }
- return nil
-}
-
-// getDeletedStateObject is similar to getStateObject, but instead of returning
-// nil for a deleted state object, it returns the actual object with the deleted
-// flag set. This is needed by the state journal to revert to the correct s-
-// destructed object instead of wiping all knowledge about the state object.
-func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
- // Prefer live objects if any is available
- if obj := s.stateObjects[addr]; obj != nil {
- return obj
- }
- // If no live objects are available, attempt to use snapshots
- var data *types.StateAccount
- if s.snap != nil {
- start := time.Now()
- acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
- if metrics.EnabledExpensive {
- s.SnapshotAccountReads += time.Since(start)
- }
- if err == nil {
- if acc == nil {
- return nil
- }
- data = &types.StateAccount{
- Nonce: acc.Nonce,
- Balance: acc.Balance,
- CodeHash: acc.CodeHash,
- IsMultiCoin: acc.IsMultiCoin,
- Root: common.BytesToHash(acc.Root),
- }
- if len(data.CodeHash) == 0 {
- data.CodeHash = types.EmptyCodeHash.Bytes()
- }
- if data.Root == (common.Hash{}) {
- data.Root = types.EmptyRootHash
- }
- }
- }
- // If snapshot unavailable or reading from it failed, load from the database
- if data == nil {
- start := time.Now()
- var err error
- data, err = s.trie.GetAccount(addr)
- if metrics.EnabledExpensive {
- s.AccountReads += time.Since(start)
- }
- if err != nil {
- s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %w", addr.Bytes(), err))
- return nil
- }
- if data == nil {
- return nil
- }
- }
- // Insert into the live set
- obj := newObject(s, addr, data)
- s.setStateObject(obj)
- return obj
-}
-
-func (s *StateDB) setStateObject(object *stateObject) {
- s.stateObjects[object.Address()] = object
-}
-
-// getOrNewStateObject retrieves a state object or create a new state object if nil.
-func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
- stateObject := s.getStateObject(addr)
- if stateObject == nil {
- stateObject, _ = s.createObject(addr)
- }
- return stateObject
-}
-
-// createObject creates a new state object. If there is an existing account with
-// the given address, it is overwritten and returned as the second return value.
-func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
- prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
- newobj = newObject(s, addr, nil)
- if prev == nil {
- s.journal.append(createObjectChange{account: &addr})
- } else {
- // The original account should be marked as destructed and all cached
- // account and storage data should be cleared as well. Note, it must
- // be done here, otherwise the destruction event of "original account"
- // will be lost.
- _, prevdestruct := s.stateObjectsDestruct[prev.address]
- if !prevdestruct {
- s.stateObjectsDestruct[prev.address] = prev.origin
- }
- // There may be some cached account/storage data already since IntermediateRoot
- // will be called for each transaction before byzantium fork which will always
- // cache the latest account/storage data.
- prevAccount, ok := s.accountsOrigin[prev.address]
- s.journal.append(resetObjectChange{
- account: &addr,
- prev: prev,
- prevdestruct: prevdestruct,
- prevAccount: s.accounts[prev.addrHash],
- prevStorage: s.storages[prev.addrHash],
- prevAccountOriginExist: ok,
- prevAccountOrigin: prevAccount,
- prevStorageOrigin: s.storagesOrigin[prev.address],
- })
- delete(s.accounts, prev.addrHash)
- delete(s.storages, prev.addrHash)
- delete(s.accountsOrigin, prev.address)
- delete(s.storagesOrigin, prev.address)
- }
- s.setStateObject(newobj)
- if prev != nil && !prev.deleted {
- return newobj, prev
- }
- return newobj, nil
-}
-
-// CreateAccount explicitly creates a state object. If a state object with the address
-// already exists the balance is carried over to the new account.
-//
-// CreateAccount is called during the EVM CREATE operation. The situation might arise that
-// a contract does the following:
-//
-// 1. sends funds to sha(account ++ (nonce + 1))
-// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
-//
-// Carrying over the balance ensures that Ether doesn't disappear.
-func (s *StateDB) CreateAccount(addr common.Address) {
- newObj, prev := s.createObject(addr)
- if prev != nil {
- newObj.setBalance(prev.data.Balance)
- }
-}
-
-// Copy creates a deep, independent copy of the state.
-// Snapshots of the copied state cannot be applied to the copy.
-func (s *StateDB) Copy() *StateDB {
- // Copy all the basic fields, initialize the memory ones
- state := &StateDB{
- db: s.db,
- trie: s.db.CopyTrie(s.trie),
- originalRoot: s.originalRoot,
- accounts: make(map[common.Hash][]byte),
- storages: make(map[common.Hash]map[common.Hash][]byte),
- accountsOrigin: make(map[common.Address][]byte),
- storagesOrigin: make(map[common.Address]map[common.Hash][]byte),
- stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
- stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
- stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
- stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)),
- refund: s.refund,
- logs: make(map[common.Hash][]*types.Log, len(s.logs)),
- logSize: s.logSize,
- preimages: make(map[common.Hash][]byte, len(s.preimages)),
- journal: newJournal(),
- hasher: crypto.NewKeccakState(),
-
- // In order for the block producer to be able to use and make additions
- // to the snapshot tree, we need to copy that as well. Otherwise, any
- // block mined by ourselves will cause gaps in the tree, and force the
- // miner to operate trie-backed only.
- snap: s.snap,
- }
- // Copy the dirty states, logs, and preimages
- for addr := range s.journal.dirties {
- // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
- // and in the Finalise-method, there is a case where an object is in the journal but not
- // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
- // nil
- if object, exist := s.stateObjects[addr]; exist {
- // Even though the original object is dirty, we are not copying the journal,
- // so we need to make sure that any side-effect the journal would have caused
- // during a commit (or similar op) is already applied to the copy.
- state.stateObjects[addr] = object.deepCopy(state)
-
- state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits
- state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
- }
- }
- // Above, we don't copy the actual journal. This means that if the copy
- // is copied, the loop above will be a no-op, since the copy's journal
- // is empty. Thus, here we iterate over stateObjects, to enable copies
- // of copies.
- for addr := range s.stateObjectsPending {
- if _, exist := state.stateObjects[addr]; !exist {
- state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
- }
- state.stateObjectsPending[addr] = struct{}{}
- }
- for addr := range s.stateObjectsDirty {
- if _, exist := state.stateObjects[addr]; !exist {
- state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
- }
- state.stateObjectsDirty[addr] = struct{}{}
- }
- // Deep copy the destruction markers.
- for addr, value := range s.stateObjectsDestruct {
- state.stateObjectsDestruct[addr] = value
- }
- // Deep copy the state changes made in the scope of block
- // along with their original values.
- state.accounts = copySet(s.accounts)
- state.storages = copy2DSet(s.storages)
- state.accountsOrigin = copySet(state.accountsOrigin)
- state.storagesOrigin = copy2DSet(state.storagesOrigin)
-
- // Deep copy the logs occurred in the scope of block
- for hash, logs := range s.logs {
- cpy := make([]*types.Log, len(logs))
- for i, l := range logs {
- cpy[i] = new(types.Log)
- *cpy[i] = *l
- }
- state.logs[hash] = cpy
- }
- // Deep copy the preimages occurred in the scope of block
- for hash, preimage := range s.preimages {
- state.preimages[hash] = preimage
- }
- // Do we need to copy the access list and transient storage?
- // In practice: No. At the start of a transaction, these two lists are empty.
- // In practice, we only ever copy state _between_ transactions/blocks, never
- // in the middle of a transaction. However, it doesn't cost us much to copy
- // empty lists, so we do it anyway to not blow up if we ever decide copy them
- // in the middle of a transaction.
- state.accessList = s.accessList.Copy()
- state.transientStorage = s.transientStorage.Copy()
-
- // If there's a prefetcher running, make an inactive copy of it that can
- // only access data but does not actively preload (since the user will not
- // know that they need to explicitly terminate an active copy).
- if s.prefetcher != nil {
- state.prefetcher = s.prefetcher.copy()
- }
- return state
-}
-
-// Snapshot returns an identifier for the current revision of the state.
-func (s *StateDB) Snapshot() int {
- id := s.nextRevisionId
- s.nextRevisionId++
- s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()})
- return id
-}
-
-// RevertToSnapshot reverts all state changes made since the given revision.
-func (s *StateDB) RevertToSnapshot(revid int) {
- // Find the snapshot in the stack of valid snapshots.
- idx := sort.Search(len(s.validRevisions), func(i int) bool {
- return s.validRevisions[i].id >= revid
- })
- if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid {
- panic(fmt.Errorf("revision id %v cannot be reverted", revid))
- }
- snapshot := s.validRevisions[idx].journalIndex
-
- // Replay the journal to undo changes and remove invalidated snapshots
- s.journal.revert(s, snapshot)
- s.validRevisions = s.validRevisions[:idx]
-}
-
-// GetRefund returns the current value of the refund counter.
-func (s *StateDB) GetRefund() uint64 {
- return s.refund
-}
-
-// Finalise finalises the state by removing the destructed objects and clears
-// the journal as well as the refunds. Finalise, however, will not push any updates
-// into the tries just yet. Only IntermediateRoot or Commit will do that.
-func (s *StateDB) Finalise(deleteEmptyObjects bool) {
- addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
- for addr := range s.journal.dirties {
- obj, exist := s.stateObjects[addr]
- if !exist {
- // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
- // That tx goes out of gas, and although the notion of 'touched' does not exist there, the
- // touch-event will still be recorded in the journal. Since ripeMD is a special snowflake,
- // it will persist in the journal even though the journal is reverted. In this special circumstance,
- // it may exist in `s.journal.dirties` but not in `s.stateObjects`.
- // Thus, we can safely ignore it here
- continue
- }
- if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
- obj.deleted = true
-
- // We need to maintain account deletions explicitly (will remain
- // set indefinitely). Note only the first occurred self-destruct
- // event is tracked.
- if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
- s.stateObjectsDestruct[obj.address] = obj.origin
- }
- // Note, we can't do this only at the end of a block because multiple
- // transactions within the same block might self destruct and then
- // resurrect an account; but the snapshotter needs both events.
- delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
- delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
- delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect)
- delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect)
- } else {
- obj.finalise(true) // Prefetch slots in the background
- }
- obj.created = false
- s.stateObjectsPending[addr] = struct{}{}
- s.stateObjectsDirty[addr] = struct{}{}
-
- // At this point, also ship the address off to the precacher. The precacher
- // will start loading tries, and when the change is eventually committed,
- // the commit-phase will be a lot faster
- addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
- }
- if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
- s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
- }
- // Invalidate journal because reverting across transactions is not allowed.
- s.clearJournalAndRefund()
-}
-
-// IntermediateRoot computes the current root hash of the state trie.
-// It is called in between transactions to get the root hash that
-// goes into transaction receipts.
-func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
- // Finalise all the dirty storage states and write them into the tries
- s.Finalise(deleteEmptyObjects)
-
- // If there was a trie prefetcher operating, it gets aborted and irrevocably
- // modified after we start retrieving tries. Remove it from the statedb after
- // this round of use.
- //
- // This is weird pre-byzantium since the first tx runs with a prefetcher and
- // the remainder without, but pre-byzantium even the initial prefetcher is
- // useless, so no sleep lost.
- prefetcher := s.prefetcher
- if s.prefetcher != nil {
- defer func() {
- s.prefetcher.close()
- s.prefetcher = nil
- }()
- }
- // Although naively it makes sense to retrieve the account trie and then do
- // the contract storage and account updates sequentially, that short circuits
- // the account prefetcher. Instead, let's process all the storage updates
- // first, giving the account prefetches just a few more milliseconds of time
- // to pull useful data from disk.
- for addr := range s.stateObjectsPending {
- if obj := s.stateObjects[addr]; !obj.deleted {
- obj.updateRoot()
- }
- }
- // Now we're about to start to write changes to the trie. The trie is so far
- // _untouched_. We can check with the prefetcher, if it can give us a trie
- // which has the same root, but also has some content loaded into it.
- if prefetcher != nil {
- if trie := prefetcher.trie(common.Hash{}, s.originalRoot); trie != nil {
- s.trie = trie
- }
- }
- usedAddrs := make([][]byte, 0, len(s.stateObjectsPending))
- for addr := range s.stateObjectsPending {
- if obj := s.stateObjects[addr]; obj.deleted {
- s.deleteStateObject(obj)
- s.AccountDeleted += 1
- } else {
- s.updateStateObject(obj)
- s.AccountUpdated += 1
- }
- usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
- }
- if prefetcher != nil {
- prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs)
- }
- if len(s.stateObjectsPending) > 0 {
- s.stateObjectsPending = make(map[common.Address]struct{})
- }
- // Track the amount of time wasted on hashing the account trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
- }
- return s.trie.Hash()
+func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
+ NormalizeStateKey(&key)
+ s.StateDB.SetState(addr, key, value)
}
// SetTxContext sets the current transaction hash and index which are
@@ -1012,449 +150,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (s *StateDB) SetTxContext(thash common.Hash, ti int) {
s.thash = thash
s.txIndex = ti
-}
-
-func (s *StateDB) clearJournalAndRefund() {
- if len(s.journal.entries) > 0 {
- s.journal = newJournal()
- s.refund = 0
- }
- s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
-}
-
-// fastDeleteStorage is the function that efficiently deletes the storage trie
-// of a specific account. It leverages the associated state snapshot for fast
-// storage iteration and constructs trie node deletion markers by creating
-// stack trie with iterated slots.
-func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
- iter, _ := s.snap.StorageIterator(addrHash, common.Hash{})
- defer iter.Release()
-
- var (
- size common.StorageSize
- nodes = trienode.NewNodeSet(addrHash)
- slots = make(map[common.Hash][]byte)
- )
- options := trie.NewStackTrieOptions()
- options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- nodes.AddNode(path, trienode.NewDeleted())
- size += common.StorageSize(len(path))
- })
- stack := trie.NewStackTrie(options)
- for iter.Next() {
- if size > storageDeleteLimit {
- return true, size, nil, nil, nil
- }
- slot := common.CopyBytes(iter.Slot())
- if err := iter.Error(); err != nil { // error might occur after Slot function
- return false, 0, nil, nil, err
- }
- size += common.StorageSize(common.HashLength + len(slot))
- slots[iter.Hash()] = slot
-
- if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
- return false, 0, nil, nil, err
- }
- }
- if err := iter.Error(); err != nil { // error might occur during iteration
- return false, 0, nil, nil, err
- }
- if stack.Hash() != root {
- return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
- }
- return false, size, slots, nodes, nil
-}
-
-// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
-// employed when the associated state snapshot is not available. It iterates the
-// storage slots along with all internal trie nodes via trie directly.
-func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
- tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
- if err != nil {
- return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
- }
- it, err := tr.NodeIterator(nil)
- if err != nil {
- return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
- }
- var (
- size common.StorageSize
- nodes = trienode.NewNodeSet(addrHash)
- slots = make(map[common.Hash][]byte)
- )
- for it.Next(true) {
- if size > storageDeleteLimit {
- return true, size, nil, nil, nil
- }
- if it.Leaf() {
- slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
- size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
- continue
- }
- if it.Hash() == (common.Hash{}) {
- continue
- }
- size += common.StorageSize(len(it.Path()))
- nodes.AddNode(it.Path(), trienode.NewDeleted())
- }
- if err := it.Error(); err != nil {
- return false, 0, nil, nil, err
- }
- return false, size, slots, nodes, nil
-}
-
-// deleteStorage is designed to delete the storage trie of a designated account.
-// It could potentially be terminated if the storage size is excessively large,
-// potentially leading to an out-of-memory panic. The function will make an attempt
-// to utilize an efficient strategy if the associated state snapshot is reachable;
-// otherwise, it will resort to a less-efficient approach.
-func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
- var (
- start = time.Now()
- err error
- aborted bool
- size common.StorageSize
- slots map[common.Hash][]byte
- nodes *trienode.NodeSet
- )
- // The fast approach can be failed if the snapshot is not fully
- // generated, or it's internally corrupted. Fallback to the slow
- // one just in case.
- if s.snap != nil {
- aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
- }
- if s.snap == nil || err != nil {
- aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
- }
- if err != nil {
- return false, nil, nil, err
- }
- if metrics.EnabledExpensive {
- if aborted {
- slotDeletionSkip.Inc(1)
- }
- n := int64(len(slots))
-
- slotDeletionMaxCount.UpdateIfGt(int64(len(slots)))
- slotDeletionMaxSize.UpdateIfGt(int64(size))
-
- slotDeletionTimer.UpdateSince(start)
- slotDeletionCount.Mark(n)
- slotDeletionSize.Mark(int64(size))
- }
- return aborted, slots, nodes, nil
-}
-
-// handleDestruction processes all destruction markers and deletes the account
-// and associated storage slots if necessary. There are four possible situations
-// here:
-//
-// - the account was not existent and be marked as destructed
-//
-// - the account was not existent and be marked as destructed,
-// however, it's resurrected later in the same block.
-//
-// - the account was existent and be marked as destructed
-//
-// - the account was existent and be marked as destructed,
-// however it's resurrected later in the same block.
-//
-// In case (a), nothing needs be deleted, nil to nil transition can be ignored.
-//
-// In case (b), nothing needs be deleted, nil is used as the original value for
-// newly created account and storages
-//
-// In case (c), **original** account along with its storages should be deleted,
-// with their values be tracked as original value.
-//
-// In case (d), **original** account along with its storages should be deleted,
-// with their values be tracked as original value.
-func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) {
- // Short circuit if geth is running with hash mode. This procedure can consume
- // considerable time and storage deletion isn't supported in hash mode, thus
- // preemptively avoiding unnecessary expenses.
- incomplete := make(map[common.Address]struct{})
- if s.db.TrieDB().Scheme() == rawdb.HashScheme {
- return incomplete, nil
- }
- for addr, prev := range s.stateObjectsDestruct {
- // The original account was non-existing, and it's marked as destructed
- // in the scope of block. It can be case (a) or (b).
- // - for (a), skip it without doing anything.
- // - for (b), track account's original value as nil. It may overwrite
- // the data cached in s.accountsOrigin set by 'updateStateObject'.
- addrHash := crypto.Keccak256Hash(addr[:])
- if prev == nil {
- if _, ok := s.accounts[addrHash]; ok {
- s.accountsOrigin[addr] = nil // case (b)
- }
- continue
- }
- // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'.
- s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d)
-
- // Short circuit if the storage was empty.
- if prev.Root == types.EmptyRootHash {
- continue
- }
- // Remove storage slots belong to the account.
- aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
- if err != nil {
- return nil, fmt.Errorf("failed to delete storage, err: %w", err)
- }
- // The storage is too huge to handle, skip it but mark as incomplete.
- // For case (d), the account is resurrected might with a few slots
- // created. In this case, wipe the entire storage state diff because
- // of aborted deletion.
- if aborted {
- incomplete[addr] = struct{}{}
- delete(s.storagesOrigin, addr)
- continue
- }
- if s.storagesOrigin[addr] == nil {
- s.storagesOrigin[addr] = slots
- } else {
- // It can overwrite the data in s.storagesOrigin[addrHash] set by
- // 'object.updateTrie'.
- for key, val := range slots {
- s.storagesOrigin[addr][key] = val
- }
- }
- if err := nodes.Merge(set); err != nil {
- return nil, err
- }
- }
- return incomplete, nil
-}
-
-// Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) {
- return s.commit(block, deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot)
-}
-
-// CommitWithSnap writes the state to the underlying in-memory trie database and
-// generates a snapshot layer for the newly committed state.
-func (s *StateDB) CommitWithSnap(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
- return s.commit(block, deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot)
-}
-
-// Once the state is committed, tries cached in stateDB (including account
-// trie, storage tries) will no longer be functional. A new state instance
-// must be created with new root and updated database for accessing post-
-// commit states.
-//
-// The associated block number of the state transition is also provided
-// for more chain context.
-func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
- // Short circuit in case any database failure occurred earlier.
- if s.dbErr != nil {
- return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
- }
- // Finalize any pending changes and merge everything into the tries
- s.IntermediateRoot(deleteEmptyObjects)
-
- // Commit objects to the trie, measuring the elapsed time
- var (
- accountTrieNodesUpdated int
- accountTrieNodesDeleted int
- storageTrieNodesUpdated int
- storageTrieNodesDeleted int
- nodes = trienode.NewMergedNodeSet()
- codeWriter = s.db.DiskDB().NewBatch()
- )
- // Handle all state deletions first
- incomplete, err := s.handleDestruction(nodes)
- if err != nil {
- return common.Hash{}, err
- }
- // Handle all state updates afterwards
- for addr := range s.stateObjectsDirty {
- obj := s.stateObjects[addr]
- if obj.deleted {
- continue
- }
- // Write any contract code associated with the state object
- if obj.code != nil && obj.dirtyCode {
- rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
- obj.dirtyCode = false
- }
- // Write any storage changes in the state object to its storage trie
- set, err := obj.commit()
- if err != nil {
- return common.Hash{}, err
- }
- // Merge the dirty nodes of storage trie into global set. It is possible
- // that the account was destructed and then resurrected in the same block.
- // In this case, the node set is shared by both accounts.
- if set != nil {
- if err := nodes.Merge(set); err != nil {
- return common.Hash{}, err
- }
- updates, deleted := set.Size()
- storageTrieNodesUpdated += updates
- storageTrieNodesDeleted += deleted
- }
- }
- if codeWriter.ValueSize() > 0 {
- if err := codeWriter.Write(); err != nil {
- log.Crit("Failed to commit dirty codes", "error", err)
- }
- }
- // Write the account trie changes, measuring the amount of wasted time
- var start time.Time
- if metrics.EnabledExpensive {
- start = time.Now()
- }
- root, set, err := s.trie.Commit(true)
- if err != nil {
- return common.Hash{}, err
- }
- // Merge the dirty nodes of account trie into global set
- if set != nil {
- if err := nodes.Merge(set); err != nil {
- return common.Hash{}, err
- }
- accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size()
- }
- if metrics.EnabledExpensive {
- s.AccountCommits += time.Since(start)
-
- accountUpdatedMeter.Mark(int64(s.AccountUpdated))
- storageUpdatedMeter.Mark(int64(s.StorageUpdated))
- accountDeletedMeter.Mark(int64(s.AccountDeleted))
- storageDeletedMeter.Mark(int64(s.StorageDeleted))
- accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
- accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
- storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
- storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
- s.AccountUpdated, s.AccountDeleted = 0, 0
- s.StorageUpdated, s.StorageDeleted = 0, 0
- }
- // If snapshotting is enabled, update the snapshot tree with this new version
- if snaps != nil {
- start := time.Now()
- if s.snap == nil {
- log.Error(fmt.Sprintf("cannot commit with snaps without a pre-existing snap layer, parentHash: %s, blockHash: %s", parentHash, blockHash))
- }
- if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil {
- log.Warn("Failed to update snapshot tree", "to", root, "err", err)
- }
- if metrics.EnabledExpensive {
- s.SnapshotCommits += time.Since(start)
- }
- s.snap = nil
- }
- if root == (common.Hash{}) {
- root = types.EmptyRootHash
- }
- origin := s.originalRoot
- if origin == (common.Hash{}) {
- origin = types.EmptyRootHash
- }
- if root != origin {
- start := time.Now()
- set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)
- if referenceRoot {
- if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, block, nodes, set); err != nil {
- return common.Hash{}, err
- }
- } else {
- if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
- return common.Hash{}, err
- }
- }
- s.originalRoot = root
- if metrics.EnabledExpensive {
- s.TrieDBCommits += time.Since(start)
- }
- if s.onCommit != nil {
- s.onCommit(set)
- }
- }
- // Clear all internal flags at the end of commit operation.
- s.accounts = make(map[common.Hash][]byte)
- s.storages = make(map[common.Hash]map[common.Hash][]byte)
- s.accountsOrigin = make(map[common.Address][]byte)
- s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
- s.stateObjectsDirty = make(map[common.Address]struct{})
- s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount)
- return root, nil
-}
-
-// Prepare handles the preparatory steps for executing a state transition with.
-// This method must be invoked before state transition.
-//
-// Berlin fork (aka ApricotPhase2):
-// - Add sender to access list (2929)
-// - Add destination to access list (2929)
-// - Add precompiles to access list (2929)
-// - Add the contents of the optional tx access list (2930)
-//
-// Potential EIPs:
-// - Reset access list (Berlin/ApricotPhase2)
-// - Add coinbase to access list (EIP-3651/Durango)
-// - Reset transient storage (EIP-1153)
-func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) {
- if rules.IsBerlin {
- // Clear out any leftover from previous executions
- al := newAccessList()
- s.accessList = al
-
- al.AddAddress(sender)
- if dst != nil {
- al.AddAddress(*dst)
- // If it's a create-tx, the destination will be added inside evm.create
- }
- for _, addr := range precompiles {
- al.AddAddress(addr)
- }
- for _, el := range list {
- al.AddAddress(el.Address)
- for _, key := range el.StorageKeys {
- al.AddSlot(el.Address, key)
- }
- }
- if rules.IsShanghai { // EIP-3651: warm coinbase
- al.AddAddress(coinbase)
- }
- }
- // Reset transient storage at the beginning of transaction execution
- s.transientStorage = newTransientStorage()
-}
-
-// AddAddressToAccessList adds the given address to the access list
-func (s *StateDB) AddAddressToAccessList(addr common.Address) {
- if s.accessList.AddAddress(addr) {
- s.journal.append(accessListAddAccountChange{&addr})
- }
-}
-
-// AddSlotToAccessList adds the given (address, slot)-tuple to the access list
-func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
- addrMod, slotMod := s.accessList.AddSlot(addr, slot)
- if addrMod {
- // In practice, this should not happen, since there is no way to enter the
- // scope of 'address' without having the 'address' become already added
- // to the access list (via call-variant, create, etc).
- // Better safe than sorry, though
- s.journal.append(accessListAddAccountChange{&addr})
- }
- if slotMod {
- s.journal.append(accessListAddSlotChange{
- address: &addr,
- slot: &slot,
- })
- }
-}
-
-// AddressInAccessList returns true if the given address is in the access list.
-func (s *StateDB) AddressInAccessList(addr common.Address) bool {
- return s.accessList.ContainsAddress(addr)
-}
-
-// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list.
-func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
- return s.accessList.Contains(addr, slot)
+ s.StateDB.SetTxContext(thash, ti)
}
// GetTxHash returns the current tx hash on the StateDB set by SetTxContext.
@@ -1462,37 +158,28 @@ func (s *StateDB) GetTxHash() common.Hash {
return s.thash
}
-// convertAccountSet converts a provided account set from address keyed to hash keyed.
-func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} {
- ret := make(map[common.Hash]struct{}, len(set))
- for addr := range set {
- obj, exist := s.stateObjects[addr]
- if !exist {
- ret[crypto.Keccak256Hash(addr[:])] = struct{}{}
- } else {
- ret[obj.addrHash] = struct{}{}
- }
+func (s *StateDB) Copy() *StateDB {
+ return &StateDB{
+ StateDB: s.StateDB.Copy(),
+ db: s.db,
+ snaps: s.snaps,
+ thash: s.thash,
+ txIndex: s.txIndex,
}
- return ret
}
-// copySet returns a deep-copied set.
-func copySet[k comparable](set map[k][]byte) map[k][]byte {
- copied := make(map[k][]byte, len(set))
- for key, val := range set {
- copied[key] = common.CopyBytes(val)
- }
- return copied
+// NormalizeCoinID ORs the 0th bit of the first byte in
+// [coinID], which ensures this bit will be 1 and all other
+// bits are left the same.
+// This partitions multicoin storage from normal state storage.
+func NormalizeCoinID(coinID *common.Hash) {
+ coinID[0] |= 0x01
}
-// copy2DSet returns a two-dimensional deep-copied set.
-func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte {
- copied := make(map[k]map[common.Hash][]byte, len(set))
- for addr, subset := range set {
- copied[addr] = make(map[common.Hash][]byte, len(subset))
- for key, val := range subset {
- copied[addr][key] = common.CopyBytes(val)
- }
- }
- return copied
+// NormalizeStateKey ANDs the 0th bit of the first byte in
+// [key], which ensures this bit will be 0 and all other bits
+// are left the same.
+// This partitions normal state storage from multicoin storage.
+func NormalizeStateKey(key *common.Hash) {
+ key[0] &= 0xfe
}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
deleted file mode 100644
index aa5e69fa66..0000000000
--- a/core/state/statedb_fuzz_test.go
+++ /dev/null
@@ -1,402 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see
-
-package state
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "math"
- "math/rand"
- "reflect"
- "strings"
- "testing"
- "testing/quick"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/state/snapshot"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/triestate"
- "github.com/ava-labs/coreth/triedb"
- "github.com/ava-labs/coreth/triedb/pathdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
- "github.com/holiman/uint256"
-)
-
-// A stateTest checks that the state changes are correctly captured. Instances
-// of this test with pseudorandom content are created by Generate.
-//
-// The test works as follows:
-//
-// A list of states are created by applying actions. The state changes between
-// each state instance are tracked and be verified.
-type stateTest struct {
- addrs []common.Address // all account addresses
- actions [][]testAction // modifications to the state, grouped by block
- chunk int // The number of actions per chunk
- err error // failure details are reported through this field
-}
-
-// newStateTestAction creates a random action that changes state.
-func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction {
- actions := []testAction{
- {
- name: "SetBalance",
- fn: func(a testAction, s *StateDB) {
- s.SetBalance(addr, uint256.NewInt(uint64(a.args[0])))
- },
- args: make([]int64, 1),
- },
- {
- name: "SetNonce",
- fn: func(a testAction, s *StateDB) {
- s.SetNonce(addr, uint64(a.args[0]))
- },
- args: make([]int64, 1),
- },
- {
- name: "SetState",
- fn: func(a testAction, s *StateDB) {
- var key, val common.Hash
- binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
- binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
- s.SetState(addr, key, val)
- },
- args: make([]int64, 2),
- },
- {
- name: "SetCode",
- fn: func(a testAction, s *StateDB) {
- code := make([]byte, 16)
- binary.BigEndian.PutUint64(code, uint64(a.args[0]))
- binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
- s.SetCode(addr, code)
- },
- args: make([]int64, 2),
- },
- {
- name: "CreateAccount",
- fn: func(a testAction, s *StateDB) {
- s.CreateAccount(addr)
- },
- },
- {
- name: "Selfdestruct",
- fn: func(a testAction, s *StateDB) {
- s.SelfDestruct(addr)
- },
- },
- }
- var nonRandom = index != -1
- if index == -1 {
- index = r.Intn(len(actions))
- }
- action := actions[index]
- var names []string
- if !action.noAddr {
- names = append(names, addr.Hex())
- }
- for i := range action.args {
- if nonRandom {
- action.args[i] = rand.Int63n(10000) + 1 // set balance to non-zero
- } else {
- action.args[i] = rand.Int63n(10000)
- }
- names = append(names, fmt.Sprint(action.args[i]))
- }
- action.name += " " + strings.Join(names, ", ")
- return action
-}
-
-// Generate returns a new snapshot test of the given size. All randomness is
-// derived from r.
-func (*stateTest) Generate(r *rand.Rand, size int) reflect.Value {
- addrs := make([]common.Address, 5)
- for i := range addrs {
- addrs[i][0] = byte(i)
- }
- actions := make([][]testAction, rand.Intn(5)+1)
-
- for i := 0; i < len(actions); i++ {
- actions[i] = make([]testAction, size)
- for j := range actions[i] {
- if j == 0 {
- // Always include a set balance action to make sure
- // the state changes are not empty.
- actions[i][j] = newStateTestAction(common.HexToAddress("0xdeadbeef"), r, 0)
- continue
- }
- actions[i][j] = newStateTestAction(addrs[r.Intn(len(addrs))], r, -1)
- }
- }
- chunk := int(math.Sqrt(float64(size)))
- if size > 0 && chunk == 0 {
- chunk = 1
- }
- return reflect.ValueOf(&stateTest{
- addrs: addrs,
- actions: actions,
- chunk: chunk,
- })
-}
-
-func (test *stateTest) String() string {
- out := new(bytes.Buffer)
- for i, actions := range test.actions {
- fmt.Fprintf(out, "---- block %d ----\n", i)
- for j, action := range actions {
- if j%test.chunk == 0 {
- fmt.Fprintf(out, "---- transaction %d ----\n", j/test.chunk)
- }
- fmt.Fprintf(out, "%4d: %s\n", j%test.chunk, action.name)
- }
- }
- return out.String()
-}
-
-func (test *stateTest) run() bool {
- var (
- roots []common.Hash
- accountList []map[common.Address][]byte
- storageList []map[common.Address]map[common.Hash][]byte
- onCommit = func(states *triestate.Set) {
- accountList = append(accountList, copySet(states.Accounts))
- storageList = append(storageList, copy2DSet(states.Storages))
- }
- disk = rawdb.NewMemoryDatabase()
- tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
- sdb = NewDatabaseWithNodeDB(disk, tdb)
- byzantium = rand.Intn(2) == 0
- )
- defer disk.Close()
- defer tdb.Close()
-
- var snaps *snapshot.Tree
- if rand.Intn(3) == 0 {
- snaps, _ = snapshot.New(snapshot.Config{
- CacheSize: 1,
- NoBuild: false,
- AsyncBuild: false,
- }, disk, tdb, common.Hash{}, types.EmptyRootHash)
- }
- for i, actions := range test.actions {
- root := types.EmptyRootHash
- if i != 0 {
- root = roots[len(roots)-1]
- }
- state, err := New(root, sdb, snaps)
- if err != nil {
- panic(err)
- }
- state.onCommit = onCommit
-
- for i, action := range actions {
- if i%test.chunk == 0 && i != 0 {
- if byzantium {
- state.Finalise(true) // call finalise at the transaction boundary
- } else {
- state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
- }
- }
- action.fn(action, state)
- }
- if byzantium {
- state.Finalise(true) // call finalise at the transaction boundary
- } else {
- state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
- }
- nroot, err := state.Commit(0, true, false) // call commit at the block boundary
- if err != nil {
- panic(err)
- }
- if nroot == root {
- return true // filter out non-change state transition
- }
- roots = append(roots, nroot)
- }
- for i := 0; i < len(test.actions); i++ {
- root := types.EmptyRootHash
- if i != 0 {
- root = roots[i-1]
- }
- test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i])
- if test.err != nil {
- return false
- }
- }
- return true
-}
-
-// verifyAccountCreation this function is called once the state diff says that
-// specific account was not present. A serial of checks will be performed to
-// ensure the state diff is correct, includes:
-//
-// - the account was indeed not present in trie
-// - the account is present in new trie, nil->nil is regarded as invalid
-// - the slots transition is correct
-func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
- // Verify account change
- addrHash := crypto.Keccak256Hash(addr.Bytes())
- oBlob, err := otr.Get(addrHash.Bytes())
- if err != nil {
- return err
- }
- nBlob, err := ntr.Get(addrHash.Bytes())
- if err != nil {
- return err
- }
- if len(oBlob) != 0 {
- return fmt.Errorf("unexpected account in old trie, %x", addrHash)
- }
- if len(nBlob) == 0 {
- return fmt.Errorf("missing account in new trie, %x", addrHash)
- }
-
- // Verify storage changes
- var nAcct types.StateAccount
- if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
- return err
- }
- // Account has no slot, empty slot set is expected
- if nAcct.Root == types.EmptyRootHash {
- if len(slots) != 0 {
- return fmt.Errorf("unexpected slot changes %x", addrHash)
- }
- return nil
- }
- // Account has slots, ensure all new slots are contained
- st, err := trie.New(trie.StorageTrieID(next, addrHash, nAcct.Root), db)
- if err != nil {
- return err
- }
- for key, val := range slots {
- st.Update(key.Bytes(), val)
- }
- if st.Hash() != types.EmptyRootHash {
- return errors.New("invalid slot changes")
- }
- return nil
-}
-
-// verifyAccountUpdate this function is called once the state diff says that
-// specific account was present. A serial of checks will be performed to
-// ensure the state diff is correct, includes:
-//
-// - the account was indeed present in trie
-// - the account in old trie matches the provided value
-// - the slots transition is correct
-func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
- // Verify account change
- addrHash := crypto.Keccak256Hash(addr.Bytes())
- oBlob, err := otr.Get(addrHash.Bytes())
- if err != nil {
- return err
- }
- nBlob, err := ntr.Get(addrHash.Bytes())
- if err != nil {
- return err
- }
- if len(oBlob) == 0 {
- return fmt.Errorf("missing account in old trie, %x", addrHash)
- }
- full, err := types.FullAccountRLP(origin)
- if err != nil {
- return err
- }
- if !bytes.Equal(full, oBlob) {
- return fmt.Errorf("account value is not matched, %x", addrHash)
- }
-
- // Decode accounts
- var (
- oAcct types.StateAccount
- nAcct types.StateAccount
- nRoot common.Hash
- )
- if err := rlp.DecodeBytes(oBlob, &oAcct); err != nil {
- return err
- }
- if len(nBlob) == 0 {
- nRoot = types.EmptyRootHash
- } else {
- if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
- return err
- }
- nRoot = nAcct.Root
- }
-
- // Verify storage
- st, err := trie.New(trie.StorageTrieID(next, addrHash, nRoot), db)
- if err != nil {
- return err
- }
- for key, val := range slots {
- st.Update(key.Bytes(), val)
- }
- if st.Hash() != oAcct.Root {
- return errors.New("invalid slot changes")
- }
- return nil
-}
-
-func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
- otr, err := trie.New(trie.StateTrieID(root), db)
- if err != nil {
- return err
- }
- ntr, err := trie.New(trie.StateTrieID(next), db)
- if err != nil {
- return err
- }
- for addr, account := range accountsOrigin {
- var err error
- if len(account) == 0 {
- err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr])
- } else {
- err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr])
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func TestStateChanges(t *testing.T) {
- config := &quick.Config{MaxCount: 1000}
- err := quick.Check((*stateTest).run, config)
- if cerr, ok := err.(*quick.CheckError); ok {
- test := cerr.In[0].(*stateTest)
- t.Errorf("%v:\n%s", test.err, test)
- } else if err != nil {
- t.Error(err)
- }
-}
diff --git a/core/state/statedb_multicoin_test.go b/core/state/statedb_multicoin_test.go
new file mode 100644
index 0000000000..52410edb59
--- /dev/null
+++ b/core/state/statedb_multicoin_test.go
@@ -0,0 +1,173 @@
+// (c) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package state
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/state/snapshot"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/rlp"
+ "github.com/holiman/uint256"
+)
+
+func TestMultiCoinOperations(t *testing.T) {
+ s := newStateEnv()
+ addr := common.Address{1}
+ assetID := common.Hash{2}
+
+ root, _ := s.state.Commit(0, false)
+ s.state, _ = New(root, s.state.db, s.state.snaps)
+
+ s.state.AddBalance(addr, new(uint256.Int))
+
+ balance := s.state.GetBalanceMultiCoin(addr, assetID)
+ if balance.Cmp(big.NewInt(0)) != 0 {
+ t.Fatal("expected zero multicoin balance")
+ }
+
+ s.state.AddBalanceMultiCoin(addr, assetID, big.NewInt(10))
+ s.state.SubBalanceMultiCoin(addr, assetID, big.NewInt(5))
+ s.state.AddBalanceMultiCoin(addr, assetID, big.NewInt(3))
+
+ balance = s.state.GetBalanceMultiCoin(addr, assetID)
+ if balance.Cmp(big.NewInt(8)) != 0 {
+ t.Fatal("expected multicoin balance to be 8")
+ }
+}
+
+func TestMultiCoinSnapshot(t *testing.T) {
+ db := rawdb.NewMemoryDatabase()
+ sdb := NewDatabase(db)
+
+ // Create empty snapshot.Tree and StateDB
+ root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ // Use the root as both the stateRoot and blockHash for this test.
+ snapTree := snapshot.NewTestTree(db, root, root)
+
+ addr := common.Address{1}
+ assetID1 := common.Hash{1}
+ assetID2 := common.Hash{2}
+
+ var stateDB *StateDB
+ assertBalances := func(regular, multicoin1, multicoin2 int64) {
+ balance := stateDB.GetBalance(addr)
+ if balance.Cmp(uint256.NewInt(uint64(regular))) != 0 {
+ t.Fatal("incorrect non-multicoin balance")
+ }
+ balanceBig := stateDB.GetBalanceMultiCoin(addr, assetID1)
+ if balanceBig.Cmp(big.NewInt(multicoin1)) != 0 {
+ t.Fatal("incorrect multicoin1 balance")
+ }
+ balanceBig = stateDB.GetBalanceMultiCoin(addr, assetID2)
+ if balanceBig.Cmp(big.NewInt(multicoin2)) != 0 {
+ t.Fatal("incorrect multicoin2 balance")
+ }
+ }
+
+ // Create new state
+ stateDB, _ = New(root, sdb, snapTree)
+ assertBalances(0, 0, 0)
+
+ stateDB.AddBalance(addr, uint256.NewInt(10))
+ assertBalances(10, 0, 0)
+
+ // Commit and get the new root
+ root, _ = stateDB.Commit(0, false, snapshot.WithBlockHashes(common.Hash{}, common.Hash{}))
+ assertBalances(10, 0, 0)
+
+ // Create a new state from the latest root, add a multicoin balance, and
+ // commit it to the tree.
+ stateDB, _ = New(root, sdb, snapTree)
+ stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(10))
+ root, _ = stateDB.Commit(0, false, snapshot.WithBlockHashes(common.Hash{}, common.Hash{}))
+ assertBalances(10, 10, 0)
+
+ // Add more layers than the cap and ensure the balances and layers are correct
+ for i := 0; i < 256; i++ {
+ stateDB, _ = New(root, sdb, snapTree)
+ stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1))
+ stateDB.AddBalanceMultiCoin(addr, assetID2, big.NewInt(2))
+ root, _ = stateDB.Commit(0, false, snapshot.WithBlockHashes(common.Hash{}, common.Hash{}))
+ }
+ assertBalances(10, 266, 512)
+
+ // Do one more add, including the regular balance which is now in the
+ // collapsed snapshot
+ stateDB, _ = New(root, sdb, snapTree)
+ stateDB.AddBalance(addr, uint256.NewInt(1))
+ stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1))
+ root, _ = stateDB.Commit(0, false, snapshot.WithBlockHashes(common.Hash{}, common.Hash{}))
+ stateDB, _ = New(root, sdb, snapTree)
+ assertBalances(11, 267, 512)
+}
+
+func TestGenerateMultiCoinAccounts(t *testing.T) {
+ var (
+ diskdb = rawdb.NewMemoryDatabase()
+ database = NewDatabase(diskdb)
+
+ addr = common.BytesToAddress([]byte("addr1"))
+ addrHash = crypto.Keccak256Hash(addr[:])
+
+ assetID = common.BytesToHash([]byte("coin1"))
+ assetBalance = big.NewInt(10)
+ )
+
+ stateDB, err := New(common.Hash{}, database, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stateDB.AddBalanceMultiCoin(addr, assetID, assetBalance)
+ root, err := stateDB.Commit(0, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ triedb := database.TrieDB()
+ if err := triedb.Commit(root, true); err != nil {
+ t.Fatal(err)
+ }
+ // Build snapshot from scratch
+ snapConfig := snapshot.Config{
+ CacheSize: 16,
+ AsyncBuild: false,
+ NoBuild: false,
+ SkipVerify: true,
+ }
+ snaps, err := snapshot.New(snapConfig, diskdb, triedb, common.Hash{}, root)
+ if err != nil {
+ t.Error("Unexpected error while rebuilding snapshot:", err)
+ }
+
+ // Get latest snapshot and make sure it has the correct account and storage
+ snap := snaps.Snapshot(root)
+ snapAccount, err := snap.AccountRLP(addrHash)
+ if err != nil {
+ t.Fatal(err)
+ }
+ account := new(types.StateAccount)
+ if err := rlp.DecodeBytes(snapAccount, account); err != nil {
+ t.Fatal(err)
+ }
+ if !types.IsMultiCoin(account) {
+ t.Fatalf("Expected SnapAccount to return IsMultiCoin: true, found: %v", types.IsMultiCoin(account))
+ }
+
+ NormalizeCoinID(&assetID)
+ assetHash := crypto.Keccak256Hash(assetID.Bytes())
+ storageBytes, err := snap.Storage(addrHash, assetHash)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ actualAssetBalance := new(big.Int).SetBytes(storageBytes)
+ if actualAssetBalance.Cmp(assetBalance) != 0 {
+ t.Fatalf("Expected asset balance: %v, found %v", assetBalance, actualAssetBalance)
+ }
+}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
deleted file mode 100644
index e5d36fcc26..0000000000
--- a/core/state/statedb_test.go
+++ /dev/null
@@ -1,1355 +0,0 @@
-// (c) 2019-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "math"
- "math/big"
- "math/rand"
- "reflect"
- "strings"
- "sync"
- "testing"
- "testing/quick"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/state/snapshot"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb"
- "github.com/ava-labs/coreth/triedb/hashdb"
- "github.com/ava-labs/coreth/triedb/pathdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
- "github.com/holiman/uint256"
-)
-
-// Tests that updating a state trie does not leak any database writes prior to
-// actually committing the state.
-func TestUpdateLeaks(t *testing.T) {
- // Create an empty state database
- var (
- db = rawdb.NewMemoryDatabase()
- tdb = triedb.NewDatabase(db, nil)
- )
- state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil)
-
- // Update it with some accounts
- for i := byte(0); i < 255; i++ {
- addr := common.BytesToAddress([]byte{i})
- state.AddBalance(addr, uint256.NewInt(uint64(11*i)))
- state.SetNonce(addr, uint64(42*i))
- if i%2 == 0 {
- state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i}))
- }
- if i%3 == 0 {
- state.SetCode(addr, []byte{i, i, i, i, i})
- }
- }
-
- root := state.IntermediateRoot(false)
- if err := tdb.Commit(root, false); err != nil {
- t.Errorf("can not commit trie %v to persistent database", root.Hex())
- }
-
- // Ensure that no data was leaked into the database
- it := db.NewIterator(nil, nil)
- for it.Next() {
- t.Errorf("State leaked into database: %x -> %x", it.Key(), it.Value())
- }
- it.Release()
-}
-
-// Tests that no intermediate state of an object is stored into the database,
-// only the one right before the commit.
-func TestIntermediateLeaks(t *testing.T) {
- // Create two state databases, one transitioning to the final state, the other final from the beginning
- transDb := rawdb.NewMemoryDatabase()
- finalDb := rawdb.NewMemoryDatabase()
- transNdb := triedb.NewDatabase(transDb, nil)
- finalNdb := triedb.NewDatabase(finalDb, nil)
- transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil)
- finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil)
-
- modify := func(state *StateDB, addr common.Address, i, tweak byte) {
- state.SetBalance(addr, uint256.NewInt(uint64(11*i)+uint64(tweak)))
- state.SetNonce(addr, uint64(42*i+tweak))
- if i%2 == 0 {
- state.SetState(addr, common.Hash{i, i, i, 0}, common.Hash{})
- state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak})
- }
- if i%3 == 0 {
- state.SetCode(addr, []byte{i, i, i, i, i, tweak})
- }
- }
-
- // Modify the transient state.
- for i := byte(0); i < 255; i++ {
- modify(transState, common.Address{i}, i, 0)
- }
- // Write modifications to trie.
- transState.IntermediateRoot(false)
-
- // Overwrite all the data with new values in the transient database.
- for i := byte(0); i < 255; i++ {
- modify(transState, common.Address{i}, i, 99)
- modify(finalState, common.Address{i}, i, 99)
- }
-
- // Commit and cross check the databases.
- transRoot, err := transState.Commit(0, false, false)
- if err != nil {
- t.Fatalf("failed to commit transition state: %v", err)
- }
- if err = transNdb.Commit(transRoot, false); err != nil {
- t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
- }
-
- finalRoot, err := finalState.Commit(0, false, false)
- if err != nil {
- t.Fatalf("failed to commit final state: %v", err)
- }
- if err = finalNdb.Commit(finalRoot, false); err != nil {
- t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex())
- }
-
- it := finalDb.NewIterator(nil, nil)
- for it.Next() {
- key, fvalue := it.Key(), it.Value()
- tvalue, err := transDb.Get(key)
- if err != nil {
- t.Errorf("entry missing from the transition database: %x -> %x", key, fvalue)
- }
- if !bytes.Equal(fvalue, tvalue) {
- t.Errorf("value mismatch at key %x: %x in transition database, %x in final database", key, tvalue, fvalue)
- }
- }
- it.Release()
-
- it = transDb.NewIterator(nil, nil)
- for it.Next() {
- key, tvalue := it.Key(), it.Value()
- fvalue, err := finalDb.Get(key)
- if err != nil {
- t.Errorf("extra entry in the transition database: %x -> %x", key, it.Value())
- }
- if !bytes.Equal(fvalue, tvalue) {
- t.Errorf("value mismatch at key %x: %x in transition database, %x in final database", key, tvalue, fvalue)
- }
- }
-}
-
-// TestCopy tests that copying a StateDB object indeed makes the original and
-// the copy independent of each other. This test is a regression test against
-// https://github.com/ethereum/go-ethereum/pull/15549.
-func TestCopy(t *testing.T) {
- // Create a random state test to copy and modify "independently"
- orig, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
-
- for i := byte(0); i < 255; i++ {
- obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- obj.AddBalance(uint256.NewInt(uint64(i)))
- orig.updateStateObject(obj)
- }
- orig.Finalise(false)
-
- // Copy the state
- copy := orig.Copy()
-
- // Copy the copy state
- ccopy := copy.Copy()
-
- // modify all in memory
- for i := byte(0); i < 255; i++ {
- origObj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
-
- origObj.AddBalance(uint256.NewInt(2 * uint64(i)))
- copyObj.AddBalance(uint256.NewInt(3 * uint64(i)))
- ccopyObj.AddBalance(uint256.NewInt(4 * uint64(i)))
-
- orig.updateStateObject(origObj)
- copy.updateStateObject(copyObj)
- ccopy.updateStateObject(copyObj)
- }
-
- // Finalise the changes on all concurrently
- finalise := func(wg *sync.WaitGroup, db *StateDB) {
- defer wg.Done()
- db.Finalise(true)
- }
-
- var wg sync.WaitGroup
- wg.Add(3)
- go finalise(&wg, orig)
- go finalise(&wg, copy)
- go finalise(&wg, ccopy)
- wg.Wait()
-
- // Verify that the three states have been updated independently
- for i := byte(0); i < 255; i++ {
- origObj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
-
- if want := uint256.NewInt(3 * uint64(i)); origObj.Balance().Cmp(want) != 0 {
- t.Errorf("orig obj %d: balance mismatch: have %v, want %v", i, origObj.Balance(), want)
- }
- if want := uint256.NewInt(4 * uint64(i)); copyObj.Balance().Cmp(want) != 0 {
- t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, copyObj.Balance(), want)
- }
- if want := uint256.NewInt(5 * uint64(i)); ccopyObj.Balance().Cmp(want) != 0 {
- t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, ccopyObj.Balance(), want)
- }
- }
-}
-
-func TestSnapshotRandom(t *testing.T) {
- config := &quick.Config{MaxCount: 1000}
- err := quick.Check((*snapshotTest).run, config)
- if cerr, ok := err.(*quick.CheckError); ok {
- test := cerr.In[0].(*snapshotTest)
- t.Errorf("%v:\n%s", test.err, test)
- } else if err != nil {
- t.Error(err)
- }
-}
-
-// A snapshotTest checks that reverting StateDB snapshots properly undoes all changes
-// captured by the snapshot. Instances of this test with pseudorandom content are created
-// by Generate.
-//
-// The test works as follows:
-//
-// A new state is created and all actions are applied to it. Several snapshots are taken
-// in between actions. The test then reverts each snapshot. For each snapshot the actions
-// leading up to it are replayed on a fresh, empty state. The behaviour of all public
-// accessor methods on the reverted state must match the return value of the equivalent
-// methods on the replayed state.
-type snapshotTest struct {
- addrs []common.Address // all account addresses
- actions []testAction // modifications to the state
- snapshots []int // actions indexes at which snapshot is taken
- err error // failure details are reported through this field
-}
-
-type testAction struct {
- name string
- fn func(testAction, *StateDB)
- args []int64
- noAddr bool
-}
-
-// newTestAction creates a random action that changes state.
-func newTestAction(addr common.Address, r *rand.Rand) testAction {
- actions := []testAction{
- {
- name: "SetBalance",
- fn: func(a testAction, s *StateDB) {
- s.SetBalance(addr, uint256.NewInt(uint64(a.args[0])))
- },
- args: make([]int64, 1),
- },
- {
- name: "AddBalance",
- fn: func(a testAction, s *StateDB) {
- s.AddBalance(addr, uint256.NewInt(uint64(a.args[0])))
- },
- args: make([]int64, 1),
- },
- {
- name: "SetNonce",
- fn: func(a testAction, s *StateDB) {
- s.SetNonce(addr, uint64(a.args[0]))
- },
- args: make([]int64, 1),
- },
- {
- name: "SetState",
- fn: func(a testAction, s *StateDB) {
- var key, val common.Hash
- binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
- binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
- s.SetState(addr, key, val)
- },
- args: make([]int64, 2),
- },
- {
- name: "SetCode",
- fn: func(a testAction, s *StateDB) {
- code := make([]byte, 16)
- binary.BigEndian.PutUint64(code, uint64(a.args[0]))
- binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
- s.SetCode(addr, code)
- },
- args: make([]int64, 2),
- },
- {
- name: "CreateAccount",
- fn: func(a testAction, s *StateDB) {
- s.CreateAccount(addr)
- },
- },
- {
- name: "SelfDestruct",
- fn: func(a testAction, s *StateDB) {
- s.SelfDestruct(addr)
- },
- },
- {
- name: "AddRefund",
- fn: func(a testAction, s *StateDB) {
- s.AddRefund(uint64(a.args[0]))
- },
- args: make([]int64, 1),
- noAddr: true,
- },
- {
- name: "AddLog",
- fn: func(a testAction, s *StateDB) {
- data := make([]byte, 2)
- binary.BigEndian.PutUint16(data, uint16(a.args[0]))
- s.AddLog(&types.Log{Address: addr, Data: data})
- },
- args: make([]int64, 1),
- },
- {
- name: "AddPreimage",
- fn: func(a testAction, s *StateDB) {
- preimage := []byte{1}
- hash := common.BytesToHash(preimage)
- s.AddPreimage(hash, preimage)
- },
- args: make([]int64, 1),
- },
- {
- name: "AddAddressToAccessList",
- fn: func(a testAction, s *StateDB) {
- s.AddAddressToAccessList(addr)
- },
- },
- {
- name: "AddSlotToAccessList",
- fn: func(a testAction, s *StateDB) {
- s.AddSlotToAccessList(addr,
- common.Hash{byte(a.args[0])})
- },
- args: make([]int64, 1),
- },
- {
- name: "SetTransientState",
- fn: func(a testAction, s *StateDB) {
- var key, val common.Hash
- binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
- binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
- s.SetTransientState(addr, key, val)
- },
- args: make([]int64, 2),
- },
- }
- action := actions[r.Intn(len(actions))]
- var nameargs []string
- if !action.noAddr {
- nameargs = append(nameargs, addr.Hex())
- }
- for i := range action.args {
- action.args[i] = rand.Int63n(100)
- nameargs = append(nameargs, fmt.Sprint(action.args[i]))
- }
- action.name += strings.Join(nameargs, ", ")
- return action
-}
-
-// Generate returns a new snapshot test of the given size. All randomness is
-// derived from r.
-func (*snapshotTest) Generate(r *rand.Rand, size int) reflect.Value {
- // Generate random actions.
- addrs := make([]common.Address, 50)
- for i := range addrs {
- addrs[i][0] = byte(i)
- }
- actions := make([]testAction, size)
- for i := range actions {
- addr := addrs[r.Intn(len(addrs))]
- actions[i] = newTestAction(addr, r)
- }
- // Generate snapshot indexes.
- nsnapshots := int(math.Sqrt(float64(size)))
- if size > 0 && nsnapshots == 0 {
- nsnapshots = 1
- }
- snapshots := make([]int, nsnapshots)
- snaplen := len(actions) / nsnapshots
- for i := range snapshots {
- // Try to place the snapshots some number of actions apart from each other.
- snapshots[i] = (i * snaplen) + r.Intn(snaplen)
- }
- return reflect.ValueOf(&snapshotTest{addrs, actions, snapshots, nil})
-}
-
-func (test *snapshotTest) String() string {
- out := new(bytes.Buffer)
- sindex := 0
- for i, action := range test.actions {
- if len(test.snapshots) > sindex && i == test.snapshots[sindex] {
- fmt.Fprintf(out, "---- snapshot %d ----\n", sindex)
- sindex++
- }
- fmt.Fprintf(out, "%4d: %s\n", i, action.name)
- }
- return out.String()
-}
-
-func (test *snapshotTest) run() bool {
- // Run all actions and create snapshots.
- var (
- state, _ = New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
- snapshotRevs = make([]int, len(test.snapshots))
- sindex = 0
- checkstates = make([]*StateDB, len(test.snapshots))
- )
- for i, action := range test.actions {
- if len(test.snapshots) > sindex && i == test.snapshots[sindex] {
- snapshotRevs[sindex] = state.Snapshot()
- checkstates[sindex] = state.Copy()
- sindex++
- }
- action.fn(action, state)
- }
- // Revert all snapshots in reverse order. Each revert must yield a state
- // that is equivalent to fresh state with all actions up the snapshot applied.
- for sindex--; sindex >= 0; sindex-- {
- state.RevertToSnapshot(snapshotRevs[sindex])
- if err := test.checkEqual(state, checkstates[sindex]); err != nil {
- test.err = fmt.Errorf("state mismatch after revert to snapshot %d\n%v", sindex, err)
- return false
- }
- }
- return true
-}
-
-func forEachStorage(s *StateDB, addr common.Address, cb func(key, value common.Hash) bool) error {
- so := s.getStateObject(addr)
- if so == nil {
- return nil
- }
- tr, err := so.getTrie()
- if err != nil {
- return err
- }
- trieIt, err := tr.NodeIterator(nil)
- if err != nil {
- return err
- }
- it := trie.NewIterator(trieIt)
-
- for it.Next() {
- key := common.BytesToHash(s.trie.GetKey(it.Key))
- if value, dirty := so.dirtyStorage[key]; dirty {
- if !cb(key, value) {
- return nil
- }
- continue
- }
-
- if len(it.Value) > 0 {
- _, content, _, err := rlp.Split(it.Value)
- if err != nil {
- return err
- }
- if !cb(key, common.BytesToHash(content)) {
- return nil
- }
- }
- }
- return nil
-}
-
-// checkEqual checks that methods of state and checkstate return the same values.
-func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
- for _, addr := range test.addrs {
- var err error
- checkeq := func(op string, a, b interface{}) bool {
- if err == nil && !reflect.DeepEqual(a, b) {
- err = fmt.Errorf("got %s(%s) == %v, want %v", op, addr.Hex(), a, b)
- return false
- }
- return true
- }
- // Check basic accessor methods.
- checkeq("Exist", state.Exist(addr), checkstate.Exist(addr))
- checkeq("HasSelfdestructed", state.HasSelfDestructed(addr), checkstate.HasSelfDestructed(addr))
- checkeq("GetBalance", state.GetBalance(addr), checkstate.GetBalance(addr))
- checkeq("GetNonce", state.GetNonce(addr), checkstate.GetNonce(addr))
- checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr))
- checkeq("GetCodeHash", state.GetCodeHash(addr), checkstate.GetCodeHash(addr))
- checkeq("GetCodeSize", state.GetCodeSize(addr), checkstate.GetCodeSize(addr))
- // Check storage.
- if obj := state.getStateObject(addr); obj != nil {
- forEachStorage(state, addr, func(key, value common.Hash) bool {
- return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value)
- })
- forEachStorage(checkstate, addr, func(key, value common.Hash) bool {
- return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value)
- })
- }
- if err != nil {
- return err
- }
- }
-
- if state.GetRefund() != checkstate.GetRefund() {
- return fmt.Errorf("got GetRefund() == %d, want GetRefund() == %d",
- state.GetRefund(), checkstate.GetRefund())
- }
- if !reflect.DeepEqual(state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{})) {
- return fmt.Errorf("got GetLogs(common.Hash{}) == %v, want GetLogs(common.Hash{}) == %v",
- state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{}))
- }
- return nil
-}
-
-func TestTouchDelete(t *testing.T) {
- s := newStateEnv()
- s.state.getOrNewStateObject(common.Address{})
- root, _ := s.state.Commit(0, false, false)
- s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap)
-
- snapshot := s.state.Snapshot()
- s.state.AddBalance(common.Address{}, new(uint256.Int))
-
- if len(s.state.journal.dirties) != 1 {
- t.Fatal("expected one dirty state object")
- }
- s.state.RevertToSnapshot(snapshot)
- if len(s.state.journal.dirties) != 0 {
- t.Fatal("expected no dirty state object")
- }
-}
-
-// TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
-// See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
-func TestCopyOfCopy(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
- addr := common.HexToAddress("aaaa")
- state.SetBalance(addr, uint256.NewInt(42))
-
- if got := state.Copy().GetBalance(addr).Uint64(); got != 42 {
- t.Fatalf("1st copy fail, expected 42, got %v", got)
- }
- if got := state.Copy().Copy().GetBalance(addr).Uint64(); got != 42 {
- t.Fatalf("2nd copy fail, expected 42, got %v", got)
- }
-}
-
-// Tests a regression where committing a copy lost some internal meta information,
-// leading to corrupted subsequent copies.
-//
-// See https://github.com/ethereum/go-ethereum/issues/20106.
-func TestCopyCommitCopy(t *testing.T) {
- tdb := NewDatabase(rawdb.NewMemoryDatabase())
- state, _ := New(types.EmptyRootHash, tdb, nil)
-
- // Create an account and check if the retrieved balance is correct
- addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
- skey := common.HexToHash("aaa")
- sval := common.HexToHash("bbb")
-
- state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
- state.SetState(addr, skey, sval) // Change the storage trie
-
- if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
- }
- if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := state.GetState(addr, skey); val != sval {
- t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
- }
- // Copy the non-committed state database and check pre/post commit balance
- copyOne := state.Copy()
- if balance := copyOne.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("first copy pre-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("first copy pre-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyOne.GetState(addr, skey); val != sval {
- t.Fatalf("first copy pre-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
- }
- // Copy the copy and check the balance once more
- copyTwo := copyOne.Copy()
- if balance := copyTwo.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("second copy balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("second copy code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyTwo.GetState(addr, skey); val != sval {
- t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
- }
- // Commit state, ensure states can be loaded from disk
- root, _ := state.Commit(0, false, false)
- state, _ = New(root, tdb, nil)
- if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("state post-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := state.GetState(addr, skey); val != sval {
- t.Fatalf("state post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := state.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("state post-commit committed storage slot mismatch: have %x, want %x", val, sval)
- }
-}
-
-// Tests a regression where committing a copy lost some internal meta information,
-// leading to corrupted subsequent copies.
-//
-// See https://github.com/ethereum/go-ethereum/issues/20106.
-func TestCopyCopyCommitCopy(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
-
- // Create an account and check if the retrieved balance is correct
- addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
- skey := common.HexToHash("aaa")
- sval := common.HexToHash("bbb")
-
- state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
- state.SetState(addr, skey, sval) // Change the storage trie
-
- if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
- }
- if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := state.GetState(addr, skey); val != sval {
- t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
- }
- // Copy the non-committed state database and check pre/post commit balance
- copyOne := state.Copy()
- if balance := copyOne.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("first copy balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("first copy code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyOne.GetState(addr, skey); val != sval {
- t.Fatalf("first copy non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("first copy committed storage slot mismatch: have %x, want %x", val, common.Hash{})
- }
- // Copy the copy and check the balance once more
- copyTwo := copyOne.Copy()
- if balance := copyTwo.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("second copy pre-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("second copy pre-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyTwo.GetState(addr, skey); val != sval {
- t.Fatalf("second copy pre-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
- }
- // Copy the copy-copy and check the balance once more
- copyThree := copyTwo.Copy()
- if balance := copyThree.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("third copy balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyThree.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("third copy code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyThree.GetState(addr, skey); val != sval {
- t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval)
- }
-}
-
-// TestCommitCopy tests the copy from a committed state is not functional.
-func TestCommitCopy(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
-
- // Create an account and check if the retrieved balance is correct
- addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
- skey := common.HexToHash("aaa")
- sval := common.HexToHash("bbb")
-
- state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
- state.SetState(addr, skey, sval) // Change the storage trie
-
- if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
- t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
- }
- if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := state.GetState(addr, skey); val != sval {
- t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
- }
- // Copy the committed state database, the copied one is not functional.
- state.Commit(0, true, false)
- copied := state.Copy()
- if balance := copied.GetBalance(addr); balance.Cmp(uint256.NewInt(0)) != 0 {
- t.Fatalf("unexpected balance: have %v", balance)
- }
- if code := copied.GetCode(addr); code != nil {
- t.Fatalf("unexpected code: have %x", code)
- }
- if val := copied.GetState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("unexpected storage slot: have %x", val)
- }
- if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) {
- t.Fatalf("unexpected storage slot: have %x", val)
- }
- if !errors.Is(copied.Error(), trie.ErrCommitted) {
- t.Fatalf("unexpected state error, %v", copied.Error())
- }
-}
-
-// TestDeleteCreateRevert tests a weird state transition corner case that we hit
-// while changing the internals of StateDB. The workflow is that a contract is
-// self-destructed, then in a follow-up transaction (but same block) it's created
-// again and the transaction reverted.
-//
-// The original StateDB implementation flushed dirty objects to the tries after
-// each transaction, so this works ok. The rework accumulated writes in memory
-// first, but the journal wiped the entire state object on create-revert.
-func TestDeleteCreateRevert(t *testing.T) {
- // Create an initial state with a single contract
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
-
- addr := common.BytesToAddress([]byte("so"))
- state.SetBalance(addr, uint256.NewInt(1))
-
- root, _ := state.Commit(0, false, false)
- state, _ = NewWithSnapshot(root, state.db, state.snap)
-
- // Simulate self-destructing in one transaction, then create-reverting in another
- state.SelfDestruct(addr)
- state.Finalise(true)
-
- id := state.Snapshot()
- state.SetBalance(addr, uint256.NewInt(2))
- state.RevertToSnapshot(id)
-
- // Commit the entire state and make sure we don't crash and have the correct state
- root, _ = state.Commit(0, true, false)
- state, _ = NewWithSnapshot(root, state.db, state.snap)
-
- if state.getStateObject(addr) != nil {
- t.Fatalf("self-destructed contract came alive")
- }
-}
-
-// TestMissingTrieNodes tests that if the StateDB fails to load parts of the trie,
-// the Commit operation fails with an error
-// If we are missing trie nodes, we should not continue writing to the trie
-func TestMissingTrieNodes(t *testing.T) {
- testMissingTrieNodes(t, rawdb.HashScheme)
- testMissingTrieNodes(t, rawdb.PathScheme)
-}
-
-func testMissingTrieNodes(t *testing.T, scheme string) {
- // Create an initial state with a few accounts
- var (
- tdb *triedb.Database
- memDb = rawdb.NewMemoryDatabase()
- )
- if scheme == rawdb.PathScheme {
- tdb = triedb.NewDatabase(memDb, &triedb.Config{PathDB: &pathdb.Config{
- CleanCacheSize: 0,
- DirtyCacheSize: 0,
- }}) // disable caching
- } else {
- tdb = triedb.NewDatabase(memDb, &triedb.Config{HashDB: &hashdb.Config{
- CleanCacheSize: 0,
- }}) // disable caching
- }
- db := NewDatabaseWithNodeDB(memDb, tdb)
-
- var root common.Hash
- state, _ := New(types.EmptyRootHash, db, nil)
- addr := common.BytesToAddress([]byte("so"))
- {
- state.SetBalance(addr, uint256.NewInt(1))
- state.SetCode(addr, []byte{1, 2, 3})
- a2 := common.BytesToAddress([]byte("another"))
- state.SetBalance(a2, uint256.NewInt(100))
- state.SetCode(a2, []byte{1, 2, 4})
- root, _ = state.Commit(0, false, false)
- t.Logf("root: %x", root)
- // force-flush
- tdb.Commit(root, false)
- }
- // Create a new state on the old root
- state, _ = New(root, db, nil)
- // Now we clear out the memdb
- it := memDb.NewIterator(nil, nil)
- for it.Next() {
- k := it.Key()
- // Leave the root intact
- if !bytes.Equal(k, root[:]) {
- t.Logf("key: %x", k)
- memDb.Delete(k)
- }
- }
- balance := state.GetBalance(addr)
- // The removed elem should lead to it returning zero balance
- if exp, got := uint64(0), balance.Uint64(); got != exp {
- t.Errorf("expected %d, got %d", exp, got)
- }
- // Modify the state
- state.SetBalance(addr, uint256.NewInt(2))
- root, err := state.Commit(0, false, false)
- if err == nil {
- t.Fatalf("expected error, got root :%x", root)
- }
-}
-
-func TestStateDBAccessList(t *testing.T) {
- // Some helpers
- addr := func(a string) common.Address {
- return common.HexToAddress(a)
- }
- slot := func(a string) common.Hash {
- return common.HexToHash(a)
- }
-
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(types.EmptyRootHash, db, nil)
- state.accessList = newAccessList()
-
- verifyAddrs := func(astrings ...string) {
- t.Helper()
- // convert to common.Address form
- var addresses []common.Address
- var addressMap = make(map[common.Address]struct{})
- for _, astring := range astrings {
- address := addr(astring)
- addresses = append(addresses, address)
- addressMap[address] = struct{}{}
- }
- // Check that the given addresses are in the access list
- for _, address := range addresses {
- if !state.AddressInAccessList(address) {
- t.Fatalf("expected %x to be in access list", address)
- }
- }
- // Check that only the expected addresses are present in the access list
- for address := range state.accessList.addresses {
- if _, exist := addressMap[address]; !exist {
- t.Fatalf("extra address %x in access list", address)
- }
- }
- }
- verifySlots := func(addrString string, slotStrings ...string) {
- if !state.AddressInAccessList(addr(addrString)) {
- t.Fatalf("scope missing address/slots %v", addrString)
- }
- var address = addr(addrString)
- // convert to common.Hash form
- var slots []common.Hash
- var slotMap = make(map[common.Hash]struct{})
- for _, slotString := range slotStrings {
- s := slot(slotString)
- slots = append(slots, s)
- slotMap[s] = struct{}{}
- }
- // Check that the expected items are in the access list
- for i, s := range slots {
- if _, slotPresent := state.SlotInAccessList(address, s); !slotPresent {
- t.Fatalf("input %d: scope missing slot %v (address %v)", i, s, addrString)
- }
- }
- // Check that no extra elements are in the access list
- index := state.accessList.addresses[address]
- if index >= 0 {
- stateSlots := state.accessList.slots[index]
- for s := range stateSlots {
- if _, slotPresent := slotMap[s]; !slotPresent {
- t.Fatalf("scope has extra slot %v (address %v)", s, addrString)
- }
- }
- }
- }
-
- state.AddAddressToAccessList(addr("aa")) // 1
- state.AddSlotToAccessList(addr("bb"), slot("01")) // 2,3
- state.AddSlotToAccessList(addr("bb"), slot("02")) // 4
- verifyAddrs("aa", "bb")
- verifySlots("bb", "01", "02")
-
- // Make a copy
- stateCopy1 := state.Copy()
- if exp, got := 4, state.journal.length(); exp != got {
- t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
- }
-
- // same again, should cause no journal entries
- state.AddSlotToAccessList(addr("bb"), slot("01"))
- state.AddSlotToAccessList(addr("bb"), slot("02"))
- state.AddAddressToAccessList(addr("aa"))
- if exp, got := 4, state.journal.length(); exp != got {
- t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
- }
- // some new ones
- state.AddSlotToAccessList(addr("bb"), slot("03")) // 5
- state.AddSlotToAccessList(addr("aa"), slot("01")) // 6
- state.AddSlotToAccessList(addr("cc"), slot("01")) // 7,8
- state.AddAddressToAccessList(addr("cc"))
- if exp, got := 8, state.journal.length(); exp != got {
- t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
- }
-
- verifyAddrs("aa", "bb", "cc")
- verifySlots("aa", "01")
- verifySlots("bb", "01", "02", "03")
- verifySlots("cc", "01")
-
- // now start rolling back changes
- state.journal.revert(state, 7)
- if _, ok := state.SlotInAccessList(addr("cc"), slot("01")); ok {
- t.Fatalf("slot present, expected missing")
- }
- verifyAddrs("aa", "bb", "cc")
- verifySlots("aa", "01")
- verifySlots("bb", "01", "02", "03")
-
- state.journal.revert(state, 6)
- if state.AddressInAccessList(addr("cc")) {
- t.Fatalf("addr present, expected missing")
- }
- verifyAddrs("aa", "bb")
- verifySlots("aa", "01")
- verifySlots("bb", "01", "02", "03")
-
- state.journal.revert(state, 5)
- if _, ok := state.SlotInAccessList(addr("aa"), slot("01")); ok {
- t.Fatalf("slot present, expected missing")
- }
- verifyAddrs("aa", "bb")
- verifySlots("bb", "01", "02", "03")
-
- state.journal.revert(state, 4)
- if _, ok := state.SlotInAccessList(addr("bb"), slot("03")); ok {
- t.Fatalf("slot present, expected missing")
- }
- verifyAddrs("aa", "bb")
- verifySlots("bb", "01", "02")
-
- state.journal.revert(state, 3)
- if _, ok := state.SlotInAccessList(addr("bb"), slot("02")); ok {
- t.Fatalf("slot present, expected missing")
- }
- verifyAddrs("aa", "bb")
- verifySlots("bb", "01")
-
- state.journal.revert(state, 2)
- if _, ok := state.SlotInAccessList(addr("bb"), slot("01")); ok {
- t.Fatalf("slot present, expected missing")
- }
- verifyAddrs("aa", "bb")
-
- state.journal.revert(state, 1)
- if state.AddressInAccessList(addr("bb")) {
- t.Fatalf("addr present, expected missing")
- }
- verifyAddrs("aa")
-
- state.journal.revert(state, 0)
- if state.AddressInAccessList(addr("aa")) {
- t.Fatalf("addr present, expected missing")
- }
- if got, exp := len(state.accessList.addresses), 0; got != exp {
- t.Fatalf("expected empty, got %d", got)
- }
- if got, exp := len(state.accessList.slots), 0; got != exp {
- t.Fatalf("expected empty, got %d", got)
- }
- // Check the copy
- // Make a copy
- state = stateCopy1
- verifyAddrs("aa", "bb")
- verifySlots("bb", "01", "02")
- if got, exp := len(state.accessList.addresses), 2; got != exp {
- t.Fatalf("expected empty, got %d", got)
- }
- if got, exp := len(state.accessList.slots), 1; got != exp {
- t.Fatalf("expected empty, got %d", got)
- }
-}
-
-func TestMultiCoinOperations(t *testing.T) {
- s := newStateEnv()
- addr := common.Address{1}
- assetID := common.Hash{2}
-
- s.state.getOrNewStateObject(addr)
- root, _ := s.state.Commit(0, false, false)
- s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap)
-
- s.state.AddBalance(addr, new(uint256.Int))
-
- balance := s.state.GetBalanceMultiCoin(addr, assetID)
- if balance.Cmp(big.NewInt(0)) != 0 {
- t.Fatal("expected zero multicoin balance")
- }
-
- s.state.SetBalanceMultiCoin(addr, assetID, big.NewInt(10))
- s.state.SubBalanceMultiCoin(addr, assetID, big.NewInt(5))
- s.state.AddBalanceMultiCoin(addr, assetID, big.NewInt(3))
-
- balance = s.state.GetBalanceMultiCoin(addr, assetID)
- if balance.Cmp(big.NewInt(8)) != 0 {
- t.Fatal("expected multicoin balance to be 8")
- }
-}
-
-func TestMultiCoinSnapshot(t *testing.T) {
- db := rawdb.NewMemoryDatabase()
- sdb := NewDatabase(db)
-
- // Create empty snapshot.Tree and StateDB
- root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
- // Use the root as both the stateRoot and blockHash for this test.
- snapTree := snapshot.NewTestTree(db, root, root)
-
- addr := common.Address{1}
- assetID1 := common.Hash{1}
- assetID2 := common.Hash{2}
-
- var stateDB *StateDB
- assertBalances := func(regular, multicoin1, multicoin2 int64) {
- balance := stateDB.GetBalance(addr)
- if balance.Cmp(uint256.NewInt(uint64(regular))) != 0 {
- t.Fatal("incorrect non-multicoin balance")
- }
- balanceBig := stateDB.GetBalanceMultiCoin(addr, assetID1)
- if balanceBig.Cmp(big.NewInt(multicoin1)) != 0 {
- t.Fatal("incorrect multicoin1 balance")
- }
- balanceBig = stateDB.GetBalanceMultiCoin(addr, assetID2)
- if balanceBig.Cmp(big.NewInt(multicoin2)) != 0 {
- t.Fatal("incorrect multicoin2 balance")
- }
- }
-
- // Create new state
- stateDB, _ = New(root, sdb, snapTree)
- assertBalances(0, 0, 0)
-
- stateDB.AddBalance(addr, uint256.NewInt(10))
- assertBalances(10, 0, 0)
-
- // Commit and get the new root
- root, _ = stateDB.Commit(0, false, false)
- assertBalances(10, 0, 0)
-
- // Create a new state from the latest root, add a multicoin balance, and
- // commit it to the tree.
- stateDB, _ = New(root, sdb, snapTree)
- stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(10))
- root, _ = stateDB.Commit(0, false, false)
- assertBalances(10, 10, 0)
-
- // Add more layers than the cap and ensure the balances and layers are correct
- for i := 0; i < 256; i++ {
- stateDB, _ = New(root, sdb, snapTree)
- stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1))
- stateDB.AddBalanceMultiCoin(addr, assetID2, big.NewInt(2))
- root, _ = stateDB.Commit(0, false, false)
- }
- assertBalances(10, 266, 512)
-
- // Do one more add, including the regular balance which is now in the
- // collapsed snapshot
- stateDB, _ = New(root, sdb, snapTree)
- stateDB.AddBalance(addr, uint256.NewInt(1))
- stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1))
- root, _ = stateDB.Commit(0, false, false)
- stateDB, _ = New(root, sdb, snapTree)
- assertBalances(11, 267, 512)
-}
-
-func TestGenerateMultiCoinAccounts(t *testing.T) {
- var (
- diskdb = rawdb.NewMemoryDatabase()
- database = NewDatabase(diskdb)
-
- addr = common.BytesToAddress([]byte("addr1"))
- addrHash = crypto.Keccak256Hash(addr[:])
-
- assetID = common.BytesToHash([]byte("coin1"))
- assetBalance = big.NewInt(10)
- )
-
- stateDB, err := New(common.Hash{}, database, nil)
- if err != nil {
- t.Fatal(err)
- }
- stateDB.SetBalanceMultiCoin(addr, assetID, assetBalance)
- root, err := stateDB.Commit(0, false, false)
- if err != nil {
- t.Fatal(err)
- }
-
- triedb := database.TrieDB()
- if err := triedb.Commit(root, true); err != nil {
- t.Fatal(err)
- }
- // Build snapshot from scratch
- snapConfig := snapshot.Config{
- CacheSize: 16,
- AsyncBuild: false,
- NoBuild: false,
- SkipVerify: true,
- }
- snaps, err := snapshot.New(snapConfig, diskdb, triedb, common.Hash{}, root)
- if err != nil {
- t.Error("Unexpected error while rebuilding snapshot:", err)
- }
-
- // Get latest snapshot and make sure it has the correct account and storage
- snap := snaps.Snapshot(root)
- snapAccount, err := snap.Account(addrHash)
- if err != nil {
- t.Fatal(err)
- }
- if !snapAccount.IsMultiCoin {
- t.Fatalf("Expected SnapAccount to return IsMultiCoin: true, found: %v", snapAccount.IsMultiCoin)
- }
-
- NormalizeCoinID(&assetID)
- assetHash := crypto.Keccak256Hash(assetID.Bytes())
- storageBytes, err := snap.Storage(addrHash, assetHash)
- if err != nil {
- t.Fatal(err)
- }
-
- actualAssetBalance := new(big.Int).SetBytes(storageBytes)
- if actualAssetBalance.Cmp(assetBalance) != 0 {
- t.Fatalf("Expected asset balance: %v, found %v", assetBalance, actualAssetBalance)
- }
-}
-
-// Tests that account and storage tries are flushed in the correct order and that
-// no data loss occurs.
-func TestFlushOrderDataLoss(t *testing.T) {
- // Create a state trie with many accounts and slots
- var (
- memdb = rawdb.NewMemoryDatabase()
- triedb = triedb.NewDatabase(memdb, nil)
- statedb = NewDatabaseWithNodeDB(memdb, triedb)
- state, _ = New(types.EmptyRootHash, statedb, nil)
- )
- for a := byte(0); a < 10; a++ {
- state.CreateAccount(common.Address{a})
- for s := byte(0); s < 10; s++ {
- state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
- }
- }
- root, err := state.Commit(0, false, false)
- if err != nil {
- t.Fatalf("failed to commit state trie: %v", err)
- }
- triedb.Reference(root, common.Hash{})
- if err := triedb.Cap(1024); err != nil {
- t.Fatalf("failed to cap trie dirty cache: %v", err)
- }
- if err := triedb.Commit(root, false); err != nil {
- t.Fatalf("failed to commit state trie: %v", err)
- }
- // Reopen the state trie from flushed disk and verify it
- state, err = New(root, NewDatabase(memdb), nil)
- if err != nil {
- t.Fatalf("failed to reopen state trie: %v", err)
- }
- for a := byte(0); a < 10; a++ {
- for s := byte(0); s < 10; s++ {
- if have := state.GetState(common.Address{a}, common.Hash{a, s}); have != (common.Hash{a, s}) {
- t.Errorf("account %d: slot %d: state mismatch: have %x, want %x", a, s, have, common.Hash{a, s})
- }
- }
- }
-}
-
-func TestStateDBTransientStorage(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(types.EmptyRootHash, db, nil)
-
- key := common.Hash{0x01}
- value := common.Hash{0x02}
- addr := common.Address{}
-
- state.SetTransientState(addr, key, value)
- if exp, got := 1, state.journal.length(); exp != got {
- t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
- }
- // the retrieved value should equal what was set
- if got := state.GetTransientState(addr, key); got != value {
- t.Fatalf("transient storage mismatch: have %x, want %x", got, value)
- }
-
- // revert the transient state being set and then check that the
- // value is now the empty hash
- state.journal.revert(state, 0)
- if got, exp := state.GetTransientState(addr, key), (common.Hash{}); exp != got {
- t.Fatalf("transient storage mismatch: have %x, want %x", got, exp)
- }
-
- // set transient state and then copy the statedb and ensure that
- // the transient state is copied
- state.SetTransientState(addr, key, value)
- cpy := state.Copy()
- if got := cpy.GetTransientState(addr, key); got != value {
- t.Fatalf("transient storage mismatch: have %x, want %x", got, value)
- }
-}
-
-func TestResetObject(t *testing.T) {
- var (
- disk = rawdb.NewMemoryDatabase()
- tdb = triedb.NewDatabase(disk, nil)
- db = NewDatabaseWithNodeDB(disk, tdb)
- snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, common.Hash{}, types.EmptyRootHash)
- state, _ = New(types.EmptyRootHash, db, snaps)
- addr = common.HexToAddress("0x1")
- slotA = common.HexToHash("0x1")
- slotB = common.HexToHash("0x2")
- )
- // Initialize account with balance and storage in first transaction.
- state.SetBalance(addr, uint256.NewInt(1))
- state.SetState(addr, slotA, common.BytesToHash([]byte{0x1}))
- state.IntermediateRoot(true)
-
- // Reset account and mutate balance and storages
- state.CreateAccount(addr)
- state.SetBalance(addr, uint256.NewInt(2))
- state.SetState(addr, slotB, common.BytesToHash([]byte{0x2}))
- root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, false)
-
- // Ensure the original account is wiped properly
- snap := snaps.Snapshot(root)
- slot, _ := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotA.Bytes()))
- if len(slot) != 0 {
- t.Fatalf("Unexpected storage slot")
- }
- slot, _ = snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotB.Bytes()))
- if !bytes.Equal(slot, []byte{0x2}) {
- t.Fatalf("Unexpected storage slot value %v", slot)
- }
-}
-
-func TestDeleteStorage(t *testing.T) {
- var (
- disk = rawdb.NewMemoryDatabase()
- tdb = triedb.NewDatabase(disk, nil)
- db = NewDatabaseWithNodeDB(disk, tdb)
- snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, common.Hash{}, types.EmptyRootHash)
- state, _ = New(types.EmptyRootHash, db, snaps)
- addr = common.HexToAddress("0x1")
- )
- // Initialize account and populate storage
- state.SetBalance(addr, uint256.NewInt(1))
- state.CreateAccount(addr)
- for i := 0; i < 1000; i++ {
- slot := common.Hash(uint256.NewInt(uint64(i)).Bytes32())
- value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32())
- state.SetState(addr, slot, value)
- }
- root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, false)
- // Init phase done, create two states, one with snap and one without
- fastState, _ := New(root, db, snaps)
- slowState, _ := New(root, db, nil)
-
- obj := fastState.getOrNewStateObject(addr)
- storageRoot := obj.data.Root
-
- _, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
- if err != nil {
- t.Fatal(err)
- }
-
- _, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
- if err != nil {
- t.Fatal(err)
- }
- check := func(set *trienode.NodeSet) string {
- var a []string
- set.ForEachWithOrder(func(path string, n *trienode.Node) {
- if n.Hash != (common.Hash{}) {
- t.Fatal("delete should have empty hashes")
- }
- if len(n.Blob) != 0 {
- t.Fatal("delete should have have empty blobs")
- }
- a = append(a, fmt.Sprintf("%x", path))
- })
- return strings.Join(a, ",")
- }
- slowRes := check(slowNodes)
- fastRes := check(fastNodes)
- if slowRes != fastRes {
- t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes)
- }
-}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
deleted file mode 100644
index f7cf30c683..0000000000
--- a/core/state/sync_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "math/big"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
- "github.com/ava-labs/coreth/triedb/hashdb"
- "github.com/ava-labs/coreth/triedb/pathdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/holiman/uint256"
-)
-
-// testAccount is the data associated with an account used by the state tests.
-type testAccount struct {
- address common.Address
- balance *big.Int
- nonce uint64
- code []byte
-}
-
-// makeTestState create a sample test state to test node-wise reconstruction.
-func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, common.Hash, []*testAccount) {
- // Create an empty state
- config := &triedb.Config{Preimages: true}
- if scheme == rawdb.PathScheme {
- config.PathDB = pathdb.Defaults
- } else {
- config.HashDB = hashdb.Defaults
- }
- db := rawdb.NewMemoryDatabase()
- nodeDb := triedb.NewDatabase(db, config)
- sdb := NewDatabaseWithNodeDB(db, nodeDb)
- state, _ := New(types.EmptyRootHash, sdb, nil)
-
- // Fill it with some arbitrary data
- var accounts []*testAccount
- for i := byte(0); i < 96; i++ {
- obj := state.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- acc := &testAccount{address: common.BytesToAddress([]byte{i})}
-
- obj.AddBalance(uint256.NewInt(uint64(11 * i)))
- acc.balance = big.NewInt(int64(11 * i))
-
- obj.SetNonce(uint64(42 * i))
- acc.nonce = uint64(42 * i)
-
- if i%3 == 0 {
- obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
- acc.code = []byte{i, i, i, i, i}
- }
- if i%5 == 0 {
- for j := byte(0); j < 5; j++ {
- hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
- obj.SetState(hash, hash)
- }
- }
- accounts = append(accounts, acc)
- }
- root, _ := state.Commit(0, false, false)
-
- // Return the generated state
- return db, sdb, nodeDb, root, accounts
-}
diff --git a/core/state/transient_storage.go b/core/state/transient_storage.go
deleted file mode 100644
index 285ebbc727..0000000000
--- a/core/state/transient_storage.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// (c) 2023, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "github.com/ava-labs/libevm/common"
-)
-
-// transientStorage is a representation of EIP-1153 "Transient Storage".
-type transientStorage map[common.Address]Storage
-
-// newTransientStorage creates a new instance of a transientStorage.
-func newTransientStorage() transientStorage {
- return make(transientStorage)
-}
-
-// Set sets the transient-storage `value` for `key` at the given `addr`.
-func (t transientStorage) Set(addr common.Address, key, value common.Hash) {
- if _, ok := t[addr]; !ok {
- t[addr] = make(Storage)
- }
- t[addr][key] = value
-}
-
-// Get gets the transient storage for `key` at the given `addr`.
-func (t transientStorage) Get(addr common.Address, key common.Hash) common.Hash {
- val, ok := t[addr]
- if !ok {
- return common.Hash{}
- }
- return val[key]
-}
-
-// Copy does a deep copy of the transientStorage
-func (t transientStorage) Copy() transientStorage {
- storage := make(transientStorage)
- for key, value := range t {
- storage[key] = value.Copy()
- }
- return storage
-}
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
deleted file mode 100644
index d053c78751..0000000000
--- a/core/state/trie_prefetcher.go
+++ /dev/null
@@ -1,640 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "sync"
- "time"
-
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/utils"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/log"
-)
-
-// triePrefetchMetricsPrefix is the prefix under which to publish the metrics.
-const triePrefetchMetricsPrefix = "trie/prefetch/"
-
-// triePrefetcher is an active prefetcher, which receives accounts or storage
-// items and does trie-loading of them. The goal is to get as much useful content
-// into the caches as possible.
-//
-// Note, the prefetcher's API is not thread safe.
-type triePrefetcher struct {
- db Database // Database to fetch trie nodes through
- root common.Hash // Root hash of the account trie for metrics
- fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies.
- fetchers map[string]*subfetcher // Subfetchers for each trie
-
- maxConcurrency int
- workers *utils.BoundedWorkers
-
- subfetcherWorkersMeter metrics.Meter
- subfetcherWaitTimer metrics.Counter
- subfetcherCopiesMeter metrics.Meter
-
- accountLoadMeter metrics.Meter
- accountDupMeter metrics.Meter
- accountSkipMeter metrics.Meter
- accountWasteMeter metrics.Meter
-
- storageFetchersMeter metrics.Meter
- storageLoadMeter metrics.Meter
- storageLargestLoadMeter metrics.Meter
- storageDupMeter metrics.Meter
- storageSkipMeter metrics.Meter
- storageWasteMeter metrics.Meter
-}
-
-func newTriePrefetcher(db Database, root common.Hash, namespace string, maxConcurrency int) *triePrefetcher {
- prefix := triePrefetchMetricsPrefix + namespace
- return &triePrefetcher{
- db: db,
- root: root,
- fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map
-
- maxConcurrency: maxConcurrency,
- workers: utils.NewBoundedWorkers(maxConcurrency), // Scale up as needed to [maxConcurrency]
-
- subfetcherWorkersMeter: metrics.GetOrRegisterMeter(prefix+"/subfetcher/workers", nil),
- subfetcherWaitTimer: metrics.GetOrRegisterCounter(prefix+"/subfetcher/wait", nil),
- subfetcherCopiesMeter: metrics.GetOrRegisterMeter(prefix+"/subfetcher/copies", nil),
-
- accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil),
- accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil),
- accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil),
- accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil),
-
- storageFetchersMeter: metrics.GetOrRegisterMeter(prefix+"/storage/fetchers", nil),
- storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil),
- storageLargestLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/lload", nil),
- storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil),
- storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil),
- storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil),
- }
-}
-
-// close iterates over all the subfetchers, aborts any that were left spinning
-// and reports the stats to the metrics subsystem.
-func (p *triePrefetcher) close() {
- // If the prefetcher is an inactive one, bail out
- if p.fetches != nil {
- return
- }
-
- // Collect stats from all fetchers
- var (
- storageFetchers int64
- largestLoad int64
- )
- for _, fetcher := range p.fetchers {
- fetcher.abort() // safe to call multiple times (should be a no-op on happy path)
-
- if metrics.Enabled {
- p.subfetcherCopiesMeter.Mark(int64(fetcher.copies()))
-
- if fetcher.root == p.root {
- p.accountLoadMeter.Mark(int64(len(fetcher.seen)))
- p.accountDupMeter.Mark(int64(fetcher.dups))
- p.accountSkipMeter.Mark(int64(fetcher.skips()))
-
- for _, key := range fetcher.used {
- delete(fetcher.seen, string(key))
- }
- p.accountWasteMeter.Mark(int64(len(fetcher.seen)))
- } else {
- storageFetchers++
- oseen := int64(len(fetcher.seen))
- if oseen > largestLoad {
- largestLoad = oseen
- }
- p.storageLoadMeter.Mark(oseen)
- p.storageDupMeter.Mark(int64(fetcher.dups))
- p.storageSkipMeter.Mark(int64(fetcher.skips()))
-
- for _, key := range fetcher.used {
- delete(fetcher.seen, string(key))
- }
- p.storageWasteMeter.Mark(int64(len(fetcher.seen)))
- }
- }
- }
- if metrics.Enabled {
- p.storageFetchersMeter.Mark(storageFetchers)
- p.storageLargestLoadMeter.Mark(largestLoad)
- }
-
- // Stop all workers once fetchers are aborted (otherwise
- // could stop while waiting)
- //
- // Record number of workers that were spawned during this run
- workersUsed := int64(p.workers.Wait())
- if metrics.Enabled {
- p.subfetcherWorkersMeter.Mark(workersUsed)
- }
-
- // Clear out all fetchers (will crash on a second call, deliberate)
- p.fetchers = nil
-}
-
-// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data
-// already loaded will be copied over, but no goroutines will be started. This
-// is mostly used in the miner which creates a copy of it's actively mutated
-// state to be sealed while it may further mutate the state.
-func (p *triePrefetcher) copy() *triePrefetcher {
- copy := &triePrefetcher{
- db: p.db,
- root: p.root,
- fetches: make(map[string]Trie), // Active prefetchers use the fetchers map
-
- subfetcherWorkersMeter: p.subfetcherWorkersMeter,
- subfetcherWaitTimer: p.subfetcherWaitTimer,
- subfetcherCopiesMeter: p.subfetcherCopiesMeter,
-
- accountLoadMeter: p.accountLoadMeter,
- accountDupMeter: p.accountDupMeter,
- accountSkipMeter: p.accountSkipMeter,
- accountWasteMeter: p.accountWasteMeter,
-
- storageFetchersMeter: p.storageFetchersMeter,
- storageLoadMeter: p.storageLoadMeter,
- storageLargestLoadMeter: p.storageLargestLoadMeter,
- storageDupMeter: p.storageDupMeter,
- storageSkipMeter: p.storageSkipMeter,
- storageWasteMeter: p.storageWasteMeter,
- }
- // If the prefetcher is already a copy, duplicate the data
- if p.fetches != nil {
- for root, fetch := range p.fetches {
- if fetch == nil {
- continue
- }
- copy.fetches[root] = p.db.CopyTrie(fetch)
- }
- return copy
- }
- // Otherwise we're copying an active fetcher, retrieve the current states
- for id, fetcher := range p.fetchers {
- copy.fetches[id] = fetcher.peek()
- }
- return copy
-}
-
-// prefetch schedules a batch of trie items to prefetch.
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) {
- // If the prefetcher is an inactive one, bail out
- if p.fetches != nil {
- return
- }
-
- // Active fetcher, schedule the retrievals
- id := p.trieID(owner, root)
- fetcher := p.fetchers[id]
- if fetcher == nil {
- fetcher = newSubfetcher(p, owner, root, addr)
- p.fetchers[id] = fetcher
- }
- fetcher.schedule(keys)
-}
-
-// trie returns the trie matching the root hash, or nil if the prefetcher doesn't
-// have it.
-func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
- // If the prefetcher is inactive, return from existing deep copies
- id := p.trieID(owner, root)
- if p.fetches != nil {
- trie := p.fetches[id]
- if trie == nil {
- return nil
- }
- return p.db.CopyTrie(trie)
- }
-
- // Otherwise the prefetcher is active, bail if no trie was prefetched for this root
- fetcher := p.fetchers[id]
- if fetcher == nil {
- return nil
- }
-
- // Wait for the fetcher to finish and shutdown orchestrator, if it exists
- start := time.Now()
- fetcher.wait()
- if metrics.Enabled {
- p.subfetcherWaitTimer.Inc(time.Since(start).Milliseconds())
- }
-
- // Return a copy of one of the prefetched tries
- trie := fetcher.peek()
- if trie == nil {
- return nil
- }
- return trie
-}
-
-// used marks a batch of state items used to allow creating statistics as to
-// how useful or wasteful the prefetcher is.
-func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) {
- if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil {
- fetcher.used = used
- }
-}
-
-// trieID returns an unique trie identifier consists the trie owner and root hash.
-func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
- trieID := make([]byte, common.HashLength*2)
- copy(trieID, owner.Bytes())
- copy(trieID[common.HashLength:], root.Bytes())
- return string(trieID)
-}
-
-// subfetcher is a trie fetcher goroutine responsible for pulling entries for a
-// single trie. It is spawned when a new root is encountered and lives until the
-// main prefetcher is paused and either all requested items are processed or if
-// the trie being worked on is retrieved from the prefetcher.
-type subfetcher struct {
- p *triePrefetcher
-
- db Database // Database to load trie nodes through
- state common.Hash // Root hash of the state to prefetch
- owner common.Hash // Owner of the trie, usually account hash
- root common.Hash // Root hash of the trie to prefetch
- addr common.Address // Address of the account that the trie belongs to
-
- to *trieOrchestrator // Orchestrate concurrent fetching of a single trie
-
- seen map[string]struct{} // Tracks the entries already loaded
- dups int // Number of duplicate preload tasks
- used [][]byte // Tracks the entries used in the end
-}
-
-// newSubfetcher creates a goroutine to prefetch state items belonging to a
-// particular root hash.
-func newSubfetcher(p *triePrefetcher, owner common.Hash, root common.Hash, addr common.Address) *subfetcher {
- sf := &subfetcher{
- p: p,
- db: p.db,
- state: p.root,
- owner: owner,
- root: root,
- addr: addr,
- seen: make(map[string]struct{}),
- }
- sf.to = newTrieOrchestrator(sf)
- if sf.to != nil {
- go sf.to.processTasks()
- }
- // We return [sf] here to ensure we don't try to re-create if
- // we aren't able to setup a [newTrieOrchestrator] the first time.
- return sf
-}
-
-// schedule adds a batch of trie keys to the queue to prefetch.
-// This should never block, so an array is used instead of a channel.
-//
-// This is not thread-safe.
-func (sf *subfetcher) schedule(keys [][]byte) {
- // Append the tasks to the current queue
- tasks := make([][]byte, 0, len(keys))
- for _, key := range keys {
- // Check if keys already seen
- sk := string(key)
- if _, ok := sf.seen[sk]; ok {
- sf.dups++
- continue
- }
- sf.seen[sk] = struct{}{}
- tasks = append(tasks, key)
- }
-
- // After counting keys, exit if they can't be prefetched
- if sf.to == nil {
- return
- }
-
- // Add tasks to queue for prefetching
- sf.to.enqueueTasks(tasks)
-}
-
-// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it
-// is currently.
-func (sf *subfetcher) peek() Trie {
- if sf.to == nil {
- return nil
- }
- return sf.to.copyBase()
-}
-
-// wait must only be called if [triePrefetcher] has not been closed. If this happens,
-// workers will not finish.
-func (sf *subfetcher) wait() {
- if sf.to == nil {
- // Unable to open trie
- return
- }
- sf.to.wait()
-}
-
-func (sf *subfetcher) abort() {
- if sf.to == nil {
- // Unable to open trie
- return
- }
- sf.to.abort()
-}
-
-func (sf *subfetcher) skips() int {
- if sf.to == nil {
- // Unable to open trie
- return 0
- }
- return sf.to.skipCount()
-}
-
-func (sf *subfetcher) copies() int {
- if sf.to == nil {
- // Unable to open trie
- return 0
- }
- return sf.to.copies
-}
-
-// trieOrchestrator is not thread-safe.
-type trieOrchestrator struct {
- sf *subfetcher
-
- // base is an unmodified Trie we keep for
- // creating copies for each worker goroutine.
- //
- // We care more about quick copies than good copies
- // because most (if not all) of the nodes that will be populated
- // in the copy will come from the underlying triedb cache. Ones
- // that don't come from this cache probably had to be fetched
- // from disk anyways.
- base Trie
- baseLock sync.Mutex
-
- tasksAllowed bool
- skips int // number of tasks skipped
- pendingTasks [][]byte
- taskLock sync.Mutex
-
- processingTasks sync.WaitGroup
-
- wake chan struct{}
- stop chan struct{}
- stopOnce sync.Once
- loopTerm chan struct{}
-
- copies int
- copyChan chan Trie
- copySpawner chan struct{}
-}
-
-func newTrieOrchestrator(sf *subfetcher) *trieOrchestrator {
- // Start by opening the trie and stop processing if it fails
- var (
- base Trie
- err error
- )
- if sf.owner == (common.Hash{}) {
- base, err = sf.db.OpenTrie(sf.root)
- if err != nil {
- log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
- return nil
- }
- } else {
- // The trie argument can be nil as verkle doesn't support prefetching
- // yet. TODO FIX IT(rjl493456442), otherwise code will panic here.
- base, err = sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil)
- if err != nil {
- log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
- return nil
- }
- }
-
- // Instantiate trieOrchestrator
- to := &trieOrchestrator{
- sf: sf,
- base: base,
-
- tasksAllowed: true,
- wake: make(chan struct{}, 1),
- stop: make(chan struct{}),
- loopTerm: make(chan struct{}),
-
- copyChan: make(chan Trie, sf.p.maxConcurrency),
- copySpawner: make(chan struct{}, sf.p.maxConcurrency),
- }
-
- // Create initial trie copy
- to.copies++
- to.copySpawner <- struct{}{}
- to.copyChan <- to.copyBase()
- return to
-}
-
-func (to *trieOrchestrator) copyBase() Trie {
- to.baseLock.Lock()
- defer to.baseLock.Unlock()
-
- return to.sf.db.CopyTrie(to.base)
-}
-
-func (to *trieOrchestrator) skipCount() int {
- to.taskLock.Lock()
- defer to.taskLock.Unlock()
-
- return to.skips
-}
-
-func (to *trieOrchestrator) enqueueTasks(tasks [][]byte) {
- to.taskLock.Lock()
- defer to.taskLock.Unlock()
-
- if len(tasks) == 0 {
- return
- }
-
- // Add tasks to [pendingTasks]
- if !to.tasksAllowed {
- to.skips += len(tasks)
- return
- }
- to.processingTasks.Add(len(tasks))
- to.pendingTasks = append(to.pendingTasks, tasks...)
-
- // Wake up processor
- select {
- case to.wake <- struct{}{}:
- default:
- }
-}
-
-func (to *trieOrchestrator) handleStop(remaining int) {
- to.taskLock.Lock()
- to.skips += remaining
- to.taskLock.Unlock()
- to.processingTasks.Add(-remaining)
-}
-
-func (to *trieOrchestrator) processTasks() {
- defer close(to.loopTerm)
-
- for {
- // Determine if we should process or exit
- select {
- case <-to.wake:
- case <-to.stop:
- return
- }
-
- // Get current tasks
- to.taskLock.Lock()
- tasks := to.pendingTasks
- to.pendingTasks = nil
- to.taskLock.Unlock()
-
- // Enqueue more work as soon as trie copies are available
- lt := len(tasks)
- for i := 0; i < lt; i++ {
- // Try to stop as soon as possible, if channel is closed
- remaining := lt - i
- select {
- case <-to.stop:
- to.handleStop(remaining)
- return
- default:
- }
-
- // Try to create to get an active copy first (select is non-deterministic,
- // so we may end up creating a new copy when we don't need to)
- var t Trie
- select {
- case t = <-to.copyChan:
- default:
- // Wait for an available copy or create one, if we weren't
- // able to get a previously created copy
- select {
- case <-to.stop:
- to.handleStop(remaining)
- return
- case t = <-to.copyChan:
- case to.copySpawner <- struct{}{}:
- to.copies++
- t = to.copyBase()
- }
- }
-
- // Enqueue work, unless stopped.
- fTask := tasks[i]
- f := func() {
- // Perform task
- var err error
- if len(fTask) == common.AddressLength {
- _, err = t.GetAccount(common.BytesToAddress(fTask))
- } else {
- _, err = t.GetStorage(to.sf.addr, fTask)
- }
- if err != nil {
- log.Error("Trie prefetcher failed fetching", "root", to.sf.root, "err", err)
- }
- to.processingTasks.Done()
-
- // Return copy when we are done with it, so someone else can use it
- //
- // channel is buffered and will not block
- to.copyChan <- t
- }
-
- // Enqueue task for processing (may spawn new goroutine
- // if not at [maxConcurrency])
- //
- // If workers are stopped before calling [Execute], this function may
- // panic.
- to.sf.p.workers.Execute(f)
- }
- }
-}
-
-func (to *trieOrchestrator) stopAcceptingTasks() {
- to.taskLock.Lock()
- defer to.taskLock.Unlock()
-
- if !to.tasksAllowed {
- return
- }
- to.tasksAllowed = false
-
- // We don't clear [to.pendingTasks] here because
- // it will be faster to prefetch them even though we
- // are still waiting.
-}
-
-// wait stops accepting new tasks and waits for ongoing tasks to complete. If
-// wait is called, it is not necessary to call [abort].
-//
-// It is safe to call wait multiple times.
-func (to *trieOrchestrator) wait() {
- // Prevent more tasks from being enqueued
- to.stopAcceptingTasks()
-
- // Wait for processing tasks to complete
- to.processingTasks.Wait()
-
- // Stop orchestrator loop
- to.stopOnce.Do(func() {
- close(to.stop)
- })
- <-to.loopTerm
-}
-
-// abort stops any ongoing tasks and shuts down the orchestrator loop. If abort
-// is called, it is not necessary to call [wait].
-//
-// It is safe to call abort multiple times.
-func (to *trieOrchestrator) abort() {
- // Prevent more tasks from being enqueued
- to.stopAcceptingTasks()
-
- // Stop orchestrator loop
- to.stopOnce.Do(func() {
- close(to.stop)
- })
- <-to.loopTerm
-
- // Capture any dangling pending tasks (processTasks
- // may exit before enqueing all pendingTasks)
- to.taskLock.Lock()
- pendingCount := len(to.pendingTasks)
- to.skips += pendingCount
- to.pendingTasks = nil
- to.taskLock.Unlock()
- to.processingTasks.Add(-pendingCount)
-
- // Wait for processing tasks to complete
- to.processingTasks.Wait()
-}
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
deleted file mode 100644
index c0da75c337..0000000000
--- a/core/state/trie_prefetcher_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "math/big"
- "testing"
- "time"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/libevm/common"
- "github.com/holiman/uint256"
-)
-
-const maxConcurrency = 4
-
-func filledStateDB() *StateDB {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
-
- // Create an account and check if the retrieved balance is correct
- addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
- skey := common.HexToHash("aaa")
- sval := common.HexToHash("bbb")
-
- state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
- state.SetState(addr, skey, sval) // Change the storage trie
- for i := 0; i < 100; i++ {
- sk := common.BigToHash(big.NewInt(int64(i)))
- state.SetState(addr, sk, sk) // Change the storage trie
- }
- return state
-}
-
-func TestCopyAndClose(t *testing.T) {
- db := filledStateDB()
- prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency)
- skey := common.HexToHash("aaa")
- prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
- prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
- time.Sleep(1 * time.Second)
- a := prefetcher.trie(common.Hash{}, db.originalRoot)
- prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
- b := prefetcher.trie(common.Hash{}, db.originalRoot)
- cpy := prefetcher.copy()
- cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
- cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
- c := cpy.trie(common.Hash{}, db.originalRoot)
- prefetcher.close()
- cpy2 := cpy.copy()
- cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
- d := cpy2.trie(common.Hash{}, db.originalRoot)
- cpy.close()
- cpy2.close()
- if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() {
- t.Fatalf("Invalid trie, hashes should be equal: %v %v %v %v", a.Hash(), b.Hash(), c.Hash(), d.Hash())
- }
-}
-
-func TestUseAfterClose(t *testing.T) {
- db := filledStateDB()
- prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency)
- skey := common.HexToHash("aaa")
- prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
- a := prefetcher.trie(common.Hash{}, db.originalRoot)
- prefetcher.close()
- b := prefetcher.trie(common.Hash{}, db.originalRoot)
- if a == nil {
- t.Fatal("Prefetching before close should not return nil")
- }
- if b != nil {
- t.Fatal("Trie after close should return nil")
- }
-}
-
-func TestCopyClose(t *testing.T) {
- db := filledStateDB()
- prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency)
- skey := common.HexToHash("aaa")
- prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
- cpy := prefetcher.copy()
- a := prefetcher.trie(common.Hash{}, db.originalRoot)
- b := cpy.trie(common.Hash{}, db.originalRoot)
- prefetcher.close()
- c := prefetcher.trie(common.Hash{}, db.originalRoot)
- d := cpy.trie(common.Hash{}, db.originalRoot)
- if a == nil {
- t.Fatal("Prefetching before close should not return nil")
- }
- if b == nil {
- t.Fatal("Copy trie should return nil")
- }
- if c != nil {
- t.Fatal("Trie after close should return nil")
- }
- if d == nil {
- t.Fatal("Copy trie should not return nil")
- }
-}
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index 520e4f1f7c..cc05576f25 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -38,11 +38,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/trie"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
@@ -113,7 +113,7 @@ func TestStateProcessorErrors(t *testing.T) {
Config: config,
Timestamp: uint64(upgrade.InitiallyActiveTime.Unix()),
Alloc: types.GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.GenesisAccount{
+ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
Balance: big.NewInt(4000000000000000000), // 4 ether
Nonce: 0,
},
@@ -278,7 +278,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
),
Alloc: types.GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.GenesisAccount{
+ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: 0,
},
@@ -317,7 +317,7 @@ func TestStateProcessorErrors(t *testing.T) {
gspec = &Genesis{
Config: config,
Alloc: types.GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.GenesisAccount{
+ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: 0,
Code: common.FromHex("0xB0B0FACE"),
diff --git a/core/state_transition_test.go b/core/state_transition_test.go
index a25834555b..0db4bcf578 100644
--- a/core/state_transition_test.go
+++ b/core/state_transition_test.go
@@ -100,7 +100,7 @@ func executeStateTransitionTest(t *testing.T, st stateTransitionTest) {
gspec = &Genesis{
Config: st.config,
Alloc: types.GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.GenesisAccount{
+ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
Balance: big.NewInt(2000000000000000000), // 2 ether
Nonce: 0,
},
diff --git a/core/test_blockchain.go b/core/test_blockchain.go
index 95065f276a..dacccd2e94 100644
--- a/core/test_blockchain.go
+++ b/core/test_blockchain.go
@@ -25,11 +25,11 @@ import (
var TestCallbacks = dummy.ConsensusCallbacks{
OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) {
- sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64()))
+ sdb.AddBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64()))
return nil, nil, nil
},
OnFinalizeAndAssemble: func(header *types.Header, sdb *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) {
- sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64()))
+ sdb.AddBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64()))
return nil, nil, nil, nil
},
}
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index 4fc2b8fb57..a33dcbd56c 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -71,7 +71,7 @@ var testChainConfig *params.ChainConfig
func init() {
testChainConfig = new(params.ChainConfig)
- *testChainConfig = *params.TestChainConfig
+ *testChainConfig = params.Copy(params.TestChainConfig)
testChainConfig.CancunTime = new(uint64)
*testChainConfig.CancunTime = uint64(time.Now().Unix())
@@ -584,7 +584,7 @@ func TestOpenDrops(t *testing.T) {
statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000))
statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000))
statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000))
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -703,7 +703,7 @@ func TestOpenIndex(t *testing.T) {
// Create a blob pool out of the pre-seeded data
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000))
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -806,7 +806,7 @@ func TestOpenHeap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000))
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000))
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000))
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -887,7 +887,7 @@ func TestOpenCap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000))
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000))
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000))
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -1305,7 +1305,7 @@ func TestAdd(t *testing.T) {
store.Put(blob)
}
}
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
store.Close()
// Create a blob pool out of the pre-seeded dats
@@ -1378,7 +1378,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000))
pool.add(tx)
}
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
defer pool.Close()
// Benchmark assembling the pending
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go
index 4aa44d046c..a193510b2d 100644
--- a/core/txpool/legacypool/legacypool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -45,10 +45,10 @@ import (
"github.com/ava-labs/coreth/core/txpool"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/event"
+ "github.com/ava-labs/libevm/trie"
"github.com/holiman/uint256"
)
diff --git a/core/types/account.go b/core/types/account.go
index cf587e16eb..efc0927770 100644
--- a/core/types/account.go
+++ b/core/types/account.go
@@ -29,7 +29,6 @@ import (
)
//go:generate go run github.com/fjl/gencodec -type Account -field-override accountMarshaling -out gen_account.go
-//go:generate go run github.com/fjl/gencodec -type GenesisAccount -field-override genesisAccountMarshaling -out gen_genesis_account.go
// Account represents an Ethereum account and its attached data.
// This type is used to specify accounts in the genesis block state, and
@@ -73,10 +72,10 @@ func (h storageJSON) MarshalText() ([]byte, error) {
}
// GenesisAlloc specifies the initial state of a genesis block.
-type GenesisAlloc map[common.Address]GenesisAccount
+type GenesisAlloc map[common.Address]Account
func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
- m := make(map[common.UnprefixedAddress]GenesisAccount)
+ m := make(map[common.UnprefixedAddress]Account)
if err := json.Unmarshal(data, &m); err != nil {
return err
}
@@ -86,23 +85,3 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
}
return nil
}
-
-type GenesisMultiCoinBalance map[common.Hash]*big.Int
-
-// GenesisAccount is an account in the state of the genesis block.
-type GenesisAccount struct {
- Code []byte `json:"code,omitempty"`
- Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
- Balance *big.Int `json:"balance" gencodec:"required"`
- MCBalance GenesisMultiCoinBalance `json:"mcbalance,omitempty"`
- Nonce uint64 `json:"nonce,omitempty"`
- PrivateKey []byte `json:"secretKey,omitempty"` // for tests
-}
-
-type genesisAccountMarshaling struct {
- Code hexutil.Bytes
- Balance *math.HexOrDecimal256
- Nonce math.HexOrDecimal64
- Storage map[storageJSON]storageJSON
- PrivateKey hexutil.Bytes
-}
diff --git a/core/types/gen_account_rlp.go b/core/types/gen_account_rlp.go
deleted file mode 100644
index b9a41e8e58..0000000000
--- a/core/types/gen_account_rlp.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Code generated by rlpgen. DO NOT EDIT.
-
-package types
-
-import (
- "io"
-
- "github.com/ava-labs/libevm/rlp"
-)
-
-func (obj *StateAccount) EncodeRLP(_w io.Writer) error {
- w := rlp.NewEncoderBuffer(_w)
- _tmp0 := w.List()
- w.WriteUint64(obj.Nonce)
- if obj.Balance == nil {
- w.Write(rlp.EmptyString)
- } else {
- w.WriteUint256(obj.Balance)
- }
- w.WriteBytes(obj.Root[:])
- w.WriteBytes(obj.CodeHash)
- w.WriteBool(obj.IsMultiCoin)
- w.ListEnd(_tmp0)
- return w.Flush()
-}
diff --git a/core/types/gen_genesis_account.go b/core/types/gen_genesis_account.go
deleted file mode 100644
index c94927f566..0000000000
--- a/core/types/gen_genesis_account.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-
-package types
-
-import (
- "encoding/json"
- "errors"
- "math/big"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/common/hexutil"
- "github.com/ava-labs/libevm/common/math"
-)
-
-var _ = (*genesisAccountMarshaling)(nil)
-
-// MarshalJSON marshals as JSON.
-func (g GenesisAccount) MarshalJSON() ([]byte, error) {
- type GenesisAccount struct {
- Code hexutil.Bytes `json:"code,omitempty"`
- Storage map[storageJSON]storageJSON `json:"storage,omitempty"`
- Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"`
- MCBalance GenesisMultiCoinBalance `json:"mcbalance,omitempty"`
- Nonce math.HexOrDecimal64 `json:"nonce,omitempty"`
- PrivateKey hexutil.Bytes `json:"secretKey,omitempty"`
- }
- var enc GenesisAccount
- enc.Code = g.Code
- if g.Storage != nil {
- enc.Storage = make(map[storageJSON]storageJSON, len(g.Storage))
- for k, v := range g.Storage {
- enc.Storage[storageJSON(k)] = storageJSON(v)
- }
- }
- enc.Balance = (*math.HexOrDecimal256)(g.Balance)
- enc.MCBalance = g.MCBalance
- enc.Nonce = math.HexOrDecimal64(g.Nonce)
- enc.PrivateKey = g.PrivateKey
- return json.Marshal(&enc)
-}
-
-// UnmarshalJSON unmarshals from JSON.
-func (g *GenesisAccount) UnmarshalJSON(input []byte) error {
- type GenesisAccount struct {
- Code *hexutil.Bytes `json:"code,omitempty"`
- Storage map[storageJSON]storageJSON `json:"storage,omitempty"`
- Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"`
- MCBalance *GenesisMultiCoinBalance `json:"mcbalance,omitempty"`
- Nonce *math.HexOrDecimal64 `json:"nonce,omitempty"`
- PrivateKey *hexutil.Bytes `json:"secretKey,omitempty"`
- }
- var dec GenesisAccount
- if err := json.Unmarshal(input, &dec); err != nil {
- return err
- }
- if dec.Code != nil {
- g.Code = *dec.Code
- }
- if dec.Storage != nil {
- g.Storage = make(map[common.Hash]common.Hash, len(dec.Storage))
- for k, v := range dec.Storage {
- g.Storage[common.Hash(k)] = common.Hash(v)
- }
- }
- if dec.Balance == nil {
- return errors.New("missing required field 'balance' for GenesisAccount")
- }
- g.Balance = (*big.Int)(dec.Balance)
- if dec.MCBalance != nil {
- g.MCBalance = *dec.MCBalance
- }
- if dec.Nonce != nil {
- g.Nonce = uint64(*dec.Nonce)
- }
- if dec.PrivateKey != nil {
- g.PrivateKey = *dec.PrivateKey
- }
- return nil
-}
diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go
index 5fea29dd91..94dd5631d2 100644
--- a/core/types/hashing_test.go
+++ b/core/types/hashing_test.go
@@ -36,12 +36,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/hexutil"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
func TestDeriveSha(t *testing.T) {
diff --git a/core/types/state_account.go b/core/types/state_account.go
index d2727e3b92..e864b90d0e 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -27,109 +27,27 @@
package types
import (
- "bytes"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/rlp"
- "github.com/holiman/uint256"
+ ethtypes "github.com/ava-labs/libevm/core/types"
)
-//go:generate go run github.com/ava-labs/libevm/rlp/rlpgen -type StateAccount -out gen_account_rlp.go
-
-// StateAccount is the Ethereum consensus representation of accounts.
-// These objects are stored in the main account trie.
-type StateAccount struct {
- Nonce uint64
- Balance *uint256.Int
- Root common.Hash // merkle root of the storage trie
- CodeHash []byte
- IsMultiCoin bool
-}
-
-// NewEmptyStateAccount constructs an empty state account.
-func NewEmptyStateAccount() *StateAccount {
- return &StateAccount{
- Balance: new(uint256.Int),
- Root: EmptyRootHash,
- CodeHash: EmptyCodeHash.Bytes(),
- }
-}
-
-// Copy returns a deep-copied state account object.
-func (acct *StateAccount) Copy() *StateAccount {
- var balance *uint256.Int
- if acct.Balance != nil {
- balance = new(uint256.Int).Set(acct.Balance)
- }
- return &StateAccount{
- Nonce: acct.Nonce,
- Balance: balance,
- Root: acct.Root,
- CodeHash: common.CopyBytes(acct.CodeHash),
- IsMultiCoin: acct.IsMultiCoin,
- }
-}
-
-// SlimAccount is a modified version of an Account, where the root is replaced
-// with a byte slice. This format can be used to represent full-consensus format
-// or slim format which replaces the empty root and code hash as nil byte slice.
-type SlimAccount struct {
- Nonce uint64
- Balance *uint256.Int
- Root []byte // Nil if root equals to types.EmptyRootHash
- CodeHash []byte // Nil if hash equals to types.EmptyCodeHash
- IsMultiCoin bool
-}
+type (
+ // Import these types from the go-ethereum package
+ StateAccount = ethtypes.StateAccount
+ SlimAccount = ethtypes.SlimAccount
+)
-// SlimAccountRLP encodes the state account in 'slim RLP' format.
-func SlimAccountRLP(account StateAccount) []byte {
- slim := SlimAccount{
- Nonce: account.Nonce,
- Balance: account.Balance,
- IsMultiCoin: account.IsMultiCoin,
- }
- if account.Root != EmptyRootHash {
- slim.Root = account.Root[:]
- }
- if !bytes.Equal(account.CodeHash, EmptyCodeHash[:]) {
- slim.CodeHash = account.CodeHash
- }
- data, err := rlp.EncodeToBytes(slim)
- if err != nil {
- panic(err)
- }
- return data
-}
+var (
+ // Import these functions from the go-ethereum package
+ NewEmptyStateAccount = ethtypes.NewEmptyStateAccount
+ SlimAccountRLP = ethtypes.SlimAccountRLP
+ FullAccount = ethtypes.FullAccount
+ FullAccountRLP = ethtypes.FullAccountRLP
+)
-// FullAccount decodes the data on the 'slim RLP' format and returns
-// the consensus format account.
-func FullAccount(data []byte) (*StateAccount, error) {
- var slim SlimAccount
- if err := rlp.DecodeBytes(data, &slim); err != nil {
- return nil, err
- }
- var account StateAccount
- account.Nonce, account.Balance, account.IsMultiCoin = slim.Nonce, slim.Balance, slim.IsMultiCoin
+type isMultiCoin bool
- // Interpret the storage root and code hash in slim format.
- if len(slim.Root) == 0 {
- account.Root = EmptyRootHash
- } else {
- account.Root = common.BytesToHash(slim.Root)
- }
- if len(slim.CodeHash) == 0 {
- account.CodeHash = EmptyCodeHash[:]
- } else {
- account.CodeHash = slim.CodeHash
- }
- return &account, nil
-}
+var IsMultiCoinPayloads = ethtypes.RegisterExtras[isMultiCoin]()
-// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
-func FullAccountRLP(data []byte) ([]byte, error) {
- account, err := FullAccount(data)
- if err != nil {
- return nil, err
- }
- return rlp.EncodeToBytes(account)
+func IsMultiCoin(a *StateAccount) bool {
+ return bool(IsMultiCoinPayloads.FromStateAccount(a))
}
diff --git a/eth/api_debug.go b/eth/api_debug.go
index e1c67dc436..e5073e4f63 100644
--- a/eth/api_debug.go
+++ b/eth/api_debug.go
@@ -37,12 +37,12 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/internal/ethapi"
"github.com/ava-labs/coreth/rpc"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/hexutil"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
)
// DebugAPI is the collection of Ethereum full node APIs for debugging the
diff --git a/eth/api_debug_test.go b/eth/api_debug_test.go
index 1c87611904..926e4de48d 100644
--- a/eth/api_debug_test.go
+++ b/eth/api_debug_test.go
@@ -36,7 +36,7 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
"github.com/davecgh/go-spew/spew"
@@ -93,7 +93,7 @@ func TestAccountRange(t *testing.T) {
m[addr] = true
}
}
- root, _ := sdb.Commit(0, true, false)
+ root, _ := sdb.Commit(0, true)
sdb, _ = state.New(root, statedb, nil)
trie, err := statedb.OpenTrie(root)
@@ -151,7 +151,7 @@ func TestEmptyAccountRange(t *testing.T) {
st, _ = state.New(types.EmptyRootHash, statedb, nil)
)
// Commit(although nothing to flush) and re-init the statedb
- st.Commit(0, true, false)
+ st.Commit(0, true)
st, _ = state.New(types.EmptyRootHash, statedb, nil)
results := st.RawDump(&state.DumpConfig{
@@ -197,7 +197,7 @@ func TestStorageRangeAt(t *testing.T) {
for _, entry := range storage {
sdb.SetState(addr, *entry.Key, entry.Value)
}
- root, _ := sdb.Commit(0, false, false)
+ root, _ := sdb.Commit(0, false)
sdb, _ = state.New(root, db, nil)
// Check a few combinations of limit and start/end.
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index d5ae4074fe..583f97ad8e 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -41,10 +41,10 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/require"
)
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 8395f08cd2..37363f4e50 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -37,11 +37,11 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/eth/tracers"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
// noopReleaser is returned in case there is no operation expected
@@ -163,7 +163,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), true)
+ root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()))
if err != nil {
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err)
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index 945a46c276..b8703f9772 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -79,12 +79,12 @@ func BenchmarkTransactionTrace(b *testing.B) {
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
- alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = types.GenesisAccount{
+ alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = types.Account{
Nonce: 1,
Code: loop,
Balance: big.NewInt(1),
}
- alloc[from] = types.GenesisAccount{
+ alloc[from] = types.Account{
Nonce: 1,
Code: []byte{},
Balance: big.NewInt(500000000000000),
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index 47384f7188..fda588e45b 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -34,7 +34,6 @@ import (
"fmt"
"math/big"
- "github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/coreth/accounts/abi/bind"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/interfaces"
@@ -90,7 +89,6 @@ type Client interface {
SubscribeNewHead(context.Context, chan<- *types.Header) (interfaces.Subscription, error)
NetworkID(context.Context) (*big.Int, error)
BalanceAt(context.Context, common.Address, *big.Int) (*big.Int, error)
- AssetBalanceAt(context.Context, common.Address, ids.ID, *big.Int) (*big.Int, error)
BalanceAtHash(ctx context.Context, account common.Address, blockHash common.Hash) (*big.Int, error)
StorageAt(context.Context, common.Address, common.Hash, *big.Int) ([]byte, error)
StorageAtHash(ctx context.Context, account common.Address, key common.Hash, blockHash common.Hash) ([]byte, error)
@@ -467,14 +465,6 @@ func (ec *client) BalanceAt(ctx context.Context, account common.Address, blockNu
return (*big.Int)(&result), err
}
-// AssetBalanceAt returns the [assetID] balance of the given account
-// The block number can be nil, in which case the balance is taken from the latest known block.
-func (ec *client) AssetBalanceAt(ctx context.Context, account common.Address, assetID ids.ID, blockNumber *big.Int) (*big.Int, error) {
- var result hexutil.Big
- err := ec.c.CallContext(ctx, &result, "eth_getAssetBalance", account, ToBlockNumArg(blockNumber), assetID)
- return (*big.Int)(&result), err
-}
-
// BalanceAtHash returns the wei balance of the given account.
func (ec *client) BalanceAtHash(ctx context.Context, account common.Address, blockHash common.Hash) (*big.Int, error) {
var result hexutil.Big
diff --git a/go.mod b/go.mod
index 5141cdbf7a..6e9b7f8d1b 100644
--- a/go.mod
+++ b/go.mod
@@ -5,11 +5,10 @@ go 1.22.8
require (
github.com/VictoriaMetrics/fastcache v1.12.1
github.com/ava-labs/avalanchego v1.11.13-0.20241106174551-4fb3f3c6b195
- github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1
+ github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1.0.20241126163706-cd51330a5e2e
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.1.0
- github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46
github.com/gorilla/rpc v1.2.0
github.com/gorilla/websocket v1.5.0
github.com/hashicorp/go-bexpr v0.1.10
@@ -64,6 +63,7 @@ require (
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
+ github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
diff --git a/go.sum b/go.sum
index 5aa8d8e816..cf1fd9d76e 100644
--- a/go.sum
+++ b/go.sum
@@ -58,8 +58,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/ava-labs/avalanchego v1.11.13-0.20241106174551-4fb3f3c6b195 h1:dyf52xlqlA/9SaiCv29oqbitRAYu7L890zK774xDNrE=
github.com/ava-labs/avalanchego v1.11.13-0.20241106174551-4fb3f3c6b195/go.mod h1:eZ/UmH4rDhhgL/FLqtJZYJ7ka73m88RmLrOoAyZFgD4=
-github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1 h1:ughW0E2DUNRnvwJYNU8zUSCUzIWdcOwyXSBpy7oauZE=
-github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1/go.mod h1:yBctIV/wnxXTF38h95943jvpuk4aj07TrjbpoGor6LQ=
+github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1.0.20241126163706-cd51330a5e2e h1:WwDl/jyHr4oJ1VYUi+PEu6l05Vcl4rZv1xXJ0vVP1gI=
+github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1.0.20241126163706-cd51330a5e2e/go.mod h1:yBctIV/wnxXTF38h95943jvpuk4aj07TrjbpoGor6LQ=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 5fccd248a6..a39fd3fbef 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -35,7 +35,6 @@ import (
"strings"
"time"
- "github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/coreth/consensus"
"github.com/ava-labs/coreth/core"
"github.com/ava-labs/coreth/core/state"
@@ -43,7 +42,6 @@ import (
"github.com/ava-labs/coreth/eth/gasestimator"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/accounts"
"github.com/ava-labs/libevm/accounts/keystore"
"github.com/ava-labs/libevm/accounts/scwallet"
@@ -55,6 +53,7 @@ import (
"github.com/ava-labs/libevm/eth/tracers/logger"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
"github.com/davecgh/go-spew/spew"
"github.com/holiman/uint256"
"github.com/tyler-smith/go-bip39"
@@ -646,17 +645,6 @@ func (s *BlockChainAPI) GetBalance(ctx context.Context, address common.Address,
return (*hexutil.Big)(b), state.Error()
}
-// GetAssetBalance returns the amount of [assetID] for the given address in the state of the
-// given block number. The rpc.LatestBlockNumber, rpc.PendingBlockNumber, and
-// rpc.AcceptedBlockNumber meta block numbers are also allowed.
-func (s *BlockChainAPI) GetAssetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash, assetID ids.ID) (*hexutil.Big, error) {
- state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
- if state == nil || err != nil {
- return nil, err
- }
- return (*hexutil.Big)(state.GetBalanceMultiCoin(address, common.Hash(assetID))), state.Error()
-}
-
// AccountResult structs for GetProof
type AccountResult struct {
Address common.Address `json:"address"`
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index c5ce2cafc1..c3a6c7c880 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -451,7 +451,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E
}
)
accman, acc := newTestAccountManager(t)
- gspec.Alloc[acc.Address] = types.GenesisAccount{Balance: big.NewInt(params.Ether)}
+ gspec.Alloc[acc.Address] = types.Account{Balance: big.NewInt(params.Ether)}
// Generate blocks for testing
db, blocks, _, _ := core.GenerateChainWithGenesis(gspec, engine, n, 10, generator)
chain, err := core.NewBlockChain(db, cacheConfig, gspec, engine, vm.Config{}, gspec.ToBlock().Hash(), false)
diff --git a/nativeasset/contract_test.go b/nativeasset/contract_test.go
index c6446ab207..06e0636687 100644
--- a/nativeasset/contract_test.go
+++ b/nativeasset/contract_test.go
@@ -231,7 +231,7 @@ func TestStatefulPrecompile(t *testing.T) {
t.Fatal(err)
}
statedb.SetBalance(userAddr1, u256Hundred)
- statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred)
+ statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred)
statedb.Finalise(true)
return statedb
},
@@ -264,7 +264,7 @@ func TestStatefulPrecompile(t *testing.T) {
t.Fatal(err)
}
statedb.SetBalance(userAddr1, u256Hundred)
- statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred)
+ statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred)
statedb.Finalise(true)
return statedb
},
@@ -299,7 +299,7 @@ func TestStatefulPrecompile(t *testing.T) {
t.Fatal(err)
}
statedb.SetBalance(userAddr1, u256Hundred)
- statedb.SetBalanceMultiCoin(userAddr1, assetID, big.NewInt(50))
+ statedb.AddBalanceMultiCoin(userAddr1, assetID, big.NewInt(50))
statedb.Finalise(true)
return statedb
},
@@ -331,7 +331,7 @@ func TestStatefulPrecompile(t *testing.T) {
t.Fatal(err)
}
statedb.SetBalance(userAddr1, uint256.NewInt(50))
- statedb.SetBalanceMultiCoin(userAddr1, assetID, big.NewInt(50))
+ statedb.AddBalanceMultiCoin(userAddr1, assetID, big.NewInt(50))
statedb.Finalise(true)
return statedb
},
@@ -363,7 +363,7 @@ func TestStatefulPrecompile(t *testing.T) {
t.Fatal(err)
}
statedb.SetBalance(userAddr1, u256Hundred)
- statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred)
+ statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred)
statedb.Finalise(true)
return statedb
},
@@ -384,7 +384,7 @@ func TestStatefulPrecompile(t *testing.T) {
t.Fatal(err)
}
statedb.SetBalance(userAddr1, u256Hundred)
- statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred)
+ statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred)
statedb.Finalise(true)
return statedb
},
@@ -416,7 +416,7 @@ func TestStatefulPrecompile(t *testing.T) {
t.Fatal(err)
}
statedb.SetBalance(userAddr1, u256Hundred)
- statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred)
+ statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred)
statedb.Finalise(true)
return statedb
},
@@ -437,7 +437,7 @@ func TestStatefulPrecompile(t *testing.T) {
t.Fatal(err)
}
statedb.SetBalance(userAddr1, u256Hundred)
- statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred)
+ statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred)
statedb.Finalise(true)
return statedb
},
diff --git a/plugin/evm/atomic_syncer.go b/plugin/evm/atomic_syncer.go
index 365c7f11fd..2da188d47a 100644
--- a/plugin/evm/atomic_syncer.go
+++ b/plugin/evm/atomic_syncer.go
@@ -16,7 +16,7 @@ import (
"github.com/ava-labs/coreth/plugin/evm/message"
syncclient "github.com/ava-labs/coreth/sync/client"
- "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/plugin/evm/atomic_syncer_test.go b/plugin/evm/atomic_syncer_test.go
index 7054649f3c..ed6ba46466 100644
--- a/plugin/evm/atomic_syncer_test.go
+++ b/plugin/evm/atomic_syncer_test.go
@@ -22,9 +22,9 @@ import (
"github.com/ava-labs/coreth/sync/handlers"
handlerstats "github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
const commitInterval = 1024
diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic_trie.go
index d4c27a88bd..ec86498904 100644
--- a/plugin/evm/atomic_trie.go
+++ b/plugin/evm/atomic_trie.go
@@ -17,13 +17,13 @@ import (
"github.com/ava-labs/coreth/core"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/triedb"
)
const (
@@ -154,9 +154,9 @@ func newAtomicTrie(
trieDB := triedb.NewDatabase(
rawdb.NewDatabase(Database{atomicTrieDB}),
&triedb.Config{
- HashDB: &hashdb.Config{
+ DBOverride: hashdb.Config{
CleanCacheSize: 64 * units.MiB, // Allocate 64MB of memory for clean cache
- },
+ }.BackendConstructor,
},
)
diff --git a/plugin/evm/atomic_trie_iterator.go b/plugin/evm/atomic_trie_iterator.go
index edc3a9d47b..6fd636d744 100644
--- a/plugin/evm/atomic_trie_iterator.go
+++ b/plugin/evm/atomic_trie_iterator.go
@@ -12,8 +12,8 @@ import (
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/utils/wrappers"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie"
)
const atomicTrieKeyLen = wrappers.LongLen + common.HashLength
diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go
index ef7fda1ad7..9b6eeaae65 100644
--- a/plugin/evm/block_verification.go
+++ b/plugin/evm/block_verification.go
@@ -15,7 +15,7 @@ import (
"github.com/ava-labs/coreth/constants"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/plugin/evm/gossiper_eth_gossiping_test.go b/plugin/evm/gossiper_eth_gossiping_test.go
index 6fe0f5e955..e323308a58 100644
--- a/plugin/evm/gossiper_eth_gossiping_test.go
+++ b/plugin/evm/gossiper_eth_gossiping_test.go
@@ -34,9 +34,9 @@ func fundAddressByGenesis(addrs []common.Address) (string, error) {
Difficulty: common.Big0,
GasLimit: uint64(5000000),
}
- funds := make(map[common.Address]types.GenesisAccount)
+ funds := make(map[common.Address]types.Account)
for _, addr := range addrs {
- funds[addr] = types.GenesisAccount{
+ funds[addr] = types.Account{
Balance: balance,
}
}
diff --git a/plugin/evm/network_handler.go b/plugin/evm/network_handler.go
index d11d12c4a4..15dca8ce38 100644
--- a/plugin/evm/network_handler.go
+++ b/plugin/evm/network_handler.go
@@ -12,10 +12,10 @@ import (
"github.com/ava-labs/coreth/plugin/evm/message"
syncHandlers "github.com/ava-labs/coreth/sync/handlers"
syncStats "github.com/ava-labs/coreth/sync/handlers/stats"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/warp"
warpHandlers "github.com/ava-labs/coreth/warp/handlers"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
)
var _ message.RequestHandler = &networkHandler{}
diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go
index c08627b612..3abd73275b 100644
--- a/plugin/evm/syncervm_test.go
+++ b/plugin/evm/syncervm_test.go
@@ -37,13 +37,13 @@ import (
"github.com/ava-labs/coreth/predicate"
statesyncclient "github.com/ava-labs/coreth/sync/client"
"github.com/ava-labs/coreth/sync/statesync"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
func TestSkipStateSync(t *testing.T) {
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 6887f71993..1390ea0ab1 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -40,9 +40,9 @@ import (
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/peer"
"github.com/ava-labs/coreth/plugin/evm/message"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/utils"
+ "github.com/ava-labs/libevm/triedb"
warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp"
"github.com/ava-labs/coreth/rpc"
@@ -1289,9 +1289,9 @@ func (vm *VM) setAppRequestHandlers() {
evmTrieDB := triedb.NewDatabase(
vm.chaindb,
&triedb.Config{
- HashDB: &hashdb.Config{
+ DBOverride: hashdb.Config{
CleanCacheSize: vm.config.StateSyncServerTrieCache * units.MiB,
- },
+ }.BackendConstructor,
},
)
networkHandler := newNetworkHandler(
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index 80dc75da50..7d322eb209 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -24,8 +24,8 @@ import (
"github.com/ava-labs/coreth/constants"
"github.com/ava-labs/coreth/eth/filters"
"github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/utils"
+ "github.com/ava-labs/libevm/trie"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -97,7 +97,7 @@ var (
if params.GetExtra(cfg).IsDurango(0) {
addr := common.HexToAddress("0x99b9DEA54C48Dfea6aA9A4Ca4623633EE04ddbB5")
balance := new(big.Int).Mul(big.NewInt(params.Ether), big.NewInt(10))
- g.Alloc[addr] = types.GenesisAccount{Balance: balance}
+ g.Alloc[addr] = types.Account{Balance: balance}
}
b, err := json.Marshal(g)
@@ -169,7 +169,7 @@ func newPrefundedGenesis(
) *core.Genesis {
alloc := types.GenesisAlloc{}
for _, address := range addresses {
- alloc[address] = types.GenesisAccount{
+ alloc[address] = types.Account{
Balance: big.NewInt(int64(balance)),
}
}
diff --git a/scripts/eth-allowed-packages.txt b/scripts/eth-allowed-packages.txt
index 8d3beea0fd..6f69e52bcd 100644
--- a/scripts/eth-allowed-packages.txt
+++ b/scripts/eth-allowed-packages.txt
@@ -25,6 +25,14 @@
"github.com/ava-labs/libevm/ethdb/pebble"
"github.com/ava-labs/libevm/event"
"github.com/ava-labs/libevm/libevm"
+"github.com/ava-labs/libevm/libevm/stateconf"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/params"
-"github.com/ava-labs/libevm/rlp"
\ No newline at end of file
+"github.com/ava-labs/libevm/rlp"
+"github.com/ava-labs/libevm/trie"
+"github.com/ava-labs/libevm/trie/testutil"
+"github.com/ava-labs/libevm/trie/trienode"
+"github.com/ava-labs/libevm/trie/triestate"
+"github.com/ava-labs/libevm/trie/utils"
+"github.com/ava-labs/libevm/triedb"
+"github.com/ava-labs/libevm/triedb/database"
\ No newline at end of file
diff --git a/sync/client/client.go b/sync/client/client.go
index 72b656d217..6a1348679e 100644
--- a/sync/client/client.go
+++ b/sync/client/client.go
@@ -27,8 +27,8 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/peer"
"github.com/ava-labs/coreth/plugin/evm/message"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
)
const (
diff --git a/sync/client/client_test.go b/sync/client/client_test.go
index 03566ed5f5..70c7f6f118 100644
--- a/sync/client/client_test.go
+++ b/sync/client/client_test.go
@@ -25,9 +25,9 @@ import (
"github.com/ava-labs/coreth/sync/handlers"
handlerstats "github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/triedb"
)
func TestGetCode(t *testing.T) {
diff --git a/sync/handlers/block_request_test.go b/sync/handlers/block_request_test.go
index a3557cdcf9..bd9bf8ce4f 100644
--- a/sync/handlers/block_request_test.go
+++ b/sync/handlers/block_request_test.go
@@ -17,10 +17,10 @@ import (
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/plugin/evm/message"
"github.com/ava-labs/coreth/sync/handlers/stats"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/assert"
)
diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go
index 76b726136b..c9df10ef10 100644
--- a/sync/handlers/leafs_request.go
+++ b/sync/handlers/leafs_request.go
@@ -18,13 +18,13 @@ import (
"github.com/ava-labs/coreth/plugin/evm/message"
"github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/ethdb/memorydb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
const (
diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go
index 2cfda9abc1..a98f398cb4 100644
--- a/sync/handlers/leafs_request_test.go
+++ b/sync/handlers/leafs_request_test.go
@@ -16,11 +16,11 @@ import (
"github.com/ava-labs/coreth/plugin/evm/message"
"github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/assert"
)
diff --git a/sync/statesync/state_syncer.go b/sync/statesync/state_syncer.go
index 3ec5013270..7031979208 100644
--- a/sync/statesync/state_syncer.go
+++ b/sync/statesync/state_syncer.go
@@ -10,9 +10,9 @@ import (
"github.com/ava-labs/coreth/core/state/snapshot"
syncclient "github.com/ava-labs/coreth/sync/client"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
"golang.org/x/sync/errgroup"
)
@@ -35,12 +35,12 @@ type StateSyncerConfig struct {
// stateSync keeps the state of the entire state sync operation.
type stateSync struct {
- db ethdb.Database // database we are syncing
- root common.Hash // root of the EVM state we are syncing to
- trieDB *triedb.Database // trieDB on top of db we are syncing. used to restore any existing tries.
- snapshot snapshot.Snapshot // used to access the database we are syncing as a snapshot.
- batchSize int // write batches when they reach this size
- client syncclient.Client // used to contact peers over the network
+ db ethdb.Database // database we are syncing
+ root common.Hash // root of the EVM state we are syncing to
+ trieDB *triedb.Database // trieDB on top of db we are syncing. used to restore any existing tries.
+ snapshot snapshot.SnapshotIterable // used to access the database we are syncing as a snapshot.
+ batchSize int // write batches when they reach this size
+ client syncclient.Client // used to contact peers over the network
segments chan syncclient.LeafSyncTask // channel of tasks to sync
syncer *syncclient.CallbackLeafSyncer // performs the sync, looping over each task's range and invoking specified callbacks
diff --git a/sync/statesync/sync_helpers.go b/sync/statesync/sync_helpers.go
index f73e963495..22874b0017 100644
--- a/sync/statesync/sync_helpers.go
+++ b/sync/statesync/sync_helpers.go
@@ -6,9 +6,9 @@ package statesync
import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
)
// writeAccountSnapshot stores the account represented by [acc] to the snapshot at [accHash], using
diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go
index 456a1b06c1..14a096a7fd 100644
--- a/sync/statesync/sync_test.go
+++ b/sync/statesync/sync_test.go
@@ -21,12 +21,12 @@ import (
"github.com/ava-labs/coreth/sync/handlers"
handlerstats "github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/assert"
)
diff --git a/sync/statesync/test_sync.go b/sync/statesync/test_sync.go
index 515d3cade2..e3083e3484 100644
--- a/sync/statesync/test_sync.go
+++ b/sync/statesync/test_sync.go
@@ -11,12 +11,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/assert"
)
diff --git a/sync/statesync/trie_segments.go b/sync/statesync/trie_segments.go
index 7a685b71ca..d0665ace9e 100644
--- a/sync/statesync/trie_segments.go
+++ b/sync/statesync/trie_segments.go
@@ -14,11 +14,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/plugin/evm/message"
syncclient "github.com/ava-labs/coreth/sync/client"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/sync/statesync/trie_sync_tasks.go b/sync/statesync/trie_sync_tasks.go
index 5cb78ee982..a0c4d4845f 100644
--- a/sync/statesync/trie_sync_tasks.go
+++ b/sync/statesync/trie_sync_tasks.go
@@ -9,11 +9,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/sync/syncutils/test_trie.go b/sync/syncutils/test_trie.go
index 1b13867c06..c244d7bb1c 100644
--- a/sync/syncutils/test_trie.go
+++ b/sync/syncutils/test_trie.go
@@ -11,10 +11,10 @@ import (
"github.com/ava-labs/avalanchego/utils/wrappers"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
"github.com/ava-labs/libevm/common"
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 84fb7ac9ca..714ad125a8 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -31,11 +31,11 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -50,9 +50,9 @@ type StateTestState struct {
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bool, scheme string) StateTestState {
tconf := &triedb.Config{Preimages: true}
if scheme == rawdb.HashScheme {
- tconf.HashDB = hashdb.Defaults
+ tconf.DBOverride = hashdb.Defaults.BackendConstructor
} else {
- tconf.PathDB = pathdb.Defaults
+ tconf.DBOverride = pathdb.Defaults.BackendConstructor
}
triedb := triedb.NewDatabase(db, tconf)
sdb := state.NewDatabaseWithNodeDB(db, triedb)
@@ -66,7 +66,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(0, false, false)
+ root, _ := statedb.Commit(0, false)
// If snapshot is requested, initialize the snapshotter and use it in state.
var snaps *snapshot.Tree
diff --git a/trie/committer.go b/trie/committer.go
deleted file mode 100644
index 5abf11b506..0000000000
--- a/trie/committer.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "fmt"
-
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
-)
-
-// committer is the tool used for the trie Commit operation. The committer will
-// capture all dirty nodes during the commit process and keep them cached in
-// insertion order.
-type committer struct {
- nodes *trienode.NodeSet
- tracer *tracer
- collectLeaf bool
-}
-
-// newCommitter creates a new committer or picks one from the pool.
-func newCommitter(nodeset *trienode.NodeSet, tracer *tracer, collectLeaf bool) *committer {
- return &committer{
- nodes: nodeset,
- tracer: tracer,
- collectLeaf: collectLeaf,
- }
-}
-
-// Commit collapses a node down into a hash node.
-func (c *committer) Commit(n node) hashNode {
- return c.commit(nil, n).(hashNode)
-}
-
-// commit collapses a node down into a hash node and returns it.
-func (c *committer) commit(path []byte, n node) node {
- // if this path is clean, use available cached data
- hash, dirty := n.cache()
- if hash != nil && !dirty {
- return hash
- }
- // Commit children, then parent, and remove the dirty flag.
- switch cn := n.(type) {
- case *shortNode:
- // Commit child
- collapsed := cn.copy()
-
- // If the child is fullNode, recursively commit,
- // otherwise it can only be hashNode or valueNode.
- if _, ok := cn.Val.(*fullNode); ok {
- collapsed.Val = c.commit(append(path, cn.Key...), cn.Val)
- }
- // The key needs to be copied, since we're adding it to the
- // modified nodeset.
- collapsed.Key = hexToCompact(cn.Key)
- hashedNode := c.store(path, collapsed)
- if hn, ok := hashedNode.(hashNode); ok {
- return hn
- }
- return collapsed
- case *fullNode:
- hashedKids := c.commitChildren(path, cn)
- collapsed := cn.copy()
- collapsed.Children = hashedKids
-
- hashedNode := c.store(path, collapsed)
- if hn, ok := hashedNode.(hashNode); ok {
- return hn
- }
- return collapsed
- case hashNode:
- return cn
- default:
- // nil, valuenode shouldn't be committed
- panic(fmt.Sprintf("%T: invalid node: %v", n, n))
- }
-}
-
-// commitChildren commits the children of the given fullnode
-func (c *committer) commitChildren(path []byte, n *fullNode) [17]node {
- var children [17]node
- for i := 0; i < 16; i++ {
- child := n.Children[i]
- if child == nil {
- continue
- }
- // If it's the hashed child, save the hash value directly.
- // Note: it's impossible that the child in range [0, 15]
- // is a valueNode.
- if hn, ok := child.(hashNode); ok {
- children[i] = hn
- continue
- }
- // Commit the child recursively and store the "hashed" value.
- // Note the returned node can be some embedded nodes, so it's
- // possible the type is not hashNode.
- children[i] = c.commit(append(path, byte(i)), child)
- }
- // For the 17th child, it's possible the type is valuenode.
- if n.Children[16] != nil {
- children[16] = n.Children[16]
- }
- return children
-}
-
-// store hashes the node n and adds it to the modified nodeset. If leaf collection
-// is enabled, leaf nodes will be tracked in the modified nodeset as well.
-func (c *committer) store(path []byte, n node) node {
- // Larger nodes are replaced by their hash and stored in the database.
- var hash, _ = n.cache()
-
- // This was not generated - must be a small node stored in the parent.
- // In theory, we should check if the node is leaf here (embedded node
- // usually is leaf node). But small value (less than 32bytes) is not
- // our target (leaves in account trie only).
- if hash == nil {
- // The node is embedded in its parent, in other words, this node
- // will not be stored in the database independently, mark it as
- // deleted only if the node was existent in database before.
- _, ok := c.tracer.accessList[string(path)]
- if ok {
- c.nodes.AddNode(path, trienode.NewDeleted())
- }
- return n
- }
- // Collect the dirty node to nodeset for return.
- nhash := common.BytesToHash(hash)
- c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n)))
-
- // Collect the corresponding leaf node if it's required. We don't check
- // full node since it's impossible to store value in fullNode. The key
- // length of leaves should be exactly same.
- if c.collectLeaf {
- if sn, ok := n.(*shortNode); ok {
- if val, ok := sn.Val.(valueNode); ok {
- c.nodes.AddLeaf(nhash, val)
- }
- }
- }
- return hash
-}
-
-// MerkleResolver the children resolver in merkle-patricia-tree.
-type MerkleResolver struct{}
-
-// ForEach implements childResolver, decodes the provided node and
-// traverses the children inside.
-func (resolver MerkleResolver) ForEach(node []byte, onChild func(common.Hash)) {
- forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild)
-}
-
-// forGatherChildren traverses the node hierarchy and invokes the callback
-// for all the hashnode children.
-func forGatherChildren(n node, onChild func(hash common.Hash)) {
- switch n := n.(type) {
- case *shortNode:
- forGatherChildren(n.Val, onChild)
- case *fullNode:
- for i := 0; i < 16; i++ {
- forGatherChildren(n.Children[i], onChild)
- }
- case hashNode:
- onChild(common.BytesToHash(n))
- case valueNode, nil:
- default:
- panic(fmt.Sprintf("unknown node type: %T", n))
- }
-}
diff --git a/trie/database_test.go b/trie/database_test.go
deleted file mode 100644
index 231c7f9677..0000000000
--- a/trie/database_test.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
-)
-
-// testReader implements database.Reader interface, providing function to
-// access trie nodes.
-type testReader struct {
- db ethdb.Database
- scheme string
- nodes []*trienode.MergedNodeSet // sorted from new to old
-}
-
-// Node implements database.Reader interface, retrieving trie node with
-// all available cached layers.
-func (r *testReader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
- // Check the node presence with the cached layer, from latest to oldest.
- for _, nodes := range r.nodes {
- if _, ok := nodes.Sets[owner]; !ok {
- continue
- }
- n, ok := nodes.Sets[owner].Nodes[string(path)]
- if !ok {
- continue
- }
- if n.IsDeleted() || n.Hash != hash {
- return nil, &MissingNodeError{Owner: owner, Path: path, NodeHash: hash}
- }
- return n.Blob, nil
- }
- // Check the node presence in database.
- return rawdb.ReadTrieNode(r.db, owner, path, hash, r.scheme), nil
-}
-
-// testDb implements database.Database interface, using for testing purpose.
-type testDb struct {
- disk ethdb.Database
- root common.Hash
- scheme string
- nodes map[common.Hash]*trienode.MergedNodeSet
- parents map[common.Hash]common.Hash
-}
-
-func newTestDatabase(diskdb ethdb.Database, scheme string) *testDb {
- return &testDb{
- disk: diskdb,
- root: types.EmptyRootHash,
- scheme: scheme,
- nodes: make(map[common.Hash]*trienode.MergedNodeSet),
- parents: make(map[common.Hash]common.Hash),
- }
-}
-
-func (db *testDb) Reader(stateRoot common.Hash) (database.Reader, error) {
- nodes, _ := db.dirties(stateRoot, true)
- return &testReader{db: db.disk, scheme: db.scheme, nodes: nodes}, nil
-}
-
-func (db *testDb) Preimage(hash common.Hash) []byte {
- return rawdb.ReadPreimage(db.disk, hash)
-}
-
-func (db *testDb) InsertPreimage(preimages map[common.Hash][]byte) {
- rawdb.WritePreimages(db.disk, preimages)
-}
-
-func (db *testDb) Scheme() string { return db.scheme }
-
-func (db *testDb) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
- if root == parent {
- return nil
- }
- if _, ok := db.nodes[root]; ok {
- return nil
- }
- db.parents[root] = parent
- db.nodes[root] = nodes
- return nil
-}
-
-func (db *testDb) dirties(root common.Hash, topToBottom bool) ([]*trienode.MergedNodeSet, []common.Hash) {
- var (
- pending []*trienode.MergedNodeSet
- roots []common.Hash
- )
- for {
- if root == db.root {
- break
- }
- nodes, ok := db.nodes[root]
- if !ok {
- break
- }
- if topToBottom {
- pending = append(pending, nodes)
- roots = append(roots, root)
- } else {
- pending = append([]*trienode.MergedNodeSet{nodes}, pending...)
- roots = append([]common.Hash{root}, roots...)
- }
- root = db.parents[root]
- }
- return pending, roots
-}
-
-func (db *testDb) Commit(root common.Hash) error {
- if root == db.root {
- return nil
- }
- pending, roots := db.dirties(root, false)
- for i, nodes := range pending {
- for owner, set := range nodes.Sets {
- if owner == (common.Hash{}) {
- continue
- }
- set.ForEachWithOrder(func(path string, n *trienode.Node) {
- rawdb.WriteTrieNode(db.disk, owner, []byte(path), n.Hash, n.Blob, db.scheme)
- })
- }
- nodes.Sets[common.Hash{}].ForEachWithOrder(func(path string, n *trienode.Node) {
- rawdb.WriteTrieNode(db.disk, common.Hash{}, []byte(path), n.Hash, n.Blob, db.scheme)
- })
- db.root = roots[i]
- }
- for _, root := range roots {
- delete(db.nodes, root)
- delete(db.parents, root)
- }
- return nil
-}
diff --git a/trie/encoding.go b/trie/encoding.go
deleted file mode 100644
index aaa131ef1f..0000000000
--- a/trie/encoding.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-// Trie keys are dealt with in three distinct encodings:
-//
-// KEYBYTES encoding contains the actual key and nothing else. This encoding is the
-// input to most API functions.
-//
-// HEX encoding contains one byte for each nibble of the key and an optional trailing
-// 'terminator' byte of value 0x10 which indicates whether or not the node at the key
-// contains a value. Hex key encoding is used for nodes loaded in memory because it's
-// convenient to access.
-//
-// COMPACT encoding is defined by the Ethereum Yellow Paper (it's called "hex prefix
-// encoding" there) and contains the bytes of the key and a flag. The high nibble of the
-// first byte contains the flag; the lowest bit encoding the oddness of the length and
-// the second-lowest encoding whether the node at the key is a value node. The low nibble
-// of the first byte is zero in the case of an even number of nibbles and the first nibble
-// in the case of an odd number. All remaining nibbles (now an even number) fit properly
-// into the remaining bytes. Compact encoding is used for nodes stored on disk.
-
-func hexToCompact(hex []byte) []byte {
- terminator := byte(0)
- if hasTerm(hex) {
- terminator = 1
- hex = hex[:len(hex)-1]
- }
- buf := make([]byte, len(hex)/2+1)
- buf[0] = terminator << 5 // the flag byte
- if len(hex)&1 == 1 {
- buf[0] |= 1 << 4 // odd flag
- buf[0] |= hex[0] // first nibble is contained in the first byte
- hex = hex[1:]
- }
- decodeNibbles(hex, buf[1:])
- return buf
-}
-
-// hexToCompactInPlace places the compact key in input buffer, returning the compacted key.
-func hexToCompactInPlace(hex []byte) []byte {
- var (
- hexLen = len(hex) // length of the hex input
- firstByte = byte(0)
- )
- // Check if we have a terminator there
- if hexLen > 0 && hex[hexLen-1] == 16 {
- firstByte = 1 << 5
- hexLen-- // last part was the terminator, ignore that
- }
- var (
- binLen = hexLen/2 + 1
- ni = 0 // index in hex
- bi = 1 // index in bin (compact)
- )
- if hexLen&1 == 1 {
- firstByte |= 1 << 4 // odd flag
- firstByte |= hex[0] // first nibble is contained in the first byte
- ni++
- }
- for ; ni < hexLen; bi, ni = bi+1, ni+2 {
- hex[bi] = hex[ni]<<4 | hex[ni+1]
- }
- hex[0] = firstByte
- return hex[:binLen]
-}
-
-func compactToHex(compact []byte) []byte {
- if len(compact) == 0 {
- return compact
- }
- base := keybytesToHex(compact)
- // delete terminator flag
- if base[0] < 2 {
- base = base[:len(base)-1]
- }
- // apply odd flag
- chop := 2 - base[0]&1
- return base[chop:]
-}
-
-func keybytesToHex(str []byte) []byte {
- l := len(str)*2 + 1
- var nibbles = make([]byte, l)
- for i, b := range str {
- nibbles[i*2] = b / 16
- nibbles[i*2+1] = b % 16
- }
- nibbles[l-1] = 16
- return nibbles
-}
-
-// hexToKeybytes turns hex nibbles into key bytes.
-// This can only be used for keys of even length.
-func hexToKeybytes(hex []byte) []byte {
- if hasTerm(hex) {
- hex = hex[:len(hex)-1]
- }
- if len(hex)&1 != 0 {
- panic("can't convert hex key of odd length")
- }
- key := make([]byte, len(hex)/2)
- decodeNibbles(hex, key)
- return key
-}
-
-func decodeNibbles(nibbles []byte, bytes []byte) {
- for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
- bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
- }
-}
-
-// prefixLen returns the length of the common prefix of a and b.
-func prefixLen(a, b []byte) int {
- var i, length = 0, len(a)
- if len(b) < length {
- length = len(b)
- }
- for ; i < length; i++ {
- if a[i] != b[i] {
- break
- }
- }
- return i
-}
-
-// hasTerm returns whether a hex key has the terminator flag.
-func hasTerm(s []byte) bool {
- return len(s) > 0 && s[len(s)-1] == 16
-}
diff --git a/trie/encoding_test.go b/trie/encoding_test.go
deleted file mode 100644
index e25e4ae600..0000000000
--- a/trie/encoding_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- crand "crypto/rand"
- "encoding/hex"
- "math/rand"
- "testing"
-)
-
-func TestHexCompact(t *testing.T) {
- tests := []struct{ hex, compact []byte }{
- // empty keys, with and without terminator.
- {hex: []byte{}, compact: []byte{0x00}},
- {hex: []byte{16}, compact: []byte{0x20}},
- // odd length, no terminator
- {hex: []byte{1, 2, 3, 4, 5}, compact: []byte{0x11, 0x23, 0x45}},
- // even length, no terminator
- {hex: []byte{0, 1, 2, 3, 4, 5}, compact: []byte{0x00, 0x01, 0x23, 0x45}},
- // odd length, terminator
- {hex: []byte{15, 1, 12, 11, 8, 16 /*term*/}, compact: []byte{0x3f, 0x1c, 0xb8}},
- // even length, terminator
- {hex: []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}, compact: []byte{0x20, 0x0f, 0x1c, 0xb8}},
- }
- for _, test := range tests {
- if c := hexToCompact(test.hex); !bytes.Equal(c, test.compact) {
- t.Errorf("hexToCompact(%x) -> %x, want %x", test.hex, c, test.compact)
- }
- if h := compactToHex(test.compact); !bytes.Equal(h, test.hex) {
- t.Errorf("compactToHex(%x) -> %x, want %x", test.compact, h, test.hex)
- }
- }
-}
-
-func TestHexKeybytes(t *testing.T) {
- tests := []struct{ key, hexIn, hexOut []byte }{
- {key: []byte{}, hexIn: []byte{16}, hexOut: []byte{16}},
- {key: []byte{}, hexIn: []byte{}, hexOut: []byte{16}},
- {
- key: []byte{0x12, 0x34, 0x56},
- hexIn: []byte{1, 2, 3, 4, 5, 6, 16},
- hexOut: []byte{1, 2, 3, 4, 5, 6, 16},
- },
- {
- key: []byte{0x12, 0x34, 0x5},
- hexIn: []byte{1, 2, 3, 4, 0, 5, 16},
- hexOut: []byte{1, 2, 3, 4, 0, 5, 16},
- },
- {
- key: []byte{0x12, 0x34, 0x56},
- hexIn: []byte{1, 2, 3, 4, 5, 6},
- hexOut: []byte{1, 2, 3, 4, 5, 6, 16},
- },
- }
- for _, test := range tests {
- if h := keybytesToHex(test.key); !bytes.Equal(h, test.hexOut) {
- t.Errorf("keybytesToHex(%x) -> %x, want %x", test.key, h, test.hexOut)
- }
- if k := hexToKeybytes(test.hexIn); !bytes.Equal(k, test.key) {
- t.Errorf("hexToKeybytes(%x) -> %x, want %x", test.hexIn, k, test.key)
- }
- }
-}
-
-func TestHexToCompactInPlace(t *testing.T) {
- for i, key := range []string{
- "00",
- "060a040c0f000a090b040803010801010900080d090a0a0d0903000b10",
- "10",
- } {
- hexBytes, _ := hex.DecodeString(key)
- exp := hexToCompact(hexBytes)
- got := hexToCompactInPlace(hexBytes)
- if !bytes.Equal(exp, got) {
- t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, key, got, exp)
- }
- }
-}
-
-func TestHexToCompactInPlaceRandom(t *testing.T) {
- for i := 0; i < 10000; i++ {
- l := rand.Intn(128)
- key := make([]byte, l)
- crand.Read(key)
- hexBytes := keybytesToHex(key)
- hexOrig := []byte(string(hexBytes))
- exp := hexToCompact(hexBytes)
- got := hexToCompactInPlace(hexBytes)
-
- if !bytes.Equal(exp, got) {
- t.Fatalf("encoding err \ncpt %x\nhex %x\ngot %x\nexp %x\n",
- key, hexOrig, got, exp)
- }
- }
-}
-
-func BenchmarkHexToCompact(b *testing.B) {
- testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
- for i := 0; i < b.N; i++ {
- hexToCompact(testBytes)
- }
-}
-
-func BenchmarkHexToCompactInPlace(b *testing.B) {
- testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
- for i := 0; i < b.N; i++ {
- hexToCompactInPlace(testBytes)
- }
-}
-
-func BenchmarkCompactToHex(b *testing.B) {
- testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
- for i := 0; i < b.N; i++ {
- compactToHex(testBytes)
- }
-}
-
-func BenchmarkKeybytesToHex(b *testing.B) {
- testBytes := []byte{7, 6, 6, 5, 7, 2, 6, 2, 16}
- for i := 0; i < b.N; i++ {
- keybytesToHex(testBytes)
- }
-}
-
-func BenchmarkHexToKeybytes(b *testing.B) {
- testBytes := []byte{7, 6, 6, 5, 7, 2, 6, 2, 16}
- for i := 0; i < b.N; i++ {
- hexToKeybytes(testBytes)
- }
-}
diff --git a/trie/errors.go b/trie/errors.go
deleted file mode 100644
index a39fb9baf9..0000000000
--- a/trie/errors.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "errors"
- "fmt"
-
- "github.com/ava-labs/libevm/common"
-)
-
-// ErrCommitted is returned when a already committed trie is requested for usage.
-// The potential usages can be `Get`, `Update`, `Delete`, `NodeIterator`, `Prove`
-// and so on.
-var ErrCommitted = errors.New("trie is already committed")
-
-// MissingNodeError is returned by the trie functions (Get, Update, Delete)
-// in the case where a trie node is not present in the local database. It contains
-// information necessary for retrieving the missing node.
-type MissingNodeError struct {
- Owner common.Hash // owner of the trie if it's 2-layered trie
- NodeHash common.Hash // hash of the missing node
- Path []byte // hex-encoded path to the missing node
- err error // concrete error for missing trie node
-}
-
-// Unwrap returns the concrete error for missing trie node which
-// allows us for further analysis outside.
-func (err *MissingNodeError) Unwrap() error {
- return err.err
-}
-
-func (err *MissingNodeError) Error() string {
- if err.Owner == (common.Hash{}) {
- return fmt.Sprintf("missing trie node %x (path %x) %v", err.NodeHash, err.Path, err.err)
- }
- return fmt.Sprintf("missing trie node %x (owner %x) (path %x) %v", err.NodeHash, err.Owner, err.Path, err.err)
-}
diff --git a/trie/hasher.go b/trie/hasher.go
deleted file mode 100644
index 72e4fe8384..0000000000
--- a/trie/hasher.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "sync"
-
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
- "golang.org/x/crypto/sha3"
-)
-
-// hasher is a type used for the trie Hash operation. A hasher has some
-// internal preallocated temp space
-type hasher struct {
- sha crypto.KeccakState
- tmp []byte
- encbuf rlp.EncoderBuffer
- parallel bool // Whether to use parallel threads when hashing
-}
-
-// hasherPool holds pureHashers
-var hasherPool = sync.Pool{
- New: func() interface{} {
- return &hasher{
- tmp: make([]byte, 0, 550), // cap is as large as a full fullNode.
- sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
- encbuf: rlp.NewEncoderBuffer(nil),
- }
- },
-}
-
-func newHasher(parallel bool) *hasher {
- h := hasherPool.Get().(*hasher)
- h.parallel = parallel
- return h
-}
-
-func returnHasherToPool(h *hasher) {
- hasherPool.Put(h)
-}
-
-// hash collapses a node down into a hash node, also returning a copy of the
-// original node initialized with the computed hash to replace the original one.
-func (h *hasher) hash(n node, force bool) (hashed node, cached node) {
- // Return the cached hash if it's available
- if hash, _ := n.cache(); hash != nil {
- return hash, n
- }
- // Trie not processed yet, walk the children
- switch n := n.(type) {
- case *shortNode:
- collapsed, cached := h.hashShortNodeChildren(n)
- hashed := h.shortnodeToHash(collapsed, force)
- // We need to retain the possibly _not_ hashed node, in case it was too
- // small to be hashed
- if hn, ok := hashed.(hashNode); ok {
- cached.flags.hash = hn
- } else {
- cached.flags.hash = nil
- }
- return hashed, cached
- case *fullNode:
- collapsed, cached := h.hashFullNodeChildren(n)
- hashed = h.fullnodeToHash(collapsed, force)
- if hn, ok := hashed.(hashNode); ok {
- cached.flags.hash = hn
- } else {
- cached.flags.hash = nil
- }
- return hashed, cached
- default:
- // Value and hash nodes don't have children, so they're left as were
- return n, n
- }
-}
-
-// hashShortNodeChildren collapses the short node. The returned collapsed node
-// holds a live reference to the Key, and must not be modified.
-func (h *hasher) hashShortNodeChildren(n *shortNode) (collapsed, cached *shortNode) {
- // Hash the short node's child, caching the newly hashed subtree
- collapsed, cached = n.copy(), n.copy()
- // Previously, we did copy this one. We don't seem to need to actually
- // do that, since we don't overwrite/reuse keys
- // cached.Key = common.CopyBytes(n.Key)
- collapsed.Key = hexToCompact(n.Key)
- // Unless the child is a valuenode or hashnode, hash it
- switch n.Val.(type) {
- case *fullNode, *shortNode:
- collapsed.Val, cached.Val = h.hash(n.Val, false)
- }
- return collapsed, cached
-}
-
-func (h *hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached *fullNode) {
- // Hash the full node's children, caching the newly hashed subtrees
- cached = n.copy()
- collapsed = n.copy()
- if h.parallel {
- var wg sync.WaitGroup
- wg.Add(16)
- for i := 0; i < 16; i++ {
- go func(i int) {
- hasher := newHasher(false)
- if child := n.Children[i]; child != nil {
- collapsed.Children[i], cached.Children[i] = hasher.hash(child, false)
- } else {
- collapsed.Children[i] = nilValueNode
- }
- returnHasherToPool(hasher)
- wg.Done()
- }(i)
- }
- wg.Wait()
- } else {
- for i := 0; i < 16; i++ {
- if child := n.Children[i]; child != nil {
- collapsed.Children[i], cached.Children[i] = h.hash(child, false)
- } else {
- collapsed.Children[i] = nilValueNode
- }
- }
- }
- return collapsed, cached
-}
-
-// shortnodeToHash creates a hashNode from a shortNode. The supplied shortnode
-// should have hex-type Key, which will be converted (without modification)
-// into compact form for RLP encoding.
-// If the rlp data is smaller than 32 bytes, `nil` is returned.
-func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
- n.encode(h.encbuf)
- enc := h.encodedBytes()
-
- if len(enc) < 32 && !force {
- return n // Nodes smaller than 32 bytes are stored inside their parent
- }
- return h.hashData(enc)
-}
-
-// fullnodeToHash is used to create a hashNode from a fullNode, (which
-// may contain nil values)
-func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
- n.encode(h.encbuf)
- enc := h.encodedBytes()
-
- if len(enc) < 32 && !force {
- return n // Nodes smaller than 32 bytes are stored inside their parent
- }
- return h.hashData(enc)
-}
-
-// encodedBytes returns the result of the last encoding operation on h.encbuf.
-// This also resets the encoder buffer.
-//
-// All node encoding must be done like this:
-//
-// node.encode(h.encbuf)
-// enc := h.encodedBytes()
-//
-// This convention exists because node.encode can only be inlined/escape-analyzed when
-// called on a concrete receiver type.
-func (h *hasher) encodedBytes() []byte {
- h.tmp = h.encbuf.AppendToBytes(h.tmp[:0])
- h.encbuf.Reset(nil)
- return h.tmp
-}
-
-// hashData hashes the provided data
-func (h *hasher) hashData(data []byte) hashNode {
- n := make(hashNode, 32)
- h.sha.Reset()
- h.sha.Write(data)
- h.sha.Read(n)
- return n
-}
-
-// proofHash is used to construct trie proofs, and returns the 'collapsed'
-// node (for later RLP encoding) as well as the hashed node -- unless the
-// node is smaller than 32 bytes, in which case it will be returned as is.
-// This method does not do anything on value- or hash-nodes.
-func (h *hasher) proofHash(original node) (collapsed, hashed node) {
- switch n := original.(type) {
- case *shortNode:
- sn, _ := h.hashShortNodeChildren(n)
- return sn, h.shortnodeToHash(sn, false)
- case *fullNode:
- fn, _ := h.hashFullNodeChildren(n)
- return fn, h.fullnodeToHash(fn, false)
- default:
- // Value and hash nodes don't have children, so they're left as were
- return n, n
- }
-}
diff --git a/trie/iterator.go b/trie/iterator.go
deleted file mode 100644
index d174dae0b3..0000000000
--- a/trie/iterator.go
+++ /dev/null
@@ -1,801 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "container/heap"
- "errors"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/libevm/common"
-)
-
-// NodeResolver is used for looking up trie nodes before reaching into the real
-// persistent layer. This is not mandatory, rather is an optimization for cases
-// where trie nodes can be recovered from some external mechanism without reading
-// from disk. In those cases, this resolver allows short circuiting accesses and
-// returning them from memory.
-type NodeResolver func(owner common.Hash, path []byte, hash common.Hash) []byte
-
-// Iterator is a key-value trie iterator that traverses a Trie.
-type Iterator struct {
- nodeIt NodeIterator
-
- Key []byte // Current data key on which the iterator is positioned on
- Value []byte // Current data value on which the iterator is positioned on
- Err error
-}
-
-// NewIterator creates a new key-value iterator from a node iterator.
-// Note that the value returned by the iterator is raw. If the content is encoded
-// (e.g. storage value is RLP-encoded), it's caller's duty to decode it.
-func NewIterator(it NodeIterator) *Iterator {
- return &Iterator{
- nodeIt: it,
- }
-}
-
-// Next moves the iterator forward one key-value entry.
-func (it *Iterator) Next() bool {
- for it.nodeIt.Next(true) {
- if it.nodeIt.Leaf() {
- it.Key = it.nodeIt.LeafKey()
- it.Value = it.nodeIt.LeafBlob()
- return true
- }
- }
- it.Key = nil
- it.Value = nil
- it.Err = it.nodeIt.Error()
- return false
-}
-
-// Prove generates the Merkle proof for the leaf node the iterator is currently
-// positioned on.
-func (it *Iterator) Prove() [][]byte {
- return it.nodeIt.LeafProof()
-}
-
-// NodeIterator is an iterator to traverse the trie pre-order.
-type NodeIterator interface {
- // Next moves the iterator to the next node. If the parameter is false, any child
- // nodes will be skipped.
- Next(bool) bool
-
- // Error returns the error status of the iterator.
- Error() error
-
- // Hash returns the hash of the current node.
- Hash() common.Hash
-
- // Parent returns the hash of the parent of the current node. The hash may be the one
- // grandparent if the immediate parent is an internal node with no hash.
- Parent() common.Hash
-
- // Path returns the hex-encoded path to the current node.
- // Callers must not retain references to the return value after calling Next.
- // For leaf nodes, the last element of the path is the 'terminator symbol' 0x10.
- Path() []byte
-
- // NodeBlob returns the rlp-encoded value of the current iterated node.
- // If the node is an embedded node in its parent, nil is returned then.
- NodeBlob() []byte
-
- // Leaf returns true iff the current node is a leaf node.
- Leaf() bool
-
- // LeafKey returns the key of the leaf. The method panics if the iterator is not
- // positioned at a leaf. Callers must not retain references to the value after
- // calling Next.
- LeafKey() []byte
-
- // LeafBlob returns the content of the leaf. The method panics if the iterator
- // is not positioned at a leaf. Callers must not retain references to the value
- // after calling Next.
- LeafBlob() []byte
-
- // LeafProof returns the Merkle proof of the leaf. The method panics if the
- // iterator is not positioned at a leaf. Callers must not retain references
- // to the value after calling Next.
- LeafProof() [][]byte
-
- // AddResolver sets a node resolver to use for looking up trie nodes before
- // reaching into the real persistent layer.
- //
- // This is not required for normal operation, rather is an optimization for
- // cases where trie nodes can be recovered from some external mechanism without
- // reading from disk. In those cases, this resolver allows short circuiting
- // accesses and returning them from memory.
- //
- // Before adding a similar mechanism to any other place in Geth, consider
- // making trie.Database an interface and wrapping at that level. It's a huge
- // refactor, but it could be worth it if another occurrence arises.
- AddResolver(NodeResolver)
-}
-
-// nodeIteratorState represents the iteration state at one particular node of the
-// trie, which can be resumed at a later invocation.
-type nodeIteratorState struct {
- hash common.Hash // Hash of the node being iterated (nil if not standalone)
- node node // Trie node being iterated
- parent common.Hash // Hash of the first full ancestor node (nil if current is the root)
- index int // Child to be processed next
- pathlen int // Length of the path to this node
-}
-
-type nodeIterator struct {
- trie *Trie // Trie being iterated
- stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state
- path []byte // Path to the current node
- err error // Failure set in case of an internal error in the iterator
-
- resolver NodeResolver // optional node resolver for avoiding disk hits
- pool []*nodeIteratorState // local pool for iteratorstates
-}
-
-// errIteratorEnd is stored in nodeIterator.err when iteration is done.
-var errIteratorEnd = errors.New("end of iteration")
-
-// seekError is stored in nodeIterator.err if the initial seek has failed.
-type seekError struct {
- key []byte
- err error
-}
-
-func (e seekError) Error() string {
- return "seek error: " + e.err.Error()
-}
-
-func newNodeIterator(trie *Trie, start []byte) NodeIterator {
- if trie.Hash() == types.EmptyRootHash {
- return &nodeIterator{
- trie: trie,
- err: errIteratorEnd,
- }
- }
- it := &nodeIterator{trie: trie}
- it.err = it.seek(start)
- return it
-}
-
-func (it *nodeIterator) putInPool(item *nodeIteratorState) {
- if len(it.pool) < 40 {
- item.node = nil
- it.pool = append(it.pool, item)
- }
-}
-
-func (it *nodeIterator) getFromPool() *nodeIteratorState {
- idx := len(it.pool) - 1
- if idx < 0 {
- return new(nodeIteratorState)
- }
- el := it.pool[idx]
- it.pool[idx] = nil
- it.pool = it.pool[:idx]
- return el
-}
-
-func (it *nodeIterator) AddResolver(resolver NodeResolver) {
- it.resolver = resolver
-}
-
-func (it *nodeIterator) Hash() common.Hash {
- if len(it.stack) == 0 {
- return common.Hash{}
- }
- return it.stack[len(it.stack)-1].hash
-}
-
-func (it *nodeIterator) Parent() common.Hash {
- if len(it.stack) == 0 {
- return common.Hash{}
- }
- return it.stack[len(it.stack)-1].parent
-}
-
-func (it *nodeIterator) Leaf() bool {
- return hasTerm(it.path)
-}
-
-func (it *nodeIterator) LeafKey() []byte {
- if len(it.stack) > 0 {
- if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
- return hexToKeybytes(it.path)
- }
- }
- panic("not at leaf")
-}
-
-func (it *nodeIterator) LeafBlob() []byte {
- if len(it.stack) > 0 {
- if node, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
- return node
- }
- }
- panic("not at leaf")
-}
-
-func (it *nodeIterator) LeafProof() [][]byte {
- if len(it.stack) > 0 {
- if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
- hasher := newHasher(false)
- defer returnHasherToPool(hasher)
- proofs := make([][]byte, 0, len(it.stack))
-
- for i, item := range it.stack[:len(it.stack)-1] {
- // Gather nodes that end up as hash nodes (or the root)
- node, hashed := hasher.proofHash(item.node)
- if _, ok := hashed.(hashNode); ok || i == 0 {
- proofs = append(proofs, nodeToBytes(node))
- }
- }
- return proofs
- }
- }
- panic("not at leaf")
-}
-
-func (it *nodeIterator) Path() []byte {
- return it.path
-}
-
-func (it *nodeIterator) NodeBlob() []byte {
- if it.Hash() == (common.Hash{}) {
- return nil // skip the non-standalone node
- }
- blob, err := it.resolveBlob(it.Hash().Bytes(), it.Path())
- if err != nil {
- it.err = err
- return nil
- }
- return blob
-}
-
-func (it *nodeIterator) Error() error {
- if it.err == errIteratorEnd {
- return nil
- }
- if seek, ok := it.err.(seekError); ok {
- return seek.err
- }
- return it.err
-}
-
-// Next moves the iterator to the next node, returning whether there are any
-// further nodes. In case of an internal error this method returns false and
-// sets the Error field to the encountered failure. If `descend` is false,
-// skips iterating over any subnodes of the current node.
-func (it *nodeIterator) Next(descend bool) bool {
- if it.err == errIteratorEnd {
- return false
- }
- if seek, ok := it.err.(seekError); ok {
- if it.err = it.seek(seek.key); it.err != nil {
- return false
- }
- }
- // Otherwise step forward with the iterator and report any errors.
- state, parentIndex, path, err := it.peek(descend)
- it.err = err
- if it.err != nil {
- return false
- }
- it.push(state, parentIndex, path)
- return true
-}
-
-func (it *nodeIterator) seek(prefix []byte) error {
- // The path we're looking for is the hex encoded key without terminator.
- key := keybytesToHex(prefix)
- key = key[:len(key)-1]
- // Move forward until we're just before the closest match to key.
- for {
- state, parentIndex, path, err := it.peekSeek(key)
- if err == errIteratorEnd {
- return errIteratorEnd
- } else if err != nil {
- return seekError{prefix, err}
- } else if bytes.Compare(path, key) >= 0 {
- return nil
- }
- it.push(state, parentIndex, path)
- }
-}
-
-// init initializes the iterator.
-func (it *nodeIterator) init() (*nodeIteratorState, error) {
- root := it.trie.Hash()
- state := &nodeIteratorState{node: it.trie.root, index: -1}
- if root != types.EmptyRootHash {
- state.hash = root
- }
- return state, state.resolve(it, nil)
-}
-
-// peek creates the next state of the iterator.
-func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, error) {
- // Initialize the iterator if we've just started.
- if len(it.stack) == 0 {
- state, err := it.init()
- return state, nil, nil, err
- }
- if !descend {
- // If we're skipping children, pop the current node first
- it.pop()
- }
-
- // Continue iteration to the next child
- for len(it.stack) > 0 {
- parent := it.stack[len(it.stack)-1]
- ancestor := parent.hash
- if (ancestor == common.Hash{}) {
- ancestor = parent.parent
- }
- state, path, ok := it.nextChild(parent, ancestor)
- if ok {
- if err := state.resolve(it, path); err != nil {
- return parent, &parent.index, path, err
- }
- return state, &parent.index, path, nil
- }
- // No more child nodes, move back up.
- it.pop()
- }
- return nil, nil, nil, errIteratorEnd
-}
-
-// peekSeek is like peek, but it also tries to skip resolving hashes by skipping
-// over the siblings that do not lead towards the desired seek position.
-func (it *nodeIterator) peekSeek(seekKey []byte) (*nodeIteratorState, *int, []byte, error) {
- // Initialize the iterator if we've just started.
- if len(it.stack) == 0 {
- state, err := it.init()
- return state, nil, nil, err
- }
- if !bytes.HasPrefix(seekKey, it.path) {
- // If we're skipping children, pop the current node first
- it.pop()
- }
-
- // Continue iteration to the next child
- for len(it.stack) > 0 {
- parent := it.stack[len(it.stack)-1]
- ancestor := parent.hash
- if (ancestor == common.Hash{}) {
- ancestor = parent.parent
- }
- state, path, ok := it.nextChildAt(parent, ancestor, seekKey)
- if ok {
- if err := state.resolve(it, path); err != nil {
- return parent, &parent.index, path, err
- }
- return state, &parent.index, path, nil
- }
- // No more child nodes, move back up.
- it.pop()
- }
- return nil, nil, nil, errIteratorEnd
-}
-
-func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
- if it.resolver != nil {
- if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 {
- if resolved, err := decodeNode(hash, blob); err == nil {
- return resolved, nil
- }
- }
- }
- // Retrieve the specified node from the underlying node reader.
- // it.trie.resolveAndTrack is not used since in that function the
- // loaded blob will be tracked, while it's not required here since
- // all loaded nodes won't be linked to trie at all and track nodes
- // may lead to out-of-memory issue.
- blob, err := it.trie.reader.node(path, common.BytesToHash(hash))
- if err != nil {
- return nil, err
- }
- // The raw-blob format nodes are loaded either from the
- // clean cache or the database, they are all in their own
- // copy and safe to use unsafe decoder.
- return mustDecodeNodeUnsafe(hash, blob), nil
-}
-
-func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
- if it.resolver != nil {
- if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 {
- return blob, nil
- }
- }
- // Retrieve the specified node from the underlying node reader.
- // it.trie.resolveAndTrack is not used since in that function the
- // loaded blob will be tracked, while it's not required here since
- // all loaded nodes won't be linked to trie at all and track nodes
- // may lead to out-of-memory issue.
- return it.trie.reader.node(path, common.BytesToHash(hash))
-}
-
-func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
- if hash, ok := st.node.(hashNode); ok {
- resolved, err := it.resolveHash(hash, path)
- if err != nil {
- return err
- }
- st.node = resolved
- st.hash = common.BytesToHash(hash)
- }
- return nil
-}
-
-func (it *nodeIterator) findChild(n *fullNode, index int, ancestor common.Hash) (node, *nodeIteratorState, []byte, int) {
- var (
- path = it.path
- child node
- state *nodeIteratorState
- childPath []byte
- )
- for ; index < len(n.Children); index++ {
- if n.Children[index] != nil {
- child = n.Children[index]
- hash, _ := child.cache()
- state = it.getFromPool()
- state.hash = common.BytesToHash(hash)
- state.node = child
- state.parent = ancestor
- state.index = -1
- state.pathlen = len(path)
- childPath = append(childPath, path...)
- childPath = append(childPath, byte(index))
- return child, state, childPath, index
- }
- }
- return nil, nil, nil, 0
-}
-
-func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor common.Hash) (*nodeIteratorState, []byte, bool) {
- switch node := parent.node.(type) {
- case *fullNode:
- // Full node, move to the first non-nil child.
- if child, state, path, index := it.findChild(node, parent.index+1, ancestor); child != nil {
- parent.index = index - 1
- return state, path, true
- }
- case *shortNode:
- // Short node, return the pointer singleton child
- if parent.index < 0 {
- hash, _ := node.Val.cache()
- state := it.getFromPool()
- state.hash = common.BytesToHash(hash)
- state.node = node.Val
- state.parent = ancestor
- state.index = -1
- state.pathlen = len(it.path)
- path := append(it.path, node.Key...)
- return state, path, true
- }
- }
- return parent, it.path, false
-}
-
-// nextChildAt is similar to nextChild, except that it targets a child as close to the
-// target key as possible, thus skipping siblings.
-func (it *nodeIterator) nextChildAt(parent *nodeIteratorState, ancestor common.Hash, key []byte) (*nodeIteratorState, []byte, bool) {
- switch n := parent.node.(type) {
- case *fullNode:
- // Full node, move to the first non-nil child before the desired key position
- child, state, path, index := it.findChild(n, parent.index+1, ancestor)
- if child == nil {
- // No more children in this fullnode
- return parent, it.path, false
- }
- // If the child we found is already past the seek position, just return it.
- if bytes.Compare(path, key) >= 0 {
- parent.index = index - 1
- return state, path, true
- }
- // The child is before the seek position. Try advancing
- for {
- nextChild, nextState, nextPath, nextIndex := it.findChild(n, index+1, ancestor)
- // If we run out of children, or skipped past the target, return the
- // previous one
- if nextChild == nil || bytes.Compare(nextPath, key) >= 0 {
- parent.index = index - 1
- return state, path, true
- }
- // We found a better child closer to the target
- state, path, index = nextState, nextPath, nextIndex
- }
- case *shortNode:
- // Short node, return the pointer singleton child
- if parent.index < 0 {
- hash, _ := n.Val.cache()
- state := it.getFromPool()
- state.hash = common.BytesToHash(hash)
- state.node = n.Val
- state.parent = ancestor
- state.index = -1
- state.pathlen = len(it.path)
- path := append(it.path, n.Key...)
- return state, path, true
- }
- }
- return parent, it.path, false
-}
-
-func (it *nodeIterator) push(state *nodeIteratorState, parentIndex *int, path []byte) {
- it.path = path
- it.stack = append(it.stack, state)
- if parentIndex != nil {
- *parentIndex++
- }
-}
-
-func (it *nodeIterator) pop() {
- last := it.stack[len(it.stack)-1]
- it.path = it.path[:last.pathlen]
- it.stack[len(it.stack)-1] = nil
- it.stack = it.stack[:len(it.stack)-1]
- // last is now unused
- it.putInPool(last)
-}
-
-func compareNodes(a, b NodeIterator) int {
- if cmp := bytes.Compare(a.Path(), b.Path()); cmp != 0 {
- return cmp
- }
- if a.Leaf() && !b.Leaf() {
- return -1
- } else if b.Leaf() && !a.Leaf() {
- return 1
- }
- if cmp := bytes.Compare(a.Hash().Bytes(), b.Hash().Bytes()); cmp != 0 {
- return cmp
- }
- if a.Leaf() && b.Leaf() {
- return bytes.Compare(a.LeafBlob(), b.LeafBlob())
- }
- return 0
-}
-
-type differenceIterator struct {
- a, b NodeIterator // Nodes returned are those in b - a.
- eof bool // Indicates a has run out of elements
- count int // Number of nodes scanned on either trie
-}
-
-// NewDifferenceIterator constructs a NodeIterator that iterates over elements in b that
-// are not in a. Returns the iterator, and a pointer to an integer recording the number
-// of nodes seen.
-func NewDifferenceIterator(a, b NodeIterator) (NodeIterator, *int) {
- a.Next(true)
- it := &differenceIterator{
- a: a,
- b: b,
- }
- return it, &it.count
-}
-
-func (it *differenceIterator) Hash() common.Hash {
- return it.b.Hash()
-}
-
-func (it *differenceIterator) Parent() common.Hash {
- return it.b.Parent()
-}
-
-func (it *differenceIterator) Leaf() bool {
- return it.b.Leaf()
-}
-
-func (it *differenceIterator) LeafKey() []byte {
- return it.b.LeafKey()
-}
-
-func (it *differenceIterator) LeafBlob() []byte {
- return it.b.LeafBlob()
-}
-
-func (it *differenceIterator) LeafProof() [][]byte {
- return it.b.LeafProof()
-}
-
-func (it *differenceIterator) Path() []byte {
- return it.b.Path()
-}
-
-func (it *differenceIterator) NodeBlob() []byte {
- return it.b.NodeBlob()
-}
-
-func (it *differenceIterator) AddResolver(resolver NodeResolver) {
- panic("not implemented")
-}
-
-func (it *differenceIterator) Next(bool) bool {
- // Invariants:
- // - We always advance at least one element in b.
- // - At the start of this function, a's path is lexically greater than b's.
- if !it.b.Next(true) {
- return false
- }
- it.count++
-
- if it.eof {
- // a has reached eof, so we just return all elements from b
- return true
- }
-
- for {
- switch compareNodes(it.a, it.b) {
- case -1:
- // b jumped past a; advance a
- if !it.a.Next(true) {
- it.eof = true
- return true
- }
- it.count++
- case 1:
- // b is before a
- return true
- case 0:
- // a and b are identical; skip this whole subtree if the nodes have hashes
- hasHash := it.a.Hash() == common.Hash{}
- if !it.b.Next(hasHash) {
- return false
- }
- it.count++
- if !it.a.Next(hasHash) {
- it.eof = true
- return true
- }
- it.count++
- }
- }
-}
-
-func (it *differenceIterator) Error() error {
- if err := it.a.Error(); err != nil {
- return err
- }
- return it.b.Error()
-}
-
-type nodeIteratorHeap []NodeIterator
-
-func (h nodeIteratorHeap) Len() int { return len(h) }
-func (h nodeIteratorHeap) Less(i, j int) bool { return compareNodes(h[i], h[j]) < 0 }
-func (h nodeIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
-func (h *nodeIteratorHeap) Push(x interface{}) { *h = append(*h, x.(NodeIterator)) }
-func (h *nodeIteratorHeap) Pop() interface{} {
- n := len(*h)
- x := (*h)[n-1]
- *h = (*h)[0 : n-1]
- return x
-}
-
-type unionIterator struct {
- items *nodeIteratorHeap // Nodes returned are the union of the ones in these iterators
- count int // Number of nodes scanned across all tries
-}
-
-// NewUnionIterator constructs a NodeIterator that iterates over elements in the union
-// of the provided NodeIterators. Returns the iterator, and a pointer to an integer
-// recording the number of nodes visited.
-func NewUnionIterator(iters []NodeIterator) (NodeIterator, *int) {
- h := make(nodeIteratorHeap, len(iters))
- copy(h, iters)
- heap.Init(&h)
-
- ui := &unionIterator{items: &h}
- return ui, &ui.count
-}
-
-func (it *unionIterator) Hash() common.Hash {
- return (*it.items)[0].Hash()
-}
-
-func (it *unionIterator) Parent() common.Hash {
- return (*it.items)[0].Parent()
-}
-
-func (it *unionIterator) Leaf() bool {
- return (*it.items)[0].Leaf()
-}
-
-func (it *unionIterator) LeafKey() []byte {
- return (*it.items)[0].LeafKey()
-}
-
-func (it *unionIterator) LeafBlob() []byte {
- return (*it.items)[0].LeafBlob()
-}
-
-func (it *unionIterator) LeafProof() [][]byte {
- return (*it.items)[0].LeafProof()
-}
-
-func (it *unionIterator) Path() []byte {
- return (*it.items)[0].Path()
-}
-
-func (it *unionIterator) NodeBlob() []byte {
- return (*it.items)[0].NodeBlob()
-}
-
-func (it *unionIterator) AddResolver(resolver NodeResolver) {
- panic("not implemented")
-}
-
-// Next returns the next node in the union of tries being iterated over.
-//
-// It does this by maintaining a heap of iterators, sorted by the iteration
-// order of their next elements, with one entry for each source trie. Each
-// time Next() is called, it takes the least element from the heap to return,
-// advancing any other iterators that also point to that same element. These
-// iterators are called with descend=false, since we know that any nodes under
-// these nodes will also be duplicates, found in the currently selected iterator.
-// Whenever an iterator is advanced, it is pushed back into the heap if it still
-// has elements remaining.
-//
-// In the case that descend=false - eg, we're asked to ignore all subnodes of the
-// current node - we also advance any iterators in the heap that have the current
-// path as a prefix.
-func (it *unionIterator) Next(descend bool) bool {
- if len(*it.items) == 0 {
- return false
- }
-
- // Get the next key from the union
- least := heap.Pop(it.items).(NodeIterator)
-
- // Skip over other nodes as long as they're identical, or, if we're not descending, as
- // long as they have the same prefix as the current node.
- for len(*it.items) > 0 && ((!descend && bytes.HasPrefix((*it.items)[0].Path(), least.Path())) || compareNodes(least, (*it.items)[0]) == 0) {
- skipped := heap.Pop(it.items).(NodeIterator)
- // Skip the whole subtree if the nodes have hashes; otherwise just skip this node
- if skipped.Next(skipped.Hash() == common.Hash{}) {
- it.count++
- // If there are more elements, push the iterator back on the heap
- heap.Push(it.items, skipped)
- }
- }
- if least.Next(descend) {
- it.count++
- heap.Push(it.items, least)
- }
- return len(*it.items) > 0
-}
-
-func (it *unionIterator) Error() error {
- for i := 0; i < len(*it.items); i++ {
- if err := (*it.items)[i].Error(); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
deleted file mode 100644
index 3219d93502..0000000000
--- a/trie/iterator_test.go
+++ /dev/null
@@ -1,641 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
-)
-
-func TestEmptyIterator(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- iter := trie.MustNodeIterator(nil)
-
- seen := make(map[string]struct{})
- for iter.Next(true) {
- seen[string(iter.Path())] = struct{}{}
- }
- if len(seen) != 0 {
- t.Fatal("Unexpected trie node iterated")
- }
-}
-
-func TestIterator(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- all := make(map[string]string)
- for _, val := range vals {
- all[val.k] = val.v
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- found := make(map[string]string)
- it := NewIterator(trie.MustNodeIterator(nil))
- for it.Next() {
- found[string(it.Key)] = string(it.Value)
- }
-
- for k, v := range all {
- if found[k] != v {
- t.Errorf("iterator value mismatch for %s: got %q want %q", k, found[k], v)
- }
- }
-}
-
-type kv struct {
- k, v []byte
- t bool
-}
-
-func (k *kv) cmp(other *kv) int {
- return bytes.Compare(k.k, other.k)
-}
-
-func TestIteratorLargeData(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- vals := make(map[string]*kv)
-
- for i := byte(0); i < 255; i++ {
- value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- value2 := &kv{common.LeftPadBytes([]byte{10, i}, 32), []byte{i}, false}
- trie.MustUpdate(value.k, value.v)
- trie.MustUpdate(value2.k, value2.v)
- vals[string(value.k)] = value
- vals[string(value2.k)] = value2
- }
-
- it := NewIterator(trie.MustNodeIterator(nil))
- for it.Next() {
- vals[string(it.Key)].t = true
- }
-
- var untouched []*kv
- for _, value := range vals {
- if !value.t {
- untouched = append(untouched, value)
- }
- }
-
- if len(untouched) > 0 {
- t.Errorf("Missed %d nodes", len(untouched))
- for _, value := range untouched {
- t.Error(value)
- }
- }
-}
-
-type iterationElement struct {
- hash common.Hash
- path []byte
- blob []byte
-}
-
-// Tests that the node iterator indeed walks over the entire database contents.
-func TestNodeIteratorCoverage(t *testing.T) {
- testNodeIteratorCoverage(t, rawdb.HashScheme)
- testNodeIteratorCoverage(t, rawdb.PathScheme)
-}
-
-func testNodeIteratorCoverage(t *testing.T, scheme string) {
- // Create some arbitrary test trie to iterate
- db, nodeDb, trie, _ := makeTestTrie(scheme)
-
- // Gather all the node hashes found by the iterator
- var elements = make(map[common.Hash]iterationElement)
- for it := trie.MustNodeIterator(nil); it.Next(true); {
- if it.Hash() != (common.Hash{}) {
- elements[it.Hash()] = iterationElement{
- hash: it.Hash(),
- path: common.CopyBytes(it.Path()),
- blob: common.CopyBytes(it.NodeBlob()),
- }
- }
- }
- // Cross check the hashes and the database itself
- reader, err := nodeDb.Reader(trie.Hash())
- if err != nil {
- t.Fatalf("state is not available %x", trie.Hash())
- }
- for _, element := range elements {
- if blob, err := reader.Node(common.Hash{}, element.path, element.hash); err != nil {
- t.Errorf("failed to retrieve reported node %x: %v", element.hash, err)
- } else if !bytes.Equal(blob, element.blob) {
- t.Errorf("node blob is different, want %v got %v", element.blob, blob)
- }
- }
- var (
- count int
- it = db.NewIterator(nil, nil)
- )
- for it.Next() {
- res, _, _ := isTrieNode(nodeDb.Scheme(), it.Key(), it.Value())
- if !res {
- continue
- }
- count += 1
- if elem, ok := elements[crypto.Keccak256Hash(it.Value())]; !ok {
- t.Error("state entry not reported")
- } else if !bytes.Equal(it.Value(), elem.blob) {
- t.Errorf("node blob is different, want %v got %v", elem.blob, it.Value())
- }
- }
- it.Release()
- if count != len(elements) {
- t.Errorf("state entry is mismatched %d %d", count, len(elements))
- }
-}
-
-type kvs struct{ k, v string }
-
-var testdata1 = []kvs{
- {"barb", "ba"},
- {"bard", "bc"},
- {"bars", "bb"},
- {"bar", "b"},
- {"fab", "z"},
- {"food", "ab"},
- {"foos", "aa"},
- {"foo", "a"},
-}
-
-var testdata2 = []kvs{
- {"aardvark", "c"},
- {"bar", "b"},
- {"barb", "bd"},
- {"bars", "be"},
- {"fab", "z"},
- {"foo", "a"},
- {"foos", "aa"},
- {"food", "ab"},
- {"jars", "d"},
-}
-
-func TestIteratorSeek(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for _, val := range testdata1 {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
-
- // Seek to the middle.
- it := NewIterator(trie.MustNodeIterator([]byte("fab")))
- if err := checkIteratorOrder(testdata1[4:], it); err != nil {
- t.Fatal(err)
- }
-
- // Seek to a non-existent key.
- it = NewIterator(trie.MustNodeIterator([]byte("barc")))
- if err := checkIteratorOrder(testdata1[1:], it); err != nil {
- t.Fatal(err)
- }
-
- // Seek beyond the end.
- it = NewIterator(trie.MustNodeIterator([]byte("z")))
- if err := checkIteratorOrder(nil, it); err != nil {
- t.Fatal(err)
- }
-}
-
-func checkIteratorOrder(want []kvs, it *Iterator) error {
- for it.Next() {
- if len(want) == 0 {
- return fmt.Errorf("didn't expect any more values, got key %q", it.Key)
- }
- if !bytes.Equal(it.Key, []byte(want[0].k)) {
- return fmt.Errorf("wrong key: got %q, want %q", it.Key, want[0].k)
- }
- want = want[1:]
- }
- if len(want) > 0 {
- return fmt.Errorf("iterator ended early, want key %q", want[0])
- }
- return nil
-}
-
-func TestDifferenceIterator(t *testing.T) {
- dba := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- triea := NewEmpty(dba)
- for _, val := range testdata1 {
- triea.MustUpdate([]byte(val.k), []byte(val.v))
- }
- rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
- triea, _ = New(TrieID(rootA), dba)
-
- dbb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trieb := NewEmpty(dbb)
- for _, val := range testdata2 {
- trieb.MustUpdate([]byte(val.k), []byte(val.v))
- }
- rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
- trieb, _ = New(TrieID(rootB), dbb)
-
- found := make(map[string]string)
- di, _ := NewDifferenceIterator(triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil))
- it := NewIterator(di)
- for it.Next() {
- found[string(it.Key)] = string(it.Value)
- }
-
- all := []struct{ k, v string }{
- {"aardvark", "c"},
- {"barb", "bd"},
- {"bars", "be"},
- {"jars", "d"},
- }
- for _, item := range all {
- if found[item.k] != item.v {
- t.Errorf("iterator value mismatch for %s: got %v want %v", item.k, found[item.k], item.v)
- }
- }
- if len(found) != len(all) {
- t.Errorf("iterator count mismatch: got %d values, want %d", len(found), len(all))
- }
-}
-
-func TestUnionIterator(t *testing.T) {
- dba := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- triea := NewEmpty(dba)
- for _, val := range testdata1 {
- triea.MustUpdate([]byte(val.k), []byte(val.v))
- }
- rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
- triea, _ = New(TrieID(rootA), dba)
-
- dbb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trieb := NewEmpty(dbb)
- for _, val := range testdata2 {
- trieb.MustUpdate([]byte(val.k), []byte(val.v))
- }
- rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
- trieb, _ = New(TrieID(rootB), dbb)
-
- di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)})
- it := NewIterator(di)
-
- all := []struct{ k, v string }{
- {"aardvark", "c"},
- {"barb", "ba"},
- {"barb", "bd"},
- {"bard", "bc"},
- {"bars", "bb"},
- {"bars", "be"},
- {"bar", "b"},
- {"fab", "z"},
- {"food", "ab"},
- {"foos", "aa"},
- {"foo", "a"},
- {"jars", "d"},
- }
-
- for i, kv := range all {
- if !it.Next() {
- t.Errorf("Iterator ends prematurely at element %d", i)
- }
- if kv.k != string(it.Key) {
- t.Errorf("iterator value mismatch for element %d: got key %s want %s", i, it.Key, kv.k)
- }
- if kv.v != string(it.Value) {
- t.Errorf("iterator value mismatch for element %d: got value %s want %s", i, it.Value, kv.v)
- }
- }
- if it.Next() {
- t.Errorf("Iterator returned extra values.")
- }
-}
-
-func TestIteratorNoDups(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- tr := NewEmpty(db)
- for _, val := range testdata1 {
- tr.MustUpdate([]byte(val.k), []byte(val.v))
- }
- checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
-}
-
-// This test checks that nodeIterator.Next can be retried after inserting missing trie nodes.
-func TestIteratorContinueAfterError(t *testing.T) {
- testIteratorContinueAfterError(t, false, rawdb.HashScheme)
- testIteratorContinueAfterError(t, true, rawdb.HashScheme)
- testIteratorContinueAfterError(t, false, rawdb.PathScheme)
- testIteratorContinueAfterError(t, true, rawdb.PathScheme)
-}
-
-func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
- diskdb := rawdb.NewMemoryDatabase()
- tdb := newTestDatabase(diskdb, scheme)
-
- tr := NewEmpty(tdb)
- for _, val := range testdata1 {
- tr.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := tr.Commit(false)
- tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- if !memonly {
- tdb.Commit(root)
- }
- tr, _ = New(TrieID(root), tdb)
- wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
-
- var (
- paths [][]byte
- hashes []common.Hash
- )
- if memonly {
- for path, n := range nodes.Nodes {
- paths = append(paths, []byte(path))
- hashes = append(hashes, n.Hash)
- }
- } else {
- it := diskdb.NewIterator(nil, nil)
- for it.Next() {
- ok, path, hash := isTrieNode(tdb.Scheme(), it.Key(), it.Value())
- if !ok {
- continue
- }
- paths = append(paths, path)
- hashes = append(hashes, hash)
- }
- it.Release()
- }
- for i := 0; i < 20; i++ {
- // Create trie that will load all nodes from DB.
- tr, _ := New(TrieID(tr.Hash()), tdb)
-
- // Remove a random node from the database. It can't be the root node
- // because that one is already loaded.
- var (
- rval []byte
- rpath []byte
- rhash common.Hash
- )
- for {
- if memonly {
- rpath = paths[rand.Intn(len(paths))]
- n := nodes.Nodes[string(rpath)]
- if n == nil {
- continue
- }
- rhash = n.Hash
- } else {
- index := rand.Intn(len(paths))
- rpath = paths[index]
- rhash = hashes[index]
- }
- if rhash != tr.Hash() {
- break
- }
- }
- if memonly {
- tr.reader.banned = map[string]struct{}{string(rpath): {}}
- } else {
- rval = rawdb.ReadTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme())
- rawdb.DeleteTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme())
- }
- // Iterate until the error is hit.
- seen := make(map[string]bool)
- it := tr.MustNodeIterator(nil)
- checkIteratorNoDups(t, it, seen)
- missing, ok := it.Error().(*MissingNodeError)
- if !ok || missing.NodeHash != rhash {
- t.Fatal("didn't hit missing node, got", it.Error())
- }
-
- // Add the node back and continue iteration.
- if memonly {
- delete(tr.reader.banned, string(rpath))
- } else {
- rawdb.WriteTrieNode(diskdb, common.Hash{}, rpath, rhash, rval, tdb.Scheme())
- }
- checkIteratorNoDups(t, it, seen)
- if it.Error() != nil {
- t.Fatal("unexpected error", it.Error())
- }
- if len(seen) != wantNodeCount {
- t.Fatal("wrong node iteration count, got", len(seen), "want", wantNodeCount)
- }
- }
-}
-
-// Similar to the test above, this one checks that failure to create nodeIterator at a
-// certain key prefix behaves correctly when Next is called. The expectation is that Next
-// should retry seeking before returning true for the first time.
-func TestIteratorContinueAfterSeekError(t *testing.T) {
- testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme)
- testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme)
- testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
- testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
-}
-
-func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) {
- // Commit test trie to db, then remove the node containing "bars".
- var (
- barNodePath []byte
- barNodeHash = common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e")
- )
- diskdb := rawdb.NewMemoryDatabase()
- triedb := newTestDatabase(diskdb, scheme)
- ctr := NewEmpty(triedb)
- for _, val := range testdata1 {
- ctr.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := ctr.Commit(false)
- for path, n := range nodes.Nodes {
- if n.Hash == barNodeHash {
- barNodePath = []byte(path)
- break
- }
- }
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- if !memonly {
- triedb.Commit(root)
- }
- var (
- barNodeBlob []byte
- )
- tr, _ := New(TrieID(root), triedb)
- if memonly {
- tr.reader.banned = map[string]struct{}{string(barNodePath): {}}
- } else {
- barNodeBlob = rawdb.ReadTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme())
- rawdb.DeleteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme())
- }
- // Create a new iterator that seeks to "bars". Seeking can't proceed because
- // the node is missing.
- it := tr.MustNodeIterator([]byte("bars"))
- missing, ok := it.Error().(*MissingNodeError)
- if !ok {
- t.Fatal("want MissingNodeError, got", it.Error())
- } else if missing.NodeHash != barNodeHash {
- t.Fatal("wrong node missing")
- }
- // Reinsert the missing node.
- if memonly {
- delete(tr.reader.banned, string(barNodePath))
- } else {
- rawdb.WriteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, barNodeBlob, triedb.Scheme())
- }
- // Check that iteration produces the right set of values.
- if err := checkIteratorOrder(testdata1[2:], NewIterator(it)); err != nil {
- t.Fatal(err)
- }
-}
-
-func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) int {
- if seen == nil {
- seen = make(map[string]bool)
- }
- for it.Next(true) {
- if seen[string(it.Path())] {
- t.Fatalf("iterator visited node path %x twice", it.Path())
- }
- seen[string(it.Path())] = true
- }
- return len(seen)
-}
-
-func TestIteratorNodeBlob(t *testing.T) {
- testIteratorNodeBlob(t, rawdb.HashScheme)
- testIteratorNodeBlob(t, rawdb.PathScheme)
-}
-
-func testIteratorNodeBlob(t *testing.T, scheme string) {
- var (
- db = rawdb.NewMemoryDatabase()
- triedb = newTestDatabase(db, scheme)
- trie = NewEmpty(triedb)
- )
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- all := make(map[string]string)
- for _, val := range vals {
- all[val.k] = val.v
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- triedb.Commit(root)
-
- var found = make(map[common.Hash][]byte)
- trie, _ = New(TrieID(root), triedb)
- it := trie.MustNodeIterator(nil)
- for it.Next(true) {
- if it.Hash() == (common.Hash{}) {
- continue
- }
- found[it.Hash()] = it.NodeBlob()
- }
-
- dbIter := db.NewIterator(nil, nil)
- defer dbIter.Release()
-
- var count int
- for dbIter.Next() {
- ok, _, _ := isTrieNode(triedb.Scheme(), dbIter.Key(), dbIter.Value())
- if !ok {
- continue
- }
- got, present := found[crypto.Keccak256Hash(dbIter.Value())]
- if !present {
- t.Fatal("Miss trie node")
- }
- if !bytes.Equal(got, dbIter.Value()) {
- t.Fatalf("Unexpected trie node want %v got %v", dbIter.Value(), got)
- }
- count += 1
- }
- if count != len(found) {
- t.Fatal("Find extra trie node via iterator")
- }
-}
-
-// isTrieNode is a helper function which reports if the provided
-// database entry belongs to a trie node or not. Note in tests
-// only single layer trie is used, namely storage trie is not
-// considered at all.
-func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) {
- var (
- path []byte
- hash common.Hash
- )
- if scheme == rawdb.HashScheme {
- ok := rawdb.IsLegacyTrieNode(key, val)
- if !ok {
- return false, nil, common.Hash{}
- }
- hash = common.BytesToHash(key)
- } else {
- ok, remain := rawdb.ResolveAccountTrieNodeKey(key)
- if !ok {
- return false, nil, common.Hash{}
- }
- path = common.CopyBytes(remain)
- hash = crypto.Keccak256Hash(val)
- }
- return true, path, hash
-}
-
-func BenchmarkIterator(b *testing.B) {
- diskDb, srcDb, tr, _ := makeTestTrie(rawdb.HashScheme)
- root := tr.Hash()
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- if err := checkTrieConsistency(diskDb, srcDb.Scheme(), root, false); err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/trie/node.go b/trie/node.go
deleted file mode 100644
index 523a7b3497..0000000000
--- a/trie/node.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "fmt"
- "io"
- "strings"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/rlp"
-)
-
-var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
-
-type node interface {
- cache() (hashNode, bool)
- encode(w rlp.EncoderBuffer)
- fstring(string) string
-}
-
-type (
- fullNode struct {
- Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
- flags nodeFlag
- }
- shortNode struct {
- Key []byte
- Val node
- flags nodeFlag
- }
- hashNode []byte
- valueNode []byte
-)
-
-// nilValueNode is used when collapsing internal trie nodes for hashing, since
-// unset children need to serialize correctly.
-var nilValueNode = valueNode(nil)
-
-// EncodeRLP encodes a full node into the consensus RLP format.
-func (n *fullNode) EncodeRLP(w io.Writer) error {
- eb := rlp.NewEncoderBuffer(w)
- n.encode(eb)
- return eb.Flush()
-}
-
-func (n *fullNode) copy() *fullNode { copy := *n; return © }
-func (n *shortNode) copy() *shortNode { copy := *n; return © }
-
-// nodeFlag contains caching-related metadata about a node.
-type nodeFlag struct {
- hash hashNode // cached hash of the node (may be nil)
- dirty bool // whether the node has changes that must be written to the database
-}
-
-func (n *fullNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
-func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
-func (n hashNode) cache() (hashNode, bool) { return nil, true }
-func (n valueNode) cache() (hashNode, bool) { return nil, true }
-
-// Pretty printing.
-func (n *fullNode) String() string { return n.fstring("") }
-func (n *shortNode) String() string { return n.fstring("") }
-func (n hashNode) String() string { return n.fstring("") }
-func (n valueNode) String() string { return n.fstring("") }
-
-func (n *fullNode) fstring(ind string) string {
- resp := fmt.Sprintf("[\n%s ", ind)
- for i, node := range &n.Children {
- if node == nil {
- resp += fmt.Sprintf("%s: ", indices[i])
- } else {
- resp += fmt.Sprintf("%s: %v", indices[i], node.fstring(ind+" "))
- }
- }
- return resp + fmt.Sprintf("\n%s] ", ind)
-}
-func (n *shortNode) fstring(ind string) string {
- return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+" "))
-}
-func (n hashNode) fstring(ind string) string {
- return fmt.Sprintf("<%x> ", []byte(n))
-}
-func (n valueNode) fstring(ind string) string {
- return fmt.Sprintf("%x ", []byte(n))
-}
-
-// rawNode is a simple binary blob used to differentiate between collapsed trie
-// nodes and already encoded RLP binary blobs (while at the same time store them
-// in the same cache fields).
-type rawNode []byte
-
-func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
-func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
-func (n rawNode) EncodeRLP(w io.Writer) error {
- _, err := w.Write(n)
- return err
-}
-
-// mustDecodeNode is a wrapper of decodeNode and panic if any error is encountered.
-func mustDecodeNode(hash, buf []byte) node {
- n, err := decodeNode(hash, buf)
- if err != nil {
- panic(fmt.Sprintf("node %x: %v", hash, err))
- }
- return n
-}
-
-// mustDecodeNodeUnsafe is a wrapper of decodeNodeUnsafe and panic if any error is
-// encountered.
-func mustDecodeNodeUnsafe(hash, buf []byte) node {
- n, err := decodeNodeUnsafe(hash, buf)
- if err != nil {
- panic(fmt.Sprintf("node %x: %v", hash, err))
- }
- return n
-}
-
-// decodeNode parses the RLP encoding of a trie node. It will deep-copy the passed
-// byte slice for decoding, so it's safe to modify the byte slice afterwards. The-
-// decode performance of this function is not optimal, but it is suitable for most
-// scenarios with low performance requirements and hard to determine whether the
-// byte slice be modified or not.
-func decodeNode(hash, buf []byte) (node, error) {
- return decodeNodeUnsafe(hash, common.CopyBytes(buf))
-}
-
-// decodeNodeUnsafe parses the RLP encoding of a trie node. The passed byte slice
-// will be directly referenced by node without bytes deep copy, so the input MUST
-// not be changed after.
-func decodeNodeUnsafe(hash, buf []byte) (node, error) {
- if len(buf) == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- elems, _, err := rlp.SplitList(buf)
- if err != nil {
- return nil, fmt.Errorf("decode error: %v", err)
- }
- switch c, _ := rlp.CountValues(elems); c {
- case 2:
- n, err := decodeShort(hash, elems)
- return n, wrapError(err, "short")
- case 17:
- n, err := decodeFull(hash, elems)
- return n, wrapError(err, "full")
- default:
- return nil, fmt.Errorf("invalid number of list elements: %v", c)
- }
-}
-
-func decodeShort(hash, elems []byte) (node, error) {
- kbuf, rest, err := rlp.SplitString(elems)
- if err != nil {
- return nil, err
- }
- flag := nodeFlag{hash: hash}
- key := compactToHex(kbuf)
- if hasTerm(key) {
- // value node
- val, _, err := rlp.SplitString(rest)
- if err != nil {
- return nil, fmt.Errorf("invalid value node: %v", err)
- }
- return &shortNode{key, valueNode(val), flag}, nil
- }
- r, _, err := decodeRef(rest)
- if err != nil {
- return nil, wrapError(err, "val")
- }
- return &shortNode{key, r, flag}, nil
-}
-
-func decodeFull(hash, elems []byte) (*fullNode, error) {
- n := &fullNode{flags: nodeFlag{hash: hash}}
- for i := 0; i < 16; i++ {
- cld, rest, err := decodeRef(elems)
- if err != nil {
- return n, wrapError(err, fmt.Sprintf("[%d]", i))
- }
- n.Children[i], elems = cld, rest
- }
- val, _, err := rlp.SplitString(elems)
- if err != nil {
- return n, err
- }
- if len(val) > 0 {
- n.Children[16] = valueNode(val)
- }
- return n, nil
-}
-
-const hashLen = len(common.Hash{})
-
-func decodeRef(buf []byte) (node, []byte, error) {
- kind, val, rest, err := rlp.Split(buf)
- if err != nil {
- return nil, buf, err
- }
- switch {
- case kind == rlp.List:
- // 'embedded' node reference. The encoding must be smaller
- // than a hash in order to be valid.
- if size := len(buf) - len(rest); size > hashLen {
- err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
- return nil, buf, err
- }
- n, err := decodeNode(nil, buf)
- return n, rest, err
- case kind == rlp.String && len(val) == 0:
- // empty node
- return nil, rest, nil
- case kind == rlp.String && len(val) == 32:
- return hashNode(val), rest, nil
- default:
- return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0 or 32)", len(val))
- }
-}
-
-// wraps a decoding error with information about the path to the
-// invalid child node (for debugging encoding issues).
-type decodeError struct {
- what error
- stack []string
-}
-
-func wrapError(err error, ctx string) error {
- if err == nil {
- return nil
- }
- if decErr, ok := err.(*decodeError); ok {
- decErr.stack = append(decErr.stack, ctx)
- return decErr
- }
- return &decodeError{err, []string{ctx}}
-}
-
-func (err *decodeError) Error() string {
- return fmt.Sprintf("%v (decode path: %s)", err.what, strings.Join(err.stack, "<-"))
-}
diff --git a/trie/node_enc.go b/trie/node_enc.go
deleted file mode 100644
index 6cd6aba3c9..0000000000
--- a/trie/node_enc.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// (c) 2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/libevm/rlp"
-)
-
-func nodeToBytes(n node) []byte {
- w := rlp.NewEncoderBuffer(nil)
- n.encode(w)
- result := w.ToBytes()
- w.Flush()
- return result
-}
-
-func (n *fullNode) encode(w rlp.EncoderBuffer) {
- offset := w.List()
- for _, c := range n.Children {
- if c != nil {
- c.encode(w)
- } else {
- w.Write(rlp.EmptyString)
- }
- }
- w.ListEnd(offset)
-}
-
-func (n *shortNode) encode(w rlp.EncoderBuffer) {
- offset := w.List()
- w.WriteBytes(n.Key)
- if n.Val != nil {
- n.Val.encode(w)
- } else {
- w.Write(rlp.EmptyString)
- }
- w.ListEnd(offset)
-}
-
-func (n hashNode) encode(w rlp.EncoderBuffer) {
- w.WriteBytes(n)
-}
-
-func (n valueNode) encode(w rlp.EncoderBuffer) {
- w.WriteBytes(n)
-}
-
-func (n rawNode) encode(w rlp.EncoderBuffer) {
- w.Write(n)
-}
diff --git a/trie/node_test.go b/trie/node_test.go
deleted file mode 100644
index 51dd126bde..0000000000
--- a/trie/node_test.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "testing"
-
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
-)
-
-func newTestFullNode(v []byte) []interface{} {
- fullNodeData := []interface{}{}
- for i := 0; i < 16; i++ {
- k := bytes.Repeat([]byte{byte(i + 1)}, 32)
- fullNodeData = append(fullNodeData, k)
- }
- fullNodeData = append(fullNodeData, v)
- return fullNodeData
-}
-
-func TestDecodeNestedNode(t *testing.T) {
- fullNodeData := newTestFullNode([]byte("fullnode"))
-
- data := [][]byte{}
- for i := 0; i < 16; i++ {
- data = append(data, nil)
- }
- data = append(data, []byte("subnode"))
- fullNodeData[15] = data
-
- buf := bytes.NewBuffer([]byte{})
- rlp.Encode(buf, fullNodeData)
-
- if _, err := decodeNode([]byte("testdecode"), buf.Bytes()); err != nil {
- t.Fatalf("decode nested full node err: %v", err)
- }
-}
-
-func TestDecodeFullNodeWrongSizeChild(t *testing.T) {
- fullNodeData := newTestFullNode([]byte("wrongsizechild"))
- fullNodeData[0] = []byte("00")
- buf := bytes.NewBuffer([]byte{})
- rlp.Encode(buf, fullNodeData)
-
- _, err := decodeNode([]byte("testdecode"), buf.Bytes())
- if _, ok := err.(*decodeError); !ok {
- t.Fatalf("decodeNode returned wrong err: %v", err)
- }
-}
-
-func TestDecodeFullNodeWrongNestedFullNode(t *testing.T) {
- fullNodeData := newTestFullNode([]byte("fullnode"))
-
- data := [][]byte{}
- for i := 0; i < 16; i++ {
- data = append(data, []byte("123456"))
- }
- data = append(data, []byte("subnode"))
- fullNodeData[15] = data
-
- buf := bytes.NewBuffer([]byte{})
- rlp.Encode(buf, fullNodeData)
-
- _, err := decodeNode([]byte("testdecode"), buf.Bytes())
- if _, ok := err.(*decodeError); !ok {
- t.Fatalf("decodeNode returned wrong err: %v", err)
- }
-}
-
-func TestDecodeFullNode(t *testing.T) {
- fullNodeData := newTestFullNode([]byte("decodefullnode"))
- buf := bytes.NewBuffer([]byte{})
- rlp.Encode(buf, fullNodeData)
-
- _, err := decodeNode([]byte("testdecode"), buf.Bytes())
- if err != nil {
- t.Fatalf("decode full node err: %v", err)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkEncodeShortNode
-// BenchmarkEncodeShortNode-8 16878850 70.81 ns/op 48 B/op 1 allocs/op
-func BenchmarkEncodeShortNode(b *testing.B) {
- node := &shortNode{
- Key: []byte{0x1, 0x2},
- Val: hashNode(randBytes(32)),
- }
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- nodeToBytes(node)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkEncodeFullNode
-// BenchmarkEncodeFullNode-8 4323273 284.4 ns/op 576 B/op 1 allocs/op
-func BenchmarkEncodeFullNode(b *testing.B) {
- node := &fullNode{}
- for i := 0; i < 16; i++ {
- node.Children[i] = hashNode(randBytes(32))
- }
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- nodeToBytes(node)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkDecodeShortNode
-// BenchmarkDecodeShortNode-8 7925638 151.0 ns/op 157 B/op 4 allocs/op
-func BenchmarkDecodeShortNode(b *testing.B) {
- node := &shortNode{
- Key: []byte{0x1, 0x2},
- Val: hashNode(randBytes(32)),
- }
- blob := nodeToBytes(node)
- hash := crypto.Keccak256(blob)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- mustDecodeNode(hash, blob)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkDecodeShortNodeUnsafe
-// BenchmarkDecodeShortNodeUnsafe-8 9027476 128.6 ns/op 109 B/op 3 allocs/op
-func BenchmarkDecodeShortNodeUnsafe(b *testing.B) {
- node := &shortNode{
- Key: []byte{0x1, 0x2},
- Val: hashNode(randBytes(32)),
- }
- blob := nodeToBytes(node)
- hash := crypto.Keccak256(blob)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- mustDecodeNodeUnsafe(hash, blob)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkDecodeFullNode
-// BenchmarkDecodeFullNode-8 1597462 761.9 ns/op 1280 B/op 18 allocs/op
-func BenchmarkDecodeFullNode(b *testing.B) {
- node := &fullNode{}
- for i := 0; i < 16; i++ {
- node.Children[i] = hashNode(randBytes(32))
- }
- blob := nodeToBytes(node)
- hash := crypto.Keccak256(blob)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- mustDecodeNode(hash, blob)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkDecodeFullNodeUnsafe
-// BenchmarkDecodeFullNodeUnsafe-8 1789070 687.1 ns/op 704 B/op 17 allocs/op
-func BenchmarkDecodeFullNodeUnsafe(b *testing.B) {
- node := &fullNode{}
- for i := 0; i < 16; i++ {
- node.Children[i] = hashNode(randBytes(32))
- }
- blob := nodeToBytes(node)
- hash := crypto.Keccak256(blob)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- mustDecodeNodeUnsafe(hash, blob)
- }
-}
diff --git a/trie/proof.go b/trie/proof.go
deleted file mode 100644
index 0b2ee57796..0000000000
--- a/trie/proof.go
+++ /dev/null
@@ -1,626 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-// Prove constructs a merkle proof for key. The result contains all encoded nodes
-// on the path to the value at key. The value itself is also included in the last
-// node and can be retrieved by verifying the proof.
-//
-// If the trie does not contain a value for key, the returned proof contains all
-// nodes of the longest existing prefix of the key (at least the root node), ending
-// with the node that proves the absence of the key.
-func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return ErrCommitted
- }
- // Collect all nodes on the path to key.
- var (
- prefix []byte
- nodes []node
- tn = t.root
- )
- key = keybytesToHex(key)
- for len(key) > 0 && tn != nil {
- switch n := tn.(type) {
- case *shortNode:
- if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
- // The trie doesn't contain the key.
- tn = nil
- } else {
- tn = n.Val
- prefix = append(prefix, n.Key...)
- key = key[len(n.Key):]
- }
- nodes = append(nodes, n)
- case *fullNode:
- tn = n.Children[key[0]]
- prefix = append(prefix, key[0])
- key = key[1:]
- nodes = append(nodes, n)
- case hashNode:
- // Retrieve the specified node from the underlying node reader.
- // trie.resolveAndTrack is not used since in that function the
- // loaded blob will be tracked, while it's not required here since
- // all loaded nodes won't be linked to trie at all and track nodes
- // may lead to out-of-memory issue.
- blob, err := t.reader.node(prefix, common.BytesToHash(n))
- if err != nil {
- log.Error("Unhandled trie error in Trie.Prove", "err", err)
- return err
- }
- // The raw-blob format nodes are loaded either from the
- // clean cache or the database, they are all in their own
- // copy and safe to use unsafe decoder.
- tn = mustDecodeNodeUnsafe(n, blob)
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
- }
- }
- hasher := newHasher(false)
- defer returnHasherToPool(hasher)
-
- for i, n := range nodes {
- var hn node
- n, hn = hasher.proofHash(n)
- if hash, ok := hn.(hashNode); ok || i == 0 {
- // If the node's database encoding is a hash (or is the
- // root node), it becomes a proof element.
- enc := nodeToBytes(n)
- if !ok {
- hash = hasher.hashData(enc)
- }
- proofDb.Put(hash, enc)
- }
- }
- return nil
-}
-
-// Prove constructs a merkle proof for key. The result contains all encoded nodes
-// on the path to the value at key. The value itself is also included in the last
-// node and can be retrieved by verifying the proof.
-//
-// If the trie does not contain a value for key, the returned proof contains all
-// nodes of the longest existing prefix of the key (at least the root node), ending
-// with the node that proves the absence of the key.
-func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
- return t.trie.Prove(key, proofDb)
-}
-
-// VerifyProof checks merkle proofs. The given proof must contain the value for
-// key in a trie with the given root hash. VerifyProof returns an error if the
-// proof contains invalid trie nodes or the wrong value.
-func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, err error) {
- key = keybytesToHex(key)
- wantHash := rootHash
- for i := 0; ; i++ {
- buf, _ := proofDb.Get(wantHash[:])
- if buf == nil {
- return nil, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash)
- }
- n, err := decodeNode(wantHash[:], buf)
- if err != nil {
- return nil, fmt.Errorf("bad proof node %d: %v", i, err)
- }
- keyrest, cld := get(n, key, true)
- switch cld := cld.(type) {
- case nil:
- // The trie doesn't contain the key.
- return nil, nil
- case hashNode:
- key = keyrest
- copy(wantHash[:], cld)
- case valueNode:
- return cld, nil
- }
- }
-}
-
-// proofToPath converts a merkle proof to trie node path. The main purpose of
-// this function is recovering a node path from the merkle proof stream. All
-// necessary nodes will be resolved and leave the remaining as hashnode.
-//
-// The given edge proof is allowed to be an existent or non-existent proof.
-func proofToPath(rootHash common.Hash, root node, key []byte, proofDb ethdb.KeyValueReader, allowNonExistent bool) (node, []byte, error) {
- // resolveNode retrieves and resolves trie node from merkle proof stream
- resolveNode := func(hash common.Hash) (node, error) {
- buf, _ := proofDb.Get(hash[:])
- if buf == nil {
- return nil, fmt.Errorf("proof node (hash %064x) missing", hash)
- }
- n, err := decodeNode(hash[:], buf)
- if err != nil {
- return nil, fmt.Errorf("bad proof node %v", err)
- }
- return n, err
- }
- // If the root node is empty, resolve it first.
- // Root node must be included in the proof.
- if root == nil {
- n, err := resolveNode(rootHash)
- if err != nil {
- return nil, nil, err
- }
- root = n
- }
- var (
- err error
- child, parent node
- keyrest []byte
- valnode []byte
- )
- key, parent = keybytesToHex(key), root
- for {
- keyrest, child = get(parent, key, false)
- switch cld := child.(type) {
- case nil:
- // The trie doesn't contain the key. It's possible
- // the proof is a non-existing proof, but at least
- // we can prove all resolved nodes are correct, it's
- // enough for us to prove range.
- if allowNonExistent {
- return root, nil, nil
- }
- return nil, nil, errors.New("the node is not contained in trie")
- case *shortNode:
- key, parent = keyrest, child // Already resolved
- continue
- case *fullNode:
- key, parent = keyrest, child // Already resolved
- continue
- case hashNode:
- child, err = resolveNode(common.BytesToHash(cld))
- if err != nil {
- return nil, nil, err
- }
- case valueNode:
- valnode = cld
- }
- // Link the parent and child.
- switch pnode := parent.(type) {
- case *shortNode:
- pnode.Val = child
- case *fullNode:
- pnode.Children[key[0]] = child
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", pnode, pnode))
- }
- if len(valnode) > 0 {
- return root, valnode, nil // The whole path is resolved
- }
- key, parent = keyrest, child
- }
-}
-
-// unsetInternal removes all internal node references(hashnode, embedded node).
-// It should be called after a trie is constructed with two edge paths. Also
-// the given boundary keys must be the one used to construct the edge paths.
-//
-// It's the key step for range proof. All visited nodes should be marked dirty
-// since the node content might be modified. Besides it can happen that some
-// fullnodes only have one child which is disallowed. But if the proof is valid,
-// the missing children will be filled, otherwise it will be thrown anyway.
-//
-// Note we have the assumption here the given boundary keys are different
-// and right is larger than left.
-func unsetInternal(n node, left []byte, right []byte) (bool, error) {
- left, right = keybytesToHex(left), keybytesToHex(right)
-
- // Step down to the fork point. There are two scenarios can happen:
- // - the fork point is a shortnode: either the key of left proof or
- // right proof doesn't match with shortnode's key.
- // - the fork point is a fullnode: both two edge proofs are allowed
- // to point to a non-existent key.
- var (
- pos = 0
- parent node
-
- // fork indicator, 0 means no fork, -1 means proof is less, 1 means proof is greater
- shortForkLeft, shortForkRight int
- )
-findFork:
- for {
- switch rn := (n).(type) {
- case *shortNode:
- rn.flags = nodeFlag{dirty: true}
-
- // If either the key of left proof or right proof doesn't match with
- // shortnode, stop here and the forkpoint is the shortnode.
- if len(left)-pos < len(rn.Key) {
- shortForkLeft = bytes.Compare(left[pos:], rn.Key)
- } else {
- shortForkLeft = bytes.Compare(left[pos:pos+len(rn.Key)], rn.Key)
- }
- if len(right)-pos < len(rn.Key) {
- shortForkRight = bytes.Compare(right[pos:], rn.Key)
- } else {
- shortForkRight = bytes.Compare(right[pos:pos+len(rn.Key)], rn.Key)
- }
- if shortForkLeft != 0 || shortForkRight != 0 {
- break findFork
- }
- parent = n
- n, pos = rn.Val, pos+len(rn.Key)
- case *fullNode:
- rn.flags = nodeFlag{dirty: true}
-
- // If either the node pointed by left proof or right proof is nil,
- // stop here and the forkpoint is the fullnode.
- leftnode, rightnode := rn.Children[left[pos]], rn.Children[right[pos]]
- if leftnode == nil || rightnode == nil || leftnode != rightnode {
- break findFork
- }
- parent = n
- n, pos = rn.Children[left[pos]], pos+1
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", n, n))
- }
- }
- switch rn := n.(type) {
- case *shortNode:
- // There can have these five scenarios:
- // - both proofs are less than the trie path => no valid range
- // - both proofs are greater than the trie path => no valid range
- // - left proof is less and right proof is greater => valid range, unset the shortnode entirely
- // - left proof points to the shortnode, but right proof is greater
- // - right proof points to the shortnode, but left proof is less
- if shortForkLeft == -1 && shortForkRight == -1 {
- return false, errors.New("empty range")
- }
- if shortForkLeft == 1 && shortForkRight == 1 {
- return false, errors.New("empty range")
- }
- if shortForkLeft != 0 && shortForkRight != 0 {
- // The fork point is root node, unset the entire trie
- if parent == nil {
- return true, nil
- }
- parent.(*fullNode).Children[left[pos-1]] = nil
- return false, nil
- }
- // Only one proof points to non-existent key.
- if shortForkRight != 0 {
- if _, ok := rn.Val.(valueNode); ok {
- // The fork point is root node, unset the entire trie
- if parent == nil {
- return true, nil
- }
- parent.(*fullNode).Children[left[pos-1]] = nil
- return false, nil
- }
- return false, unset(rn, rn.Val, left[pos:], len(rn.Key), false)
- }
- if shortForkLeft != 0 {
- if _, ok := rn.Val.(valueNode); ok {
- // The fork point is root node, unset the entire trie
- if parent == nil {
- return true, nil
- }
- parent.(*fullNode).Children[right[pos-1]] = nil
- return false, nil
- }
- return false, unset(rn, rn.Val, right[pos:], len(rn.Key), true)
- }
- return false, nil
- case *fullNode:
- // unset all internal nodes in the forkpoint
- for i := left[pos] + 1; i < right[pos]; i++ {
- rn.Children[i] = nil
- }
- if err := unset(rn, rn.Children[left[pos]], left[pos:], 1, false); err != nil {
- return false, err
- }
- if err := unset(rn, rn.Children[right[pos]], right[pos:], 1, true); err != nil {
- return false, err
- }
- return false, nil
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", n, n))
- }
-}
-
-// unset removes all internal node references either the left most or right most.
-// It can meet these scenarios:
-//
-// - The given path is existent in the trie, unset the associated nodes with the
-// specific direction
-// - The given path is non-existent in the trie
-// - the fork point is a fullnode, the corresponding child pointed by path
-// is nil, return
-// - the fork point is a shortnode, the shortnode is included in the range,
-// keep the entire branch and return.
-// - the fork point is a shortnode, the shortnode is excluded in the range,
-// unset the entire branch.
-func unset(parent node, child node, key []byte, pos int, removeLeft bool) error {
- switch cld := child.(type) {
- case *fullNode:
- if removeLeft {
- for i := 0; i < int(key[pos]); i++ {
- cld.Children[i] = nil
- }
- cld.flags = nodeFlag{dirty: true}
- } else {
- for i := key[pos] + 1; i < 16; i++ {
- cld.Children[i] = nil
- }
- cld.flags = nodeFlag{dirty: true}
- }
- return unset(cld, cld.Children[key[pos]], key, pos+1, removeLeft)
- case *shortNode:
- if len(key[pos:]) < len(cld.Key) || !bytes.Equal(cld.Key, key[pos:pos+len(cld.Key)]) {
- // Find the fork point, it's an non-existent branch.
- if removeLeft {
- if bytes.Compare(cld.Key, key[pos:]) < 0 {
- // The key of fork shortnode is less than the path
- // (it belongs to the range), unset the entire
- // branch. The parent must be a fullnode.
- fn := parent.(*fullNode)
- fn.Children[key[pos-1]] = nil
- }
- //else {
- // The key of fork shortnode is greater than the
- // path(it doesn't belong to the range), keep
- // it with the cached hash available.
- //}
- } else {
- if bytes.Compare(cld.Key, key[pos:]) > 0 {
- // The key of fork shortnode is greater than the
- // path(it belongs to the range), unset the entries
- // branch. The parent must be a fullnode.
- fn := parent.(*fullNode)
- fn.Children[key[pos-1]] = nil
- }
- //else {
- // The key of fork shortnode is less than the
- // path(it doesn't belong to the range), keep
- // it with the cached hash available.
- //}
- }
- return nil
- }
- if _, ok := cld.Val.(valueNode); ok {
- fn := parent.(*fullNode)
- fn.Children[key[pos-1]] = nil
- return nil
- }
- cld.flags = nodeFlag{dirty: true}
- return unset(cld, cld.Val, key, pos+len(cld.Key), removeLeft)
- case nil:
- // If the node is nil, then it's a child of the fork point
- // fullnode(it's a non-existent branch).
- return nil
- default:
- panic("it shouldn't happen") // hashNode, valueNode
- }
-}
-
-// hasRightElement returns the indicator whether there exists more elements
-// on the right side of the given path. The given path can point to an existent
-// key or a non-existent one. This function has the assumption that the whole
-// path should already be resolved.
-func hasRightElement(node node, key []byte) bool {
- pos, key := 0, keybytesToHex(key)
- for node != nil {
- switch rn := node.(type) {
- case *fullNode:
- for i := key[pos] + 1; i < 16; i++ {
- if rn.Children[i] != nil {
- return true
- }
- }
- node, pos = rn.Children[key[pos]], pos+1
- case *shortNode:
- if len(key)-pos < len(rn.Key) || !bytes.Equal(rn.Key, key[pos:pos+len(rn.Key)]) {
- return bytes.Compare(rn.Key, key[pos:]) > 0
- }
- node, pos = rn.Val, pos+len(rn.Key)
- case valueNode:
- return false // We have resolved the whole path
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", node, node)) // hashnode
- }
- }
- return false
-}
-
-// VerifyRangeProof checks whether the given leaf nodes and edge proof
-// can prove the given trie leaves range is matched with the specific root.
-// Besides, the range should be consecutive (no gap inside) and monotonic
-// increasing.
-//
-// Note the given proof actually contains two edge proofs. Both of them can
-// be non-existent proofs. For example the first proof is for a non-existent
-// key 0x03, the last proof is for a non-existent key 0x10. The given batch
-// leaves are [0x04, 0x05, .. 0x09]. It's still feasible to prove the given
-// batch is valid.
-//
-// The firstKey is paired with firstProof, not necessarily the same as keys[0]
-// (unless firstProof is an existent proof). Similarly, lastKey and lastProof
-// are paired.
-//
-// Expect the normal case, this function can also be used to verify the following
-// range proofs:
-//
-// - All elements proof. In this case the proof can be nil, but the range should
-// be all the leaves in the trie.
-//
-// - One element proof. In this case no matter the edge proof is a non-existent
-// proof or not, we can always verify the correctness of the proof.
-//
-// - Zero element proof. In this case a single non-existent proof is enough to prove.
-// Besides, if there are still some other leaves available on the right side, then
-// an error will be returned.
-//
-// Except returning the error to indicate the proof is valid or not, the function will
-// also return a flag to indicate whether there exists more accounts/slots in the trie.
-//
-// Note: This method does not verify that the proof is of minimal form. If the input
-// proofs are 'bloated' with neighbour leaves or random data, aside from the 'useful'
-// data, then the proof will still be accepted.
-func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) {
- if len(keys) != len(values) {
- return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
- }
- // Ensure the received batch is monotonic increasing and contains no deletions
- for i := 0; i < len(keys)-1; i++ {
- if bytes.Compare(keys[i], keys[i+1]) >= 0 {
- return false, errors.New("range is not monotonically increasing")
- }
- }
- for _, value := range values {
- if len(value) == 0 {
- return false, errors.New("range contains deletion")
- }
- }
- // Special case, there is no edge proof at all. The given range is expected
- // to be the whole leaf-set in the trie.
- if proof == nil {
- tr := NewStackTrie(nil)
- for index, key := range keys {
- tr.Update(key, values[index])
- }
- if have, want := tr.Hash(), rootHash; have != want {
- return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have)
- }
- return false, nil // No more elements
- }
- // Special case, there is a provided edge proof but zero key/value
- // pairs, ensure there are no more accounts / slots in the trie.
- if len(keys) == 0 {
- root, val, err := proofToPath(rootHash, nil, firstKey, proof, true)
- if err != nil {
- return false, err
- }
- if val != nil || hasRightElement(root, firstKey) {
- return false, errors.New("more entries available")
- }
- return false, nil
- }
- var lastKey = keys[len(keys)-1]
- // Special case, there is only one element and two edge keys are same.
- // In this case, we can't construct two edge paths. So handle it here.
- if len(keys) == 1 && bytes.Equal(firstKey, lastKey) {
- root, val, err := proofToPath(rootHash, nil, firstKey, proof, false)
- if err != nil {
- return false, err
- }
- if !bytes.Equal(firstKey, keys[0]) {
- return false, errors.New("correct proof but invalid key")
- }
- if !bytes.Equal(val, values[0]) {
- return false, errors.New("correct proof but invalid data")
- }
- return hasRightElement(root, firstKey), nil
- }
- // Ok, in all other cases, we require two edge paths available.
- // First check the validity of edge keys.
- if bytes.Compare(firstKey, lastKey) >= 0 {
- return false, errors.New("invalid edge keys")
- }
- // todo(rjl493456442) different length edge keys should be supported
- if len(firstKey) != len(lastKey) {
- return false, fmt.Errorf("inconsistent edge keys (%d != %d)", len(firstKey), len(lastKey))
- }
- // Convert the edge proofs to edge trie paths. Then we can
- // have the same tree architecture with the original one.
- // For the first edge proof, non-existent proof is allowed.
- root, _, err := proofToPath(rootHash, nil, firstKey, proof, true)
- if err != nil {
- return false, err
- }
- // Pass the root node here, the second path will be merged
- // with the first one. For the last edge proof, non-existent
- // proof is also allowed.
- root, _, err = proofToPath(rootHash, root, lastKey, proof, true)
- if err != nil {
- return false, err
- }
- // Remove all internal references. All the removed parts should
- // be re-filled(or re-constructed) by the given leaves range.
- empty, err := unsetInternal(root, firstKey, lastKey)
- if err != nil {
- return false, err
- }
- // Rebuild the trie with the leaf stream, the shape of trie
- // should be same with the original one.
- tr := &Trie{root: root, reader: newEmptyReader(), tracer: newTracer()}
- if empty {
- tr.root = nil
- }
- for index, key := range keys {
- tr.Update(key, values[index])
- }
- if tr.Hash() != rootHash {
- return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
- }
- return hasRightElement(tr.root, keys[len(keys)-1]), nil
-}
-
-// get returns the child of the given node. Return nil if the
-// node with specified key doesn't exist at all.
-//
-// There is an additional flag `skipResolved`. If it's set then
-// all resolved nodes won't be returned.
-func get(tn node, key []byte, skipResolved bool) ([]byte, node) {
- for {
- switch n := tn.(type) {
- case *shortNode:
- if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
- return nil, nil
- }
- tn = n.Val
- key = key[len(n.Key):]
- if !skipResolved {
- return key, tn
- }
- case *fullNode:
- tn = n.Children[key[0]]
- key = key[1:]
- if !skipResolved {
- return key, tn
- }
- case hashNode:
- return key, n
- case nil:
- return key, nil
- case valueNode:
- return nil, n
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
- }
- }
-}
diff --git a/trie/proof_test.go b/trie/proof_test.go
deleted file mode 100644
index c266dc2595..0000000000
--- a/trie/proof_test.go
+++ /dev/null
@@ -1,1012 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- crand "crypto/rand"
- "encoding/binary"
- "fmt"
- mrand "math/rand"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/ethdb/memorydb"
- "golang.org/x/exp/slices"
-)
-
-// Prng is a pseudo random number generator seeded by strong randomness.
-// The randomness is printed on startup in order to make failures reproducible.
-var prng = initRnd()
-
-func initRnd() *mrand.Rand {
- var seed [8]byte
- crand.Read(seed[:])
- rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
- fmt.Printf("Seed: %x\n", seed)
- return rnd
-}
-
-func randBytes(n int) []byte {
- r := make([]byte, n)
- prng.Read(r)
- return r
-}
-
-// makeProvers creates Merkle trie provers based on different implementations to
-// test all variations.
-func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database {
- var provers []func(key []byte) *memorydb.Database
-
- // Create a direct trie based Merkle prover
- provers = append(provers, func(key []byte) *memorydb.Database {
- proof := memorydb.New()
- trie.Prove(key, proof)
- return proof
- })
- // Create a leaf iterator based Merkle prover
- provers = append(provers, func(key []byte) *memorydb.Database {
- proof := memorydb.New()
- if it := NewIterator(trie.MustNodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) {
- for _, p := range it.Prove() {
- proof.Put(crypto.Keccak256(p), p)
- }
- }
- return proof
- })
- return provers
-}
-
-func TestProof(t *testing.T) {
- trie, vals := randomTrie(500)
- root := trie.Hash()
- for i, prover := range makeProvers(trie) {
- for _, kv := range vals {
- proof := prover(kv.k)
- if proof == nil {
- t.Fatalf("prover %d: missing key %x while constructing proof", i, kv.k)
- }
- val, err := VerifyProof(root, kv.k, proof)
- if err != nil {
- t.Fatalf("prover %d: failed to verify proof for key %x: %v\nraw proof: %x", i, kv.k, err, proof)
- }
- if !bytes.Equal(val, kv.v) {
- t.Fatalf("prover %d: verified value mismatch for key %x: have %x, want %x", i, kv.k, val, kv.v)
- }
- }
- }
-}
-
-func TestOneElementProof(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- updateString(trie, "k", "v")
- for i, prover := range makeProvers(trie) {
- proof := prover([]byte("k"))
- if proof == nil {
- t.Fatalf("prover %d: nil proof", i)
- }
- if proof.Len() != 1 {
- t.Errorf("prover %d: proof should have one element", i)
- }
- val, err := VerifyProof(trie.Hash(), []byte("k"), proof)
- if err != nil {
- t.Fatalf("prover %d: failed to verify proof: %v\nraw proof: %x", i, err, proof)
- }
- if !bytes.Equal(val, []byte("v")) {
- t.Fatalf("prover %d: verified value mismatch: have %x, want 'k'", i, val)
- }
- }
-}
-
-func TestBadProof(t *testing.T) {
- trie, vals := randomTrie(800)
- root := trie.Hash()
- for i, prover := range makeProvers(trie) {
- for _, kv := range vals {
- proof := prover(kv.k)
- if proof == nil {
- t.Fatalf("prover %d: nil proof", i)
- }
- it := proof.NewIterator(nil, nil)
- for i, d := 0, mrand.Intn(proof.Len()); i <= d; i++ {
- it.Next()
- }
- key := it.Key()
- val, _ := proof.Get(key)
- proof.Delete(key)
- it.Release()
-
- mutateByte(val)
- proof.Put(crypto.Keccak256(val), val)
-
- if _, err := VerifyProof(root, kv.k, proof); err == nil {
- t.Fatalf("prover %d: expected proof to fail for key %x", i, kv.k)
- }
- }
- }
-}
-
-// Tests that missing keys can also be proven. The test explicitly uses a single
-// entry trie and checks for missing keys both before and after the single entry.
-func TestMissingKeyProof(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- updateString(trie, "k", "v")
-
- for i, key := range []string{"a", "j", "l", "z"} {
- proof := memorydb.New()
- trie.Prove([]byte(key), proof)
-
- if proof.Len() != 1 {
- t.Errorf("test %d: proof should have one element", i)
- }
- val, err := VerifyProof(trie.Hash(), []byte(key), proof)
- if err != nil {
- t.Fatalf("test %d: failed to verify proof: %v\nraw proof: %x", i, err, proof)
- }
- if val != nil {
- t.Fatalf("test %d: verified value mismatch: have %x, want nil", i, val)
- }
- }
-}
-
-// TestRangeProof tests normal range proof with both edge proofs
-// as the existent proof. The test cases are generated randomly.
-func TestRangeProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
- for i := 0; i < 500; i++ {
- start := mrand.Intn(len(entries))
- end := mrand.Intn(len(entries)-start) + start + 1
-
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
- if err != nil {
- t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
- }
- }
-}
-
-// TestRangeProof tests normal range proof with two non-existent proofs.
-// The test cases are generated randomly.
-func TestRangeProofWithNonExistentProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
- for i := 0; i < 500; i++ {
- start := mrand.Intn(len(entries))
- end := mrand.Intn(len(entries)-start) + start + 1
- proof := memorydb.New()
-
- // Short circuit if the decreased key is same with the previous key
- first := decreaseKey(common.CopyBytes(entries[start].k))
- if start != 0 && bytes.Equal(first, entries[start-1].k) {
- continue
- }
- // Short circuit if the decreased key is underflow
- if bytes.Compare(first, entries[start].k) > 0 {
- continue
- }
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
- if err != nil {
- t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
- }
- }
-}
-
-// TestRangeProofWithInvalidNonExistentProof tests such scenarios:
-// - There exists a gap between the first element and the left edge proof
-func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- // Case 1
- start, end := 100, 200
- first := decreaseKey(common.CopyBytes(entries[start].k))
-
- proof := memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- start = 105 // Gap created
- k := make([][]byte, 0)
- v := make([][]byte, 0)
- for i := start; i < end; i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, k, v, proof)
- if err == nil {
- t.Fatalf("Expected to detect the error, got nil")
- }
-}
-
-// TestOneElementRangeProof tests the proof with only one
-// element. The first edge proof can be existent one or
-// non-existent one.
-func TestOneElementRangeProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- // One element with existent edge proof, both edge proofs
- // point to the SAME key.
- start := 1000
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- _, err := VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // One element with left non-existent edge proof
- start = 1000
- first := decreaseKey(common.CopyBytes(entries[start].k))
- proof = memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // One element with right non-existent edge proof
- start = 1000
- last := increaseKey(common.CopyBytes(entries[start].k))
- proof = memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // One element with two non-existent edge proofs
- start = 1000
- first, last = decreaseKey(common.CopyBytes(entries[start].k)), increaseKey(common.CopyBytes(entries[start].k))
- proof = memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // Test the mini trie with only a single element.
- tinyTrie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- entry := &kv{randBytes(32), randBytes(20), false}
- tinyTrie.MustUpdate(entry.k, entry.v)
-
- first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
- last = entry.k
- proof = memorydb.New()
- if err := tinyTrie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := tinyTrie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(tinyTrie.Hash(), first, [][]byte{entry.k}, [][]byte{entry.v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-}
-
-// TestAllElementsProof tests the range proof with all elements.
-// The edge proofs can be nil.
-func TestAllElementsProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var k [][]byte
- var v [][]byte
- for i := 0; i < len(entries); i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), nil, k, v, nil)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // With edge proofs, it should still work.
- proof := memorydb.New()
- if err := trie.Prove(entries[0].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), k[0], k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // Even with non-existent edge proofs, it should still work.
- proof = memorydb.New()
- first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), first, k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-}
-
-// TestSingleSideRangeProof tests the range starts from zero.
-func TestSingleSideRangeProof(t *testing.T) {
- for i := 0; i < 64; i++ {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- var entries []*kv
- for i := 0; i < 4096; i++ {
- value := &kv{randBytes(32), randBytes(20), false}
- trie.MustUpdate(value.k, value.v)
- entries = append(entries, value)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1}
- for _, pos := range cases {
- proof := memorydb.New()
- if err := trie.Prove(common.Hash{}.Bytes(), proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[pos].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- k := make([][]byte, 0)
- v := make([][]byte, 0)
- for i := 0; i <= pos; i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- }
- }
-}
-
-// TestBadRangeProof tests a few cases which the proof is wrong.
-// The prover is expected to detect the error.
-func TestBadRangeProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- for i := 0; i < 500; i++ {
- start := mrand.Intn(len(entries))
- end := mrand.Intn(len(entries)-start) + start + 1
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- var first = keys[0]
- testcase := mrand.Intn(6)
- var index int
- switch testcase {
- case 0:
- // Modified key
- index = mrand.Intn(end - start)
- keys[index] = randBytes(32) // In theory it can't be same
- case 1:
- // Modified val
- index = mrand.Intn(end - start)
- vals[index] = randBytes(20) // In theory it can't be same
- case 2:
- // Gapped entry slice
- index = mrand.Intn(end - start)
- if (index == 0 && start < 100) || (index == end-start-1) {
- continue
- }
- keys = append(keys[:index], keys[index+1:]...)
- vals = append(vals[:index], vals[index+1:]...)
- case 3:
- // Out of order
- index1 := mrand.Intn(end - start)
- index2 := mrand.Intn(end - start)
- if index1 == index2 {
- continue
- }
- keys[index1], keys[index2] = keys[index2], keys[index1]
- vals[index1], vals[index2] = vals[index2], vals[index1]
- case 4:
- // Set random key to nil, do nothing
- index = mrand.Intn(end - start)
- keys[index] = nil
- case 5:
- // Set random value to nil, deletion
- index = mrand.Intn(end - start)
- vals[index] = nil
- }
- _, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
- if err == nil {
- t.Fatalf("%d Case %d index %d range: (%d->%d) expect error, got nil", i, testcase, index, start, end-1)
- }
- }
-}
-
-// TestGappedRangeProof focuses on the small trie with embedded nodes.
-// If the gapped node is embedded in the trie, it should be detected too.
-func TestGappedRangeProof(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- var entries []*kv // Sorted entries
- for i := byte(0); i < 10; i++ {
- value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- trie.MustUpdate(value.k, value.v)
- entries = append(entries, value)
- }
- first, last := 2, 8
- proof := memorydb.New()
- if err := trie.Prove(entries[first].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[last-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := first; i < last; i++ {
- if i == (first+last)/2 {
- continue
- }
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
- if err == nil {
- t.Fatal("expect error, got nil")
- }
-}
-
-// TestSameSideProofs tests the element is not in the range covered by proofs
-func TestSameSideProofs(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- pos := 1000
- first := common.CopyBytes(entries[0].k)
-
- proof := memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[2000].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
- if err == nil {
- t.Fatalf("Expected error, got nil")
- }
-
- first = increaseKey(common.CopyBytes(entries[pos].k))
- last := increaseKey(common.CopyBytes(entries[pos].k))
- last = increaseKey(last)
-
- proof = memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
- if err == nil {
- t.Fatalf("Expected error, got nil")
- }
-}
-
-func TestHasRightElement(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- var entries []*kv
- for i := 0; i < 4096; i++ {
- value := &kv{randBytes(32), randBytes(20), false}
- trie.MustUpdate(value.k, value.v)
- entries = append(entries, value)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var cases = []struct {
- start int
- end int
- hasMore bool
- }{
- {-1, 1, true}, // single element with non-existent left proof
- {0, 1, true}, // single element with existent left proof
- {0, 10, true},
- {50, 100, true},
- {50, len(entries), false}, // No more element expected
- {len(entries) - 1, len(entries), false}, // Single last element with two existent proofs(point to same key)
- {0, len(entries), false}, // The whole set with existent left proof
- {-1, len(entries), false}, // The whole set with non-existent left proof
- }
- for _, c := range cases {
- var (
- firstKey []byte
- start = c.start
- end = c.end
- proof = memorydb.New()
- )
- if c.start == -1 {
- firstKey, start = common.Hash{}.Bytes(), 0
- if err := trie.Prove(firstKey, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- } else {
- firstKey = entries[c.start].k
- if err := trie.Prove(entries[c.start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- }
- if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- k := make([][]byte, 0)
- v := make([][]byte, 0)
- for i := start; i < end; i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- if hasMore != c.hasMore {
- t.Fatalf("Wrong hasMore indicator, want %t, got %t", c.hasMore, hasMore)
- }
- }
-}
-
-// TestEmptyRangeProof tests the range proof with "no" element.
-// The first edge proof must be a non-existent proof.
-func TestEmptyRangeProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var cases = []struct {
- pos int
- err bool
- }{
- {len(entries) - 1, false},
- {500, true},
- }
- for _, c := range cases {
- proof := memorydb.New()
- first := increaseKey(common.CopyBytes(entries[c.pos].k))
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, proof)
- if c.err && err == nil {
- t.Fatalf("Expected error, got nil")
- }
- if !c.err && err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- }
-}
-
-// TestBloatedProof tests a malicious proof, where the proof is more or less the
-// whole trie. Previously we didn't accept such packets, but the new APIs do, so
-// lets leave this test as a bit weird, but present.
-func TestBloatedProof(t *testing.T) {
- // Use a small trie
- trie, kvs := nonRandomTrie(100)
- var entries []*kv
- for _, kv := range kvs {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
- var keys [][]byte
- var vals [][]byte
-
- proof := memorydb.New()
- // In the 'malicious' case, we add proofs for every single item
- // (but only one key/value pair used as leaf)
- for i, entry := range entries {
- trie.Prove(entry.k, proof)
- if i == 50 {
- keys = append(keys, entry.k)
- vals = append(vals, entry.v)
- }
- }
- // For reference, we use the same function, but _only_ prove the first
- // and last element
- want := memorydb.New()
- trie.Prove(keys[0], want)
- trie.Prove(keys[len(keys)-1], want)
-
- if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof); err != nil {
- t.Fatalf("expected bloated proof to succeed, got %v", err)
- }
-}
-
-// TestEmptyValueRangeProof tests normal range proof with both edge proofs
-// as the existent proof, but with an extra empty value included, which is a
-// noop technically, but practically should be rejected.
-func TestEmptyValueRangeProof(t *testing.T) {
- trie, values := randomTrie(512)
- var entries []*kv
- for _, kv := range values {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- // Create a new entry with a slightly modified key
- mid := len(entries) / 2
- key := common.CopyBytes(entries[mid-1].k)
- for n := len(key) - 1; n >= 0; n-- {
- if key[n] < 0xff {
- key[n]++
- break
- }
- }
- noop := &kv{key, []byte{}, false}
- entries = append(append(append([]*kv{}, entries[:mid]...), noop), entries[mid:]...)
-
- start, end := 1, len(entries)-1
-
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
- if err == nil {
- t.Fatalf("Expected failure on noop entry")
- }
-}
-
-// TestAllElementsEmptyValueRangeProof tests the range proof with all elements,
-// but with an extra empty value included, which is a noop technically, but
-// practically should be rejected.
-func TestAllElementsEmptyValueRangeProof(t *testing.T) {
- trie, values := randomTrie(512)
- var entries []*kv
- for _, kv := range values {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- // Create a new entry with a slightly modified key
- mid := len(entries) / 2
- key := common.CopyBytes(entries[mid-1].k)
- for n := len(key) - 1; n >= 0; n-- {
- if key[n] < 0xff {
- key[n]++
- break
- }
- }
- noop := &kv{key, []byte{}, false}
- entries = append(append(append([]*kv{}, entries[:mid]...), noop), entries[mid:]...)
-
- var keys [][]byte
- var vals [][]byte
- for i := 0; i < len(entries); i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), nil, keys, vals, nil)
- if err == nil {
- t.Fatalf("Expected failure on noop entry")
- }
-}
-
-// mutateByte changes one byte in b.
-func mutateByte(b []byte) {
- for r := mrand.Intn(len(b)); ; {
- new := byte(mrand.Intn(255))
- if new != b[r] {
- b[r] = new
- break
- }
- }
-}
-
-func increaseKey(key []byte) []byte {
- for i := len(key) - 1; i >= 0; i-- {
- key[i]++
- if key[i] != 0x0 {
- break
- }
- }
- return key
-}
-
-func decreaseKey(key []byte) []byte {
- for i := len(key) - 1; i >= 0; i-- {
- key[i]--
- if key[i] != 0xff {
- break
- }
- }
- return key
-}
-
-func BenchmarkProve(b *testing.B) {
- trie, vals := randomTrie(100)
- var keys []string
- for k := range vals {
- keys = append(keys, k)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- kv := vals[keys[i%len(keys)]]
- proofs := memorydb.New()
- if trie.Prove(kv.k, proofs); proofs.Len() == 0 {
- b.Fatalf("zero length proof for %x", kv.k)
- }
- }
-}
-
-func BenchmarkVerifyProof(b *testing.B) {
- trie, vals := randomTrie(100)
- root := trie.Hash()
- var keys []string
- var proofs []*memorydb.Database
- for k := range vals {
- keys = append(keys, k)
- proof := memorydb.New()
- trie.Prove([]byte(k), proof)
- proofs = append(proofs, proof)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- im := i % len(keys)
- if _, err := VerifyProof(root, []byte(keys[im]), proofs[im]); err != nil {
- b.Fatalf("key %x: %v", keys[im], err)
- }
- }
-}
-
-func BenchmarkVerifyRangeProof10(b *testing.B) { benchmarkVerifyRangeProof(b, 10) }
-func BenchmarkVerifyRangeProof100(b *testing.B) { benchmarkVerifyRangeProof(b, 100) }
-func BenchmarkVerifyRangeProof1000(b *testing.B) { benchmarkVerifyRangeProof(b, 1000) }
-func BenchmarkVerifyRangeProof5000(b *testing.B) { benchmarkVerifyRangeProof(b, 5000) }
-
-func benchmarkVerifyRangeProof(b *testing.B, size int) {
- trie, vals := randomTrie(8192)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- start := 2
- end := start + size
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- b.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- b.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var values [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- values = append(values, entries[i].v)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, proof)
- if err != nil {
- b.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
- }
- }
-}
-
-func BenchmarkVerifyRangeNoProof10(b *testing.B) { benchmarkVerifyRangeNoProof(b, 100) }
-func BenchmarkVerifyRangeNoProof500(b *testing.B) { benchmarkVerifyRangeNoProof(b, 500) }
-func BenchmarkVerifyRangeNoProof1000(b *testing.B) { benchmarkVerifyRangeNoProof(b, 1000) }
-
-func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
- trie, vals := randomTrie(size)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var keys [][]byte
- var values [][]byte
- for _, entry := range entries {
- keys = append(keys, entry.k)
- values = append(values, entry.v)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, nil)
- if err != nil {
- b.Fatalf("Expected no error, got %v", err)
- }
- }
-}
-
-func randomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- vals := make(map[string]*kv)
- for i := byte(0); i < 100; i++ {
- value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false}
- trie.MustUpdate(value.k, value.v)
- trie.MustUpdate(value2.k, value2.v)
- vals[string(value.k)] = value
- vals[string(value2.k)] = value2
- }
- for i := 0; i < n; i++ {
- value := &kv{randBytes(32), randBytes(20), false}
- trie.MustUpdate(value.k, value.v)
- vals[string(value.k)] = value
- }
- return trie, vals
-}
-
-func nonRandomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- vals := make(map[string]*kv)
- max := uint64(0xffffffffffffffff)
- for i := uint64(0); i < uint64(n); i++ {
- value := make([]byte, 32)
- key := make([]byte, 32)
- binary.LittleEndian.PutUint64(key, i)
- binary.LittleEndian.PutUint64(value, i-max)
- //value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- elem := &kv{key, value, false}
- trie.MustUpdate(elem.k, elem.v)
- vals[string(elem.k)] = elem
- }
- return trie, vals
-}
-
-func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
- keys := [][]byte{
- common.Hex2Bytes("aa10000000000000000000000000000000000000000000000000000000000000"),
- common.Hex2Bytes("aa20000000000000000000000000000000000000000000000000000000000000"),
- }
- vals := [][]byte{
- common.Hex2Bytes("02"),
- common.Hex2Bytes("03"),
- }
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i, key := range keys {
- trie.MustUpdate(key, vals[i])
- }
- root := trie.Hash()
- proof := memorydb.New()
- start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
- if err := trie.Prove(start, proof); err != nil {
- t.Fatalf("failed to prove start: %v", err)
- }
- if err := trie.Prove(keys[len(keys)-1], proof); err != nil {
- t.Fatalf("failed to prove end: %v", err)
- }
-
- more, err := VerifyRangeProof(root, start, keys, vals, proof)
- if err != nil {
- t.Fatalf("failed to verify range proof: %v", err)
- }
- if more != false {
- t.Error("expected more to be false")
- }
-}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
deleted file mode 100644
index 887ddd4090..0000000000
--- a/trie/secure_trie.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// SecureTrie is the old name of StateTrie.
-// Deprecated: use StateTrie.
-type SecureTrie = StateTrie
-
-// NewSecure creates a new StateTrie.
-// Deprecated: use NewStateTrie.
-func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db database.Database) (*SecureTrie, error) {
- id := &ID{
- StateRoot: stateRoot,
- Owner: owner,
- Root: root,
- }
- return NewStateTrie(id, db)
-}
-
-// StateTrie wraps a trie with key hashing. In a stateTrie trie, all
-// access operations hash the key using keccak256. This prevents
-// calling code from creating long chains of nodes that
-// increase the access time.
-//
-// Contrary to a regular trie, a StateTrie can only be created with
-// New and must have an attached database. The database also stores
-// the preimage of each key if preimage recording is enabled.
-//
-// StateTrie is not safe for concurrent use.
-type StateTrie struct {
- trie Trie
- db database.Database
- hashKeyBuf [common.HashLength]byte
- secKeyCache map[string][]byte
- secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch
-}
-
-// NewStateTrie creates a trie with an existing root node from a backing database.
-//
-// If root is the zero hash or the sha3 hash of an empty string, the
-// trie is initially empty. Otherwise, New will panic if db is nil
-// and returns MissingNodeError if the root node cannot be found.
-func NewStateTrie(id *ID, db database.Database) (*StateTrie, error) {
- if db == nil {
- panic("trie.NewStateTrie called without a database")
- }
- trie, err := New(id, db)
- if err != nil {
- return nil, err
- }
- return &StateTrie{trie: *trie, db: db}, nil
-}
-
-// MustGet returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
-//
-// This function will omit any encountered error but just
-// print out an error message.
-func (t *StateTrie) MustGet(key []byte) []byte {
- return t.trie.MustGet(t.hashKey(key))
-}
-
-// GetStorage attempts to retrieve a storage slot with provided account address
-// and slot key. The value bytes must not be modified by the caller.
-// If the specified storage slot is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
- enc, err := t.trie.Get(t.hashKey(key))
- if err != nil || len(enc) == 0 {
- return nil, err
- }
- _, content, _, err := rlp.Split(enc)
- return content, err
-}
-
-// GetAccount attempts to retrieve an account with provided account address.
-// If the specified account is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetAccount(address common.Address) (*types.StateAccount, error) {
- res, err := t.trie.Get(t.hashKey(address.Bytes()))
- if res == nil || err != nil {
- return nil, err
- }
- ret := new(types.StateAccount)
- err = rlp.DecodeBytes(res, ret)
- return ret, err
-}
-
-// GetAccountByHash does the same thing as GetAccount, however it expects an
-// account hash that is the hash of address. This constitutes an abstraction
-// leak, since the client code needs to know the key format.
-func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) {
- res, err := t.trie.Get(addrHash.Bytes())
- if res == nil || err != nil {
- return nil, err
- }
- ret := new(types.StateAccount)
- err = rlp.DecodeBytes(res, ret)
- return ret, err
-}
-
-// GetNode attempts to retrieve a trie node by compact-encoded path. It is not
-// possible to use keybyte-encoding as the path might contain odd nibbles.
-// If the specified trie node is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetNode(path []byte) ([]byte, int, error) {
- return t.trie.GetNode(path)
-}
-
-// MustUpdate associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
-//
-// This function will omit any encountered error but just print out an
-// error message.
-func (t *StateTrie) MustUpdate(key, value []byte) {
- hk := t.hashKey(key)
- t.trie.MustUpdate(hk, value)
- t.getSecKeyCache()[string(hk)] = common.CopyBytes(key)
-}
-
-// UpdateStorage associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
-//
-// If a node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
- hk := t.hashKey(key)
- v, _ := rlp.EncodeToBytes(value)
- err := t.trie.Update(hk, v)
- if err != nil {
- return err
- }
- t.getSecKeyCache()[string(hk)] = common.CopyBytes(key)
- return nil
-}
-
-// UpdateAccount will abstract the write of an account to the secure trie.
-func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error {
- hk := t.hashKey(address.Bytes())
- data, err := rlp.EncodeToBytes(acc)
- if err != nil {
- return err
- }
- if err := t.trie.Update(hk, data); err != nil {
- return err
- }
- t.getSecKeyCache()[string(hk)] = address.Bytes()
- return nil
-}
-
-func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
- return nil
-}
-
-// MustDelete removes any existing value for key from the trie. This function
-// will omit any encountered error but just print out an error message.
-func (t *StateTrie) MustDelete(key []byte) {
- hk := t.hashKey(key)
- delete(t.getSecKeyCache(), string(hk))
- t.trie.MustDelete(hk)
-}
-
-// DeleteStorage removes any existing storage slot from the trie.
-// If the specified trie node is not in the trie, nothing will be changed.
-// If a node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) DeleteStorage(_ common.Address, key []byte) error {
- hk := t.hashKey(key)
- delete(t.getSecKeyCache(), string(hk))
- return t.trie.Delete(hk)
-}
-
-// DeleteAccount abstracts an account deletion from the trie.
-func (t *StateTrie) DeleteAccount(address common.Address) error {
- hk := t.hashKey(address.Bytes())
- delete(t.getSecKeyCache(), string(hk))
- return t.trie.Delete(hk)
-}
-
-// GetKey returns the sha3 preimage of a hashed key that was
-// previously used to store a value.
-func (t *StateTrie) GetKey(shaKey []byte) []byte {
- if key, ok := t.getSecKeyCache()[string(shaKey)]; ok {
- return key
- }
- return t.db.Preimage(common.BytesToHash(shaKey))
-}
-
-// Commit collects all dirty nodes in the trie and replaces them with the
-// corresponding node hash. All collected nodes (including dirty leaves if
-// collectLeaf is true) will be encapsulated into a nodeset for return.
-// The returned nodeset can be nil if the trie is clean (nothing to commit).
-// All cached preimages will be also flushed if preimages recording is enabled.
-// Once the trie is committed, it's not usable anymore. A new trie must
-// be created with new root and updated trie database for following usage
-func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
- // Write all the pre-images to the actual disk database
- if len(t.getSecKeyCache()) > 0 {
- preimages := make(map[common.Hash][]byte)
- for hk, key := range t.secKeyCache {
- preimages[common.BytesToHash([]byte(hk))] = key
- }
- t.db.InsertPreimage(preimages)
- t.secKeyCache = make(map[string][]byte)
- }
- // Commit the trie and return its modified nodeset.
- return t.trie.Commit(collectLeaf)
-}
-
-// Hash returns the root hash of StateTrie. It does not write to the
-// database and can be used even if the trie doesn't have one.
-func (t *StateTrie) Hash() common.Hash {
- return t.trie.Hash()
-}
-
-// Copy returns a copy of StateTrie.
-func (t *StateTrie) Copy() *StateTrie {
- return &StateTrie{
- trie: *t.trie.Copy(),
- db: t.db,
- secKeyCache: t.secKeyCache,
- }
-}
-
-// NodeIterator returns an iterator that returns nodes of the underlying trie.
-// Iteration starts at the key after the given start key.
-func (t *StateTrie) NodeIterator(start []byte) (NodeIterator, error) {
- return t.trie.NodeIterator(start)
-}
-
-// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
-// error but just print out an error message.
-func (t *StateTrie) MustNodeIterator(start []byte) NodeIterator {
- return t.trie.MustNodeIterator(start)
-}
-
-// hashKey returns the hash of key as an ephemeral buffer.
-// The caller must not hold onto the return value because it will become
-// invalid on the next call to hashKey or secKey.
-func (t *StateTrie) hashKey(key []byte) []byte {
- h := newHasher(false)
- h.sha.Reset()
- h.sha.Write(key)
- h.sha.Read(t.hashKeyBuf[:])
- returnHasherToPool(h)
- return t.hashKeyBuf[:]
-}
-
-// getSecKeyCache returns the current secure key cache, creating a new one if
-// ownership changed (i.e. the current secure trie is a copy of another owning
-// the actual cache).
-func (t *StateTrie) getSecKeyCache() map[string][]byte {
- if t != t.secKeyCacheOwner {
- t.secKeyCacheOwner = t
- t.secKeyCache = make(map[string][]byte)
- }
- return t.secKeyCache
-}
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
deleted file mode 100644
index 222552cbdd..0000000000
--- a/trie/secure_trie_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "sync"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
-)
-
-func newEmptySecure() *StateTrie {
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- return trie
-}
-
-// makeTestStateTrie creates a large enough secure trie for testing.
-func makeTestStateTrie() (*testDb, *StateTrie, map[string][]byte) {
- // Create an empty trie
- triedb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)
-
- // Fill it with some arbitrary data
- content := make(map[string][]byte)
- for i := byte(0); i < 255; i++ {
- // Map the same data under multiple keys
- key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
-
- key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
-
- // Add some other data to inflate the trie
- for j := byte(3); j < 13; j++ {
- key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
- }
- }
- root, nodes, _ := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
- panic(fmt.Errorf("failed to commit db %v", err))
- }
- // Re-create the trie based on the new state
- trie, _ = NewStateTrie(TrieID(root), triedb)
- return triedb, trie, content
-}
-
-func TestSecureDelete(t *testing.T) {
- trie := newEmptySecure()
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"ether", ""},
- {"dog", "puppy"},
- {"shaman", ""},
- }
- for _, val := range vals {
- if val.v != "" {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- } else {
- trie.MustDelete([]byte(val.k))
- }
- }
- hash := trie.Hash()
- exp := common.HexToHash("29b235a58c3c25ab83010c327d5932bcf05324b7d6b1185e650798034783ca9d")
- if hash != exp {
- t.Errorf("expected %x got %x", exp, hash)
- }
-}
-
-func TestSecureGetKey(t *testing.T) {
- trie := newEmptySecure()
- trie.MustUpdate([]byte("foo"), []byte("bar"))
-
- key := []byte("foo")
- value := []byte("bar")
- seckey := crypto.Keccak256(key)
-
- if !bytes.Equal(trie.MustGet(key), value) {
- t.Errorf("Get did not return bar")
- }
- if k := trie.GetKey(seckey); !bytes.Equal(k, key) {
- t.Errorf("GetKey returned %q, want %q", k, key)
- }
-}
-
-func TestStateTrieConcurrency(t *testing.T) {
- // Create an initial trie and copy if for concurrent access
- _, trie, _ := makeTestStateTrie()
-
- threads := runtime.NumCPU()
- tries := make([]*StateTrie, threads)
- for i := 0; i < threads; i++ {
- tries[i] = trie.Copy()
- }
- // Start a batch of goroutines interacting with the trie
- pend := new(sync.WaitGroup)
- pend.Add(threads)
- for i := 0; i < threads; i++ {
- go func(index int) {
- defer pend.Done()
-
- for j := byte(0); j < 255; j++ {
- // Map the same data under multiple keys
- key, val := common.LeftPadBytes([]byte{byte(index), 1, j}, 32), []byte{j}
- tries[index].MustUpdate(key, val)
-
- key, val = common.LeftPadBytes([]byte{byte(index), 2, j}, 32), []byte{j}
- tries[index].MustUpdate(key, val)
-
- // Add some other data to inflate the trie
- for k := byte(3); k < 13; k++ {
- key, val = common.LeftPadBytes([]byte{byte(index), k, j}, 32), []byte{k, j}
- tries[index].MustUpdate(key, val)
- }
- }
- tries[index].Commit(false)
- }(i)
- }
- // Wait for all threads to finish
- pend.Wait()
-}
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
deleted file mode 100644
index a972dcccaf..0000000000
--- a/trie/stacktrie.go
+++ /dev/null
@@ -1,489 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "errors"
- "sync"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/log"
-)
-
-var (
- stPool = sync.Pool{New: func() any { return new(stNode) }}
- _ = types.TrieHasher((*StackTrie)(nil))
-)
-
-// StackTrieOptions contains the configured options for manipulating the stackTrie.
-type StackTrieOptions struct {
- Writer func(path []byte, hash common.Hash, blob []byte) // The function to commit the dirty nodes
- Cleaner func(path []byte) // The function to clean up dangling nodes
-
- SkipLeftBoundary bool // Flag whether the nodes on the left boundary are skipped for committing
- SkipRightBoundary bool // Flag whether the nodes on the right boundary are skipped for committing
- boundaryGauge metrics.Gauge // Gauge to track how many boundary nodes are met
-}
-
-// NewStackTrieOptions initializes an empty options for stackTrie.
-func NewStackTrieOptions() *StackTrieOptions { return &StackTrieOptions{} }
-
-// WithWriter configures trie node writer within the options.
-func (o *StackTrieOptions) WithWriter(writer func(path []byte, hash common.Hash, blob []byte)) *StackTrieOptions {
- o.Writer = writer
- return o
-}
-
-// WithCleaner configures the cleaner in the option for removing dangling nodes.
-func (o *StackTrieOptions) WithCleaner(cleaner func(path []byte)) *StackTrieOptions {
- o.Cleaner = cleaner
- return o
-}
-
-// WithSkipBoundary configures whether the left and right boundary nodes are
-// filtered for committing, along with a gauge metrics to track how many
-// boundary nodes are met.
-func (o *StackTrieOptions) WithSkipBoundary(skipLeft, skipRight bool, gauge metrics.Gauge) *StackTrieOptions {
- o.SkipLeftBoundary = skipLeft
- o.SkipRightBoundary = skipRight
- o.boundaryGauge = gauge
- return o
-}
-
-// StackTrie is a trie implementation that expects keys to be inserted
-// in order. Once it determines that a subtree will no longer be inserted
-// into, it will hash it and free up the memory it uses.
-type StackTrie struct {
- options *StackTrieOptions
- root *stNode
- h *hasher
-
- first []byte // The (hex-encoded without terminator) key of first inserted entry, tracked as left boundary.
- last []byte // The (hex-encoded without terminator) key of last inserted entry, tracked as right boundary.
-}
-
-// NewStackTrie allocates and initializes an empty trie.
-func NewStackTrie(options *StackTrieOptions) *StackTrie {
- if options == nil {
- options = NewStackTrieOptions()
- }
- return &StackTrie{
- options: options,
- root: stPool.Get().(*stNode),
- h: newHasher(false),
- }
-}
-
-// Update inserts a (key, value) pair into the stack trie.
-func (t *StackTrie) Update(key, value []byte) error {
- if len(value) == 0 {
- return errors.New("trying to insert empty (deletion)")
- }
- k := keybytesToHex(key)
- k = k[:len(k)-1] // chop the termination flag
- if bytes.Compare(t.last, k) >= 0 {
- return errors.New("non-ascending key order")
- }
- // track the first and last inserted entries.
- if t.first == nil {
- t.first = append([]byte{}, k...)
- }
- if t.last == nil {
- t.last = append([]byte{}, k...) // allocate key slice
- } else {
- t.last = append(t.last[:0], k...) // reuse key slice
- }
- t.insert(t.root, k, value, nil)
- return nil
-}
-
-// MustUpdate is a wrapper of Update and will omit any encountered error but
-// just print out an error message.
-func (t *StackTrie) MustUpdate(key, value []byte) {
- if err := t.Update(key, value); err != nil {
- log.Error("Unhandled trie error in StackTrie.Update", "err", err)
- }
-}
-
-// Reset resets the stack trie object to empty state.
-func (t *StackTrie) Reset() {
- t.options = NewStackTrieOptions()
- t.root = stPool.Get().(*stNode)
- t.first = nil
- t.last = nil
-}
-
-// stNode represents a node within a StackTrie
-type stNode struct {
- typ uint8 // node type (as in branch, ext, leaf)
- key []byte // key chunk covered by this (leaf|ext) node
- val []byte // value contained by this node if it's a leaf
- children [16]*stNode // list of children (for branch and exts)
-}
-
-// newLeaf constructs a leaf node with provided node key and value. The key
-// will be deep-copied in the function and safe to modify afterwards, but
-// value is not.
-func newLeaf(key, val []byte) *stNode {
- st := stPool.Get().(*stNode)
- st.typ = leafNode
- st.key = append(st.key, key...)
- st.val = val
- return st
-}
-
-// newExt constructs an extension node with provided node key and child. The
-// key will be deep-copied in the function and safe to modify afterwards.
-func newExt(key []byte, child *stNode) *stNode {
- st := stPool.Get().(*stNode)
- st.typ = extNode
- st.key = append(st.key, key...)
- st.children[0] = child
- return st
-}
-
-// List all values that stNode#nodeType can hold
-const (
- emptyNode = iota
- branchNode
- extNode
- leafNode
- hashedNode
-)
-
-func (n *stNode) reset() *stNode {
- n.key = n.key[:0]
- n.val = nil
- for i := range n.children {
- n.children[i] = nil
- }
- n.typ = emptyNode
- return n
-}
-
-// Helper function that, given a full key, determines the index
-// at which the chunk pointed by st.keyOffset is different from
-// the same chunk in the full key.
-func (n *stNode) getDiffIndex(key []byte) int {
- for idx, nibble := range n.key {
- if nibble != key[idx] {
- return idx
- }
- }
- return len(n.key)
-}
-
-// Helper function to that inserts a (key, value) pair into
-// the trie.
-func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
- switch st.typ {
- case branchNode: /* Branch */
- idx := int(key[0])
-
- // Unresolve elder siblings
- for i := idx - 1; i >= 0; i-- {
- if st.children[i] != nil {
- if st.children[i].typ != hashedNode {
- t.hash(st.children[i], append(path, byte(i)))
- }
- break
- }
- }
-
- // Add new child
- if st.children[idx] == nil {
- st.children[idx] = newLeaf(key[1:], value)
- } else {
- t.insert(st.children[idx], key[1:], value, append(path, key[0]))
- }
-
- case extNode: /* Ext */
- // Compare both key chunks and see where they differ
- diffidx := st.getDiffIndex(key)
-
- // Check if chunks are identical. If so, recurse into
- // the child node. Otherwise, the key has to be split
- // into 1) an optional common prefix, 2) the fullnode
- // representing the two differing path, and 3) a leaf
- // for each of the differentiated subtrees.
- if diffidx == len(st.key) {
- // Ext key and key segment are identical, recurse into
- // the child node.
- t.insert(st.children[0], key[diffidx:], value, append(path, key[:diffidx]...))
- return
- }
- // Save the original part. Depending if the break is
- // at the extension's last byte or not, create an
- // intermediate extension or use the extension's child
- // node directly.
- var n *stNode
- if diffidx < len(st.key)-1 {
- // Break on the non-last byte, insert an intermediate
- // extension. The path prefix of the newly-inserted
- // extension should also contain the different byte.
- n = newExt(st.key[diffidx+1:], st.children[0])
- t.hash(n, append(path, st.key[:diffidx+1]...))
- } else {
- // Break on the last byte, no need to insert
- // an extension node: reuse the current node.
- // The path prefix of the original part should
- // still be same.
- n = st.children[0]
- t.hash(n, append(path, st.key...))
- }
- var p *stNode
- if diffidx == 0 {
- // the break is on the first byte, so
- // the current node is converted into
- // a branch node.
- st.children[0] = nil
- p = st
- st.typ = branchNode
- } else {
- // the common prefix is at least one byte
- // long, insert a new intermediate branch
- // node.
- st.children[0] = stPool.Get().(*stNode)
- st.children[0].typ = branchNode
- p = st.children[0]
- }
- // Create a leaf for the inserted part
- o := newLeaf(key[diffidx+1:], value)
-
- // Insert both child leaves where they belong:
- origIdx := st.key[diffidx]
- newIdx := key[diffidx]
- p.children[origIdx] = n
- p.children[newIdx] = o
- st.key = st.key[:diffidx]
-
- case leafNode: /* Leaf */
- // Compare both key chunks and see where they differ
- diffidx := st.getDiffIndex(key)
-
- // Overwriting a key isn't supported, which means that
- // the current leaf is expected to be split into 1) an
- // optional extension for the common prefix of these 2
- // keys, 2) a fullnode selecting the path on which the
- // keys differ, and 3) one leaf for the differentiated
- // component of each key.
- if diffidx >= len(st.key) {
- panic("Trying to insert into existing key")
- }
-
- // Check if the split occurs at the first nibble of the
- // chunk. In that case, no prefix extnode is necessary.
- // Otherwise, create that
- var p *stNode
- if diffidx == 0 {
- // Convert current leaf into a branch
- st.typ = branchNode
- p = st
- st.children[0] = nil
- } else {
- // Convert current node into an ext,
- // and insert a child branch node.
- st.typ = extNode
- st.children[0] = stPool.Get().(*stNode)
- st.children[0].typ = branchNode
- p = st.children[0]
- }
-
- // Create the two child leaves: one containing the original
- // value and another containing the new value. The child leaf
- // is hashed directly in order to free up some memory.
- origIdx := st.key[diffidx]
- p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val)
- t.hash(p.children[origIdx], append(path, st.key[:diffidx+1]...))
-
- newIdx := key[diffidx]
- p.children[newIdx] = newLeaf(key[diffidx+1:], value)
-
- // Finally, cut off the key part that has been passed
- // over to the children.
- st.key = st.key[:diffidx]
- st.val = nil
-
- case emptyNode: /* Empty */
- st.typ = leafNode
- st.key = key
- st.val = value
-
- case hashedNode:
- panic("trying to insert into hash")
-
- default:
- panic("invalid type")
- }
-}
-
-// hash converts st into a 'hashedNode', if possible. Possible outcomes:
-//
-// 1. The rlp-encoded value was >= 32 bytes:
-// - Then the 32-byte `hash` will be accessible in `st.val`.
-// - And the 'st.type' will be 'hashedNode'
-//
-// 2. The rlp-encoded value was < 32 bytes
-// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
-// - And the 'st.type' will be 'hashedNode' AGAIN
-//
-// This method also sets 'st.type' to hashedNode, and clears 'st.key'.
-func (t *StackTrie) hash(st *stNode, path []byte) {
- var (
- blob []byte // RLP-encoded node blob
- internal [][]byte // List of node paths covered by the extension node
- )
- switch st.typ {
- case hashedNode:
- return
-
- case emptyNode:
- st.val = types.EmptyRootHash.Bytes()
- st.key = st.key[:0]
- st.typ = hashedNode
- return
-
- case branchNode:
- var nodes fullNode
- for i, child := range st.children {
- if child == nil {
- nodes.Children[i] = nilValueNode
- continue
- }
- t.hash(child, append(path, byte(i)))
-
- if len(child.val) < 32 {
- nodes.Children[i] = rawNode(child.val)
- } else {
- nodes.Children[i] = hashNode(child.val)
- }
- st.children[i] = nil
- stPool.Put(child.reset()) // Release child back to pool.
- }
- nodes.encode(t.h.encbuf)
- blob = t.h.encodedBytes()
-
- case extNode:
- // recursively hash and commit child as the first step
- t.hash(st.children[0], append(path, st.key...))
-
- // Collect the path of internal nodes between shortNode and its **in disk**
- // child. This is essential in the case of path mode scheme to avoid leaving
- // danging nodes within the range of this internal path on disk, which would
- // break the guarantee for state healing.
- if len(st.children[0].val) >= 32 && t.options.Cleaner != nil {
- for i := 1; i < len(st.key); i++ {
- internal = append(internal, append(path, st.key[:i]...))
- }
- }
- // encode the extension node
- n := shortNode{Key: hexToCompactInPlace(st.key)}
- if len(st.children[0].val) < 32 {
- n.Val = rawNode(st.children[0].val)
- } else {
- n.Val = hashNode(st.children[0].val)
- }
- n.encode(t.h.encbuf)
- blob = t.h.encodedBytes()
-
- stPool.Put(st.children[0].reset()) // Release child back to pool.
- st.children[0] = nil
-
- case leafNode:
- st.key = append(st.key, byte(16))
- n := shortNode{Key: hexToCompactInPlace(st.key), Val: valueNode(st.val)}
-
- n.encode(t.h.encbuf)
- blob = t.h.encodedBytes()
-
- default:
- panic("invalid node type")
- }
-
- st.typ = hashedNode
- st.key = st.key[:0]
-
- // Skip committing the non-root node if the size is smaller than 32 bytes.
- if len(blob) < 32 && len(path) > 0 {
- st.val = common.CopyBytes(blob)
- return
- }
- // Write the hash to the 'val'. We allocate a new val here to not mutate
- // input values.
- st.val = t.h.hashData(blob)
-
- // Short circuit if the stack trie is not configured for writing.
- if t.options.Writer == nil {
- return
- }
- // Skip committing if the node is on the left boundary and stackTrie is
- // configured to filter the boundary.
- if t.options.SkipLeftBoundary && bytes.HasPrefix(t.first, path) {
- if t.options.boundaryGauge != nil {
- t.options.boundaryGauge.Inc(1)
- }
- return
- }
- // Skip committing if the node is on the right boundary and stackTrie is
- // configured to filter the boundary.
- if t.options.SkipRightBoundary && bytes.HasPrefix(t.last, path) {
- if t.options.boundaryGauge != nil {
- t.options.boundaryGauge.Inc(1)
- }
- return
- }
- // Clean up the internal dangling nodes covered by the extension node.
- // This should be done before writing the node to adhere to the committing
- // order from bottom to top.
- for _, path := range internal {
- t.options.Cleaner(path)
- }
- t.options.Writer(path, common.BytesToHash(st.val), blob)
-}
-
-// Hash will firstly hash the entire trie if it's still not hashed and then commit
-// all nodes to the associated database. Actually most of the trie nodes have been
-// committed already. The main purpose here is to commit the nodes on right boundary.
-//
-// For stack trie, Hash and Commit are functionally identical.
-func (t *StackTrie) Hash() common.Hash {
- n := t.root
- t.hash(n, nil)
- return common.BytesToHash(n.val)
-}
-
-// Commit will firstly hash the entire trie if it's still not hashed and then commit
-// all nodes to the associated database. Actually most of the trie nodes have been
-// committed already. The main purpose here is to commit the nodes on right boundary.
-//
-// For stack trie, Hash and Commit are functionally identical.
-func (t *StackTrie) Commit() common.Hash {
- return t.Hash()
-}
diff --git a/trie/stacktrie_fuzzer_test.go b/trie/stacktrie_fuzzer_test.go
deleted file mode 100644
index 391c0a6c83..0000000000
--- a/trie/stacktrie_fuzzer_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "golang.org/x/crypto/sha3"
- "golang.org/x/exp/slices"
-)
-
-func FuzzStackTrie(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- fuzz(data, false)
- })
-}
-
-func fuzz(data []byte, debugging bool) {
- // This spongeDb is used to check the sequence of disk-db-writes
- var (
- input = bytes.NewReader(data)
- spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbA = newTestDatabase(rawdb.NewDatabase(spongeA), rawdb.HashScheme)
- trieA = NewEmpty(dbA)
- spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbB = newTestDatabase(rawdb.NewDatabase(spongeB), rawdb.HashScheme)
-
- options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme())
- })
- trieB = NewStackTrie(options)
- vals []*kv
- maxElements = 10000
- // operate on unique keys only
- keys = make(map[string]struct{})
- )
- // Fill the trie with elements
- for i := 0; input.Len() > 0 && i < maxElements; i++ {
- k := make([]byte, 32)
- input.Read(k)
- var a uint16
- binary.Read(input, binary.LittleEndian, &a)
- a = 1 + a%100
- v := make([]byte, a)
- input.Read(v)
- if input.Len() == 0 {
- // If it was exhausted while reading, the value may be all zeroes,
- // thus 'deletion' which is not supported on stacktrie
- break
- }
- if _, present := keys[string(k)]; present {
- // This key is a duplicate, ignore it
- continue
- }
- keys[string(k)] = struct{}{}
- vals = append(vals, &kv{k: k, v: v})
- trieA.MustUpdate(k, v)
- }
- if len(vals) == 0 {
- return
- }
- // Flush trie -> database
- rootA, nodes, err := trieA.Commit(false)
- if err != nil {
- panic(err)
- }
- if nodes != nil {
- dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- }
- // Flush memdb -> disk (sponge)
- dbA.Commit(rootA)
-
- // Stacktrie requires sorted insertion
- slices.SortFunc(vals, (*kv).cmp)
-
- for _, kv := range vals {
- if debugging {
- fmt.Printf("{\"%#x\" , \"%#x\"} // stacktrie.Update\n", kv.k, kv.v)
- }
- trieB.MustUpdate(kv.k, kv.v)
- }
- rootB := trieB.Hash()
- trieB.Commit()
- if rootA != rootB {
- panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootB))
- }
- sumA := spongeA.sponge.Sum(nil)
- sumB := spongeB.sponge.Sum(nil)
- if !bytes.Equal(sumA, sumB) {
- panic(fmt.Sprintf("sequence differ: (trie) %x != %x (stacktrie)", sumA, sumB))
- }
-
- // Ensure all the nodes are persisted correctly
- var (
- nodeset = make(map[string][]byte) // path -> blob
- optionsC = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- if crypto.Keccak256Hash(blob) != hash {
- panic("invalid node blob")
- }
- nodeset[string(path)] = common.CopyBytes(blob)
- })
- trieC = NewStackTrie(optionsC)
- checked int
- )
- for _, kv := range vals {
- trieC.MustUpdate(kv.k, kv.v)
- }
- rootC := trieC.Commit()
- if rootA != rootC {
- panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC))
- }
- trieA, _ = New(TrieID(rootA), dbA)
- iterA := trieA.MustNodeIterator(nil)
- for iterA.Next(true) {
- if iterA.Hash() == (common.Hash{}) {
- if _, present := nodeset[string(iterA.Path())]; present {
- panic("unexpected tiny node")
- }
- continue
- }
- nodeBlob, present := nodeset[string(iterA.Path())]
- if !present {
- panic("missing node")
- }
- if !bytes.Equal(nodeBlob, iterA.NodeBlob()) {
- panic("node blob is not matched")
- }
- checked += 1
- }
- if checked != len(nodeset) {
- panic("node number is not matched")
- }
-}
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
deleted file mode 100644
index 9131b2fea2..0000000000
--- a/trie/stacktrie_test.go
+++ /dev/null
@@ -1,497 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "bytes"
- "math/big"
- "math/rand"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/trie/testutil"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/stretchr/testify/assert"
- "golang.org/x/exp/slices"
-)
-
-func TestStackTrieInsertAndHash(t *testing.T) {
- type KeyValueHash struct {
- K string // Hex string for key.
- V string // Value, directly converted to bytes.
- H string // Expected root hash after insert of (K, V) to an existing trie.
- }
- tests := [][]KeyValueHash{
- { // {0:0, 7:0, f:0}
- {"00", "v_______________________0___0", "5cb26357b95bb9af08475be00243ceb68ade0b66b5cd816b0c18a18c612d2d21"},
- {"70", "v_______________________0___1", "8ff64309574f7a437a7ad1628e690eb7663cfde10676f8a904a8c8291dbc1603"},
- {"f0", "v_______________________0___2", "9e3a01bd8d43efb8e9d4b5506648150b8e3ed1caea596f84ee28e01a72635470"},
- },
- { // {1:0cc, e:{1:fc, e:fc}}
- {"10cc", "v_______________________1___0", "233e9b257843f3dfdb1cce6676cdaf9e595ac96ee1b55031434d852bc7ac9185"},
- {"e1fc", "v_______________________1___1", "39c5e908ae83d0c78520c7c7bda0b3782daf594700e44546e93def8f049cca95"},
- {"eefc", "v_______________________1___2", "d789567559fd76fe5b7d9cc42f3750f942502ac1c7f2a466e2f690ec4b6c2a7c"},
- },
- { // {b:{a:ac, b:ac}, d:acc}
- {"baac", "v_______________________2___0", "8be1c86ba7ec4c61e14c1a9b75055e0464c2633ae66a055a24e75450156a5d42"},
- {"bbac", "v_______________________2___1", "8495159b9895a7d88d973171d737c0aace6fe6ac02a4769fff1bc43bcccce4cc"},
- {"dacc", "v_______________________2___2", "9bcfc5b220a27328deb9dc6ee2e3d46c9ebc9c69e78acda1fa2c7040602c63ca"},
- },
- { // {0:0cccc, 2:456{0:0, 2:2}
- {"00cccc", "v_______________________3___0", "e57dc2785b99ce9205080cb41b32ebea7ac3e158952b44c87d186e6d190a6530"},
- {"245600", "v_______________________3___1", "0335354adbd360a45c1871a842452287721b64b4234dfe08760b243523c998db"},
- {"245622", "v_______________________3___2", "9e6832db0dca2b5cf81c0e0727bfde6afc39d5de33e5720bccacc183c162104e"},
- },
- { // {1:4567{1:1c, 3:3c}, 3:0cccccc}
- {"1456711c", "v_______________________4___0", "f2389e78d98fed99f3e63d6d1623c1d4d9e8c91cb1d585de81fbc7c0e60d3529"},
- {"1456733c", "v_______________________4___1", "101189b3fab852be97a0120c03d95eefcf984d3ed639f2328527de6def55a9c0"},
- {"30cccccc", "v_______________________4___2", "3780ce111f98d15751dfde1eb21080efc7d3914b429e5c84c64db637c55405b3"},
- },
- { // 8800{1:f, 2:e, 3:d}
- {"88001f", "v_______________________5___0", "e817db50d84f341d443c6f6593cafda093fc85e773a762421d47daa6ac993bd5"},
- {"88002e", "v_______________________5___1", "d6e3e6047bdc110edd296a4d63c030aec451bee9d8075bc5a198eee8cda34f68"},
- {"88003d", "v_______________________5___2", "b6bdf8298c703342188e5f7f84921a402042d0e5fb059969dd53a6b6b1fb989e"},
- },
- { // 0{1:fc, 2:ec, 4:dc}
- {"01fc", "v_______________________6___0", "693268f2ca80d32b015f61cd2c4dba5a47a6b52a14c34f8e6945fad684e7a0d5"},
- {"02ec", "v_______________________6___1", "e24ddd44469310c2b785a2044618874bf486d2f7822603a9b8dce58d6524d5de"},
- {"04dc", "v_______________________6___2", "33fc259629187bbe54b92f82f0cd8083b91a12e41a9456b84fc155321e334db7"},
- },
- { // f{0:fccc, f:ff{0:f, f:f}}
- {"f0fccc", "v_______________________7___0", "b0966b5aa469a3e292bc5fcfa6c396ae7a657255eef552ea7e12f996de795b90"},
- {"ffff0f", "v_______________________7___1", "3b1ca154ec2a3d96d8d77bddef0abfe40a53a64eb03cecf78da9ec43799fa3d0"},
- {"ffffff", "v_______________________7___2", "e75463041f1be8252781be0ace579a44ea4387bf5b2739f4607af676f7719678"},
- },
- { // ff{0:f{0:f, f:f}, f:fcc}
- {"ff0f0f", "v_______________________8___0", "0928af9b14718ec8262ab89df430f1e5fbf66fac0fed037aff2b6767ae8c8684"},
- {"ff0fff", "v_______________________8___1", "d870f4d3ce26b0bf86912810a1960693630c20a48ba56be0ad04bc3e9ddb01e6"},
- {"ffffcc", "v_______________________8___2", "4239f10dd9d9915ecf2e047d6a576bdc1733ed77a30830f1bf29deaf7d8e966f"},
- },
- {
- {"123d", "x___________________________0", "fc453d88b6f128a77c448669710497380fa4588abbea9f78f4c20c80daa797d0"},
- {"123e", "x___________________________1", "5af48f2d8a9a015c1ff7fa8b8c7f6b676233bd320e8fb57fd7933622badd2cec"},
- {"123f", "x___________________________2", "1164d7299964e74ac40d761f9189b2a3987fae959800d0f7e29d3aaf3eae9e15"},
- },
- {
- {"123d", "x___________________________0", "fc453d88b6f128a77c448669710497380fa4588abbea9f78f4c20c80daa797d0"},
- {"123e", "x___________________________1", "5af48f2d8a9a015c1ff7fa8b8c7f6b676233bd320e8fb57fd7933622badd2cec"},
- {"124a", "x___________________________2", "661a96a669869d76b7231380da0649d013301425fbea9d5c5fae6405aa31cfce"},
- },
- {
- {"123d", "x___________________________0", "fc453d88b6f128a77c448669710497380fa4588abbea9f78f4c20c80daa797d0"},
- {"123e", "x___________________________1", "5af48f2d8a9a015c1ff7fa8b8c7f6b676233bd320e8fb57fd7933622badd2cec"},
- {"13aa", "x___________________________2", "6590120e1fd3ffd1a90e8de5bb10750b61079bb0776cca4414dd79a24e4d4356"},
- },
- {
- {"123d", "x___________________________0", "fc453d88b6f128a77c448669710497380fa4588abbea9f78f4c20c80daa797d0"},
- {"123e", "x___________________________1", "5af48f2d8a9a015c1ff7fa8b8c7f6b676233bd320e8fb57fd7933622badd2cec"},
- {"2aaa", "x___________________________2", "f869b40e0c55eace1918332ef91563616fbf0755e2b946119679f7ef8e44b514"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"1234fa", "x___________________________2", "4f4e368ab367090d5bc3dbf25f7729f8bd60df84de309b4633a6b69ab66142c0"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"1235aa", "x___________________________2", "21840121d11a91ac8bbad9a5d06af902a5c8d56a47b85600ba813814b7bfcb9b"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"124aaa", "x___________________________2", "ea4040ddf6ae3fbd1524bdec19c0ab1581015996262006632027fa5cf21e441e"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"13aaaa", "x___________________________2", "e4beb66c67e44f2dd8ba36036e45a44ff68f8d52942472b1911a45f886a34507"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"2aaaaa", "x___________________________2", "5f5989b820ff5d76b7d49e77bb64f26602294f6c42a1a3becc669cd9e0dc8ec9"},
- },
- {
- {"000000", "x___________________________0", "3b32b7af0bddc7940e7364ee18b5a59702c1825e469452c8483b9c4e0218b55a"},
- {"1234da", "x___________________________1", "3ab152a1285dca31945566f872c1cc2f17a770440eda32aeee46a5e91033dde2"},
- {"1234ea", "x___________________________2", "0cccc87f96ddef55563c1b3be3c64fff6a644333c3d9cd99852cb53b6412b9b8"},
- {"1234fa", "x___________________________3", "65bb3aafea8121111d693ffe34881c14d27b128fd113fa120961f251fe28428d"},
- },
- {
- {"000000", "x___________________________0", "3b32b7af0bddc7940e7364ee18b5a59702c1825e469452c8483b9c4e0218b55a"},
- {"1234da", "x___________________________1", "3ab152a1285dca31945566f872c1cc2f17a770440eda32aeee46a5e91033dde2"},
- {"1234ea", "x___________________________2", "0cccc87f96ddef55563c1b3be3c64fff6a644333c3d9cd99852cb53b6412b9b8"},
- {"1235aa", "x___________________________3", "f670e4d2547c533c5f21e0045442e2ecb733f347ad6d29ef36e0f5ba31bb11a8"},
- },
- {
- {"000000", "x___________________________0", "3b32b7af0bddc7940e7364ee18b5a59702c1825e469452c8483b9c4e0218b55a"},
- {"1234da", "x___________________________1", "3ab152a1285dca31945566f872c1cc2f17a770440eda32aeee46a5e91033dde2"},
- {"1234ea", "x___________________________2", "0cccc87f96ddef55563c1b3be3c64fff6a644333c3d9cd99852cb53b6412b9b8"},
- {"124aaa", "x___________________________3", "c17464123050a9a6f29b5574bb2f92f6d305c1794976b475b7fb0316b6335598"},
- },
- {
- {"000000", "x___________________________0", "3b32b7af0bddc7940e7364ee18b5a59702c1825e469452c8483b9c4e0218b55a"},
- {"1234da", "x___________________________1", "3ab152a1285dca31945566f872c1cc2f17a770440eda32aeee46a5e91033dde2"},
- {"1234ea", "x___________________________2", "0cccc87f96ddef55563c1b3be3c64fff6a644333c3d9cd99852cb53b6412b9b8"},
- {"13aaaa", "x___________________________3", "aa8301be8cb52ea5cd249f5feb79fb4315ee8de2140c604033f4b3fff78f0105"},
- },
- {
- {"0000", "x___________________________0", "cb8c09ad07ae882136f602b3f21f8733a9f5a78f1d2525a8d24d1c13258000b2"},
- {"123d", "x___________________________1", "8f09663deb02f08958136410dc48565e077f76bb6c9d8c84d35fc8913a657d31"},
- {"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"},
- {"123f", "x___________________________3", "80f7bad1893ca57e3443bb3305a517723a74d3ba831bcaca22a170645eb7aafb"},
- },
- {
- {"0000", "x___________________________0", "cb8c09ad07ae882136f602b3f21f8733a9f5a78f1d2525a8d24d1c13258000b2"},
- {"123d", "x___________________________1", "8f09663deb02f08958136410dc48565e077f76bb6c9d8c84d35fc8913a657d31"},
- {"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"},
- {"124a", "x___________________________3", "383bc1bb4f019e6bc4da3751509ea709b58dd1ac46081670834bae072f3e9557"},
- },
- {
- {"0000", "x___________________________0", "cb8c09ad07ae882136f602b3f21f8733a9f5a78f1d2525a8d24d1c13258000b2"},
- {"123d", "x___________________________1", "8f09663deb02f08958136410dc48565e077f76bb6c9d8c84d35fc8913a657d31"},
- {"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"},
- {"13aa", "x___________________________3", "ff0dc70ce2e5db90ee42a4c2ad12139596b890e90eb4e16526ab38fa465b35cf"},
- },
- { // branch node with short values
- {"01", "a", "b48605025f5f4b129d40a420e721aa7d504487f015fce85b96e52126365ef7dc"},
- {"80", "b", "2dc6b680daf74db067cb7aeaad73265ded93d96fce190fcbf64f498d475672ab"},
- {"ee", "c", "017dc705a54ac5328dd263fa1bae68d655310fb3e3f7b7bc57e9a43ddf99c4bf"},
- {"ff", "d", "bd5a3584d271d459bd4eb95247b2fc88656b3671b60c1125ffe7bc0b689470d0"},
- },
- { // ext node with short branch node, then becoming long
- {"a0", "a", "a83e028cb1e4365935661a9fd36a5c65c30b9ab416eaa877424146ca2a69d088"},
- {"a1", "b", "f586a4639b07b01798ca65e05c253b75d51135ebfbf6f8d6e87c0435089e65f0"},
- {"a2", "c", "63e297c295c008e09a8d531e18d57f270b6bc403e23179b915429db948cd62e3"},
- {"a3", "d", "94a7b721535578e9381f1f4e4b6ec29f8bdc5f0458a30320684c562f5d47b4b5"},
- {"a4", "e", "4b7e66d1c81965cdbe8fab8295ef56bc57fefdc5733d4782d2f8baf630f083c6"},
- {"a5", "f", "2997e7b502198ce1783b5277faacf52b25844fb55a99b63e88bdbbafac573106"},
- {"a6", "g", "bee629dd27a40772b2e1a67ec6db270d26acdf8d3b674dfae27866ad6ae1f48b"},
- },
- { // branch node with short values, then long ones
- {"a001", "v1", "b9cc982d995392b51e6787f1915f0b88efd4ad8b30f138da0a3e2242f2323e35"},
- {"b002", "v2", "a7b474bc77ef5097096fa0ee6298fdae8928c0bc3724e7311cd0fa9ed1942fc7"},
- {"c003", "v___________________________3", "dceb5bb7c92b0e348df988a8d9fc36b101397e38ebd405df55ba6ee5f14a264a"},
- {"d004", "v___________________________4", "36e60ecb86b9626165e1c6543c42ecbe4d83bca58e8e1124746961511fce362a"},
- },
- { // ext node to branch node with short values, then long ones
- {"8002", "v1", "3258fcb3e9e7d7234ecd3b8d4743999e4ab3a21592565e0a5ca64c141e8620d9"},
- {"8004", "v2", "b6cb95b7024a83c17624a3c9bed09b4b5e8ed426f49f54b8ad13c39028b1e75a"},
- {"8008", "v___________________________3", "c769d82963abe6f0900bf69754738eeb2f84559777cfa87a44f54e1aab417871"},
- {"800d", "v___________________________4", "1cad1fdaab1a6fa95d7b780fd680030e423eb76669971368ba04797a8d9cdfc9"},
- },
- { // ext node with a child of size 31 (Y) and branch node with a child of size 31 (X)
- {"000001", "ZZZZZZZZZ", "cef154b87c03c563408520ff9b26923c360cbc3ddb590c079bedeeb25a8c9c77"},
- {"000002", "Y", "2130735e600f612f6e657a32bd7be64ddcaec6512c5694844b19de713922895d"},
- {"000003", "XXXXXXXXXXXXXXXXXXXXXXXXXXXX", "962c0fffdeef7612a4f7bff1950d67e3e81c878e48b9ae45b3b374253b050bd8"},
- },
- }
- for i, test := range tests {
- // The StackTrie does not allow Insert(), Hash(), Insert(), ...
- // so we will create new trie for every sequence length of inserts.
- for l := 1; l <= len(test); l++ {
- st := NewStackTrie(nil)
- for j := 0; j < l; j++ {
- kv := &test[j]
- if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil {
- t.Fatal(err)
- }
- }
- expected := common.HexToHash(test[l-1].H)
- if h := st.Hash(); h != expected {
- t.Errorf("%d(%d): root hash mismatch: %x, expected %x", i, l, h, expected)
- }
- }
- }
-}
-
-func TestSizeBug(t *testing.T) {
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
- value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
-
- nt.Update(leaf, value)
- st.Update(leaf, value)
-
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-func TestEmptyBug(t *testing.T) {
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
- //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
- kvs := []struct {
- K string
- V string
- }{
- {K: "405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace", V: "9496f4ec2bf9dab484cac6be589e8417d84781be08"},
- {K: "40edb63a35fcf86c08022722aa3287cdd36440d671b4918131b2514795fefa9c", V: "01"},
- {K: "b10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6", V: "947a30f7736e48d6599356464ba4c150d8da0302ff"},
- {K: "c2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b", V: "02"},
- }
-
- for _, kv := range kvs {
- nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- }
-
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-func TestValLength56(t *testing.T) {
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
- //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
- kvs := []struct {
- K string
- V string
- }{
- {K: "405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace", V: "1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"},
- }
-
- for _, kv := range kvs {
- nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- }
-
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-// TestUpdateSmallNodes tests a case where the leaves are small (both key and value),
-// which causes a lot of node-within-node. This case was found via fuzzing.
-func TestUpdateSmallNodes(t *testing.T) {
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- kvs := []struct {
- K string
- V string
- }{
- {"63303030", "3041"}, // stacktrie.Update
- {"65", "3000"}, // stacktrie.Update
- }
- for _, kv := range kvs {
- nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- }
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-// TestUpdateVariableKeys contains a case which stacktrie fails: when keys of different
-// sizes are used, and the second one has the same prefix as the first, then the
-// stacktrie fails, since it's unable to 'expand' on an already added leaf.
-// For all practical purposes, this is fine, since keys are fixed-size length
-// in account and storage tries.
-//
-// The test is marked as 'skipped', and exists just to have the behaviour documented.
-// This case was found via fuzzing.
-func TestUpdateVariableKeys(t *testing.T) {
- t.SkipNow()
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- kvs := []struct {
- K string
- V string
- }{
- {"0x33303534636532393561313031676174", "303030"},
- {"0x3330353463653239356131303167617430", "313131"},
- }
- for _, kv := range kvs {
- nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- }
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-// TestStacktrieNotModifyValues checks that inserting blobs of data into the
-// stacktrie does not mutate the blobs
-func TestStacktrieNotModifyValues(t *testing.T) {
- st := NewStackTrie(nil)
- { // Test a very small trie
- // Give it the value as a slice with large backing alloc,
- // so if the stacktrie tries to append, it won't have to realloc
- value := make([]byte, 1, 100)
- value[0] = 0x2
- want := common.CopyBytes(value)
- st.Update([]byte{0x01}, value)
- st.Hash()
- if have := value; !bytes.Equal(have, want) {
- t.Fatalf("tiny trie: have %#x want %#x", have, want)
- }
- st = NewStackTrie(nil)
- }
- // Test with a larger trie
- keyB := big.NewInt(1)
- keyDelta := big.NewInt(1)
- var vals [][]byte
- getValue := func(i int) []byte {
- if i%2 == 0 { // large
- return crypto.Keccak256(big.NewInt(int64(i)).Bytes())
- } else { //small
- return big.NewInt(int64(i)).Bytes()
- }
- }
- for i := 0; i < 1000; i++ {
- key := common.BigToHash(keyB)
- value := getValue(i)
- st.Update(key.Bytes(), value)
- vals = append(vals, value)
- keyB = keyB.Add(keyB, keyDelta)
- keyDelta.Add(keyDelta, common.Big1)
- }
- st.Hash()
- for i := 0; i < 1000; i++ {
- want := getValue(i)
-
- have := vals[i]
- if !bytes.Equal(have, want) {
- t.Fatalf("item %d, have %#x want %#x", i, have, want)
- }
- }
-}
-
-func buildPartialTree(entries []*kv, t *testing.T) map[string]common.Hash {
- var (
- options = NewStackTrieOptions()
- nodes = make(map[string]common.Hash)
- )
- var (
- first int
- last = len(entries) - 1
-
- noLeft bool
- noRight bool
- )
- // Enter split mode if there are at least two elements
- if rand.Intn(5) != 0 {
- for {
- first = rand.Intn(len(entries))
- last = rand.Intn(len(entries))
- if first <= last {
- break
- }
- }
- if first != 0 {
- noLeft = true
- }
- if last != len(entries)-1 {
- noRight = true
- }
- }
- options = options.WithSkipBoundary(noLeft, noRight, nil)
- options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- nodes[string(path)] = hash
- })
- tr := NewStackTrie(options)
-
- for i := first; i <= last; i++ {
- tr.MustUpdate(entries[i].k, entries[i].v)
- }
- tr.Commit()
- return nodes
-}
-
-func TestPartialStackTrie(t *testing.T) {
- for round := 0; round < 100; round++ {
- var (
- n = rand.Intn(100) + 1
- entries []*kv
- )
- for i := 0; i < n; i++ {
- var val []byte
- if rand.Intn(3) == 0 {
- val = testutil.RandBytes(3)
- } else {
- val = testutil.RandBytes(32)
- }
- entries = append(entries, &kv{
- k: testutil.RandBytes(32),
- v: val,
- })
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var (
- nodes = make(map[string]common.Hash)
- options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- nodes[string(path)] = hash
- })
- )
- tr := NewStackTrie(options)
-
- for i := 0; i < len(entries); i++ {
- tr.MustUpdate(entries[i].k, entries[i].v)
- }
- tr.Commit()
-
- for j := 0; j < 100; j++ {
- for path, hash := range buildPartialTree(entries, t) {
- if nodes[path] != hash {
- t.Errorf("%v, want %x, got %x", []byte(path), nodes[path], hash)
- }
- }
- }
- }
-}
-
-func TestStackTrieErrors(t *testing.T) {
- s := NewStackTrie(nil)
- // Deletion
- if err := s.Update(nil, nil); err == nil {
- t.Fatal("expected error")
- }
- if err := s.Update(nil, []byte{}); err == nil {
- t.Fatal("expected error")
- }
- if err := s.Update([]byte{0xa}, []byte{}); err == nil {
- t.Fatal("expected error")
- }
- // Non-ascending keys (going backwards or repeating)
- assert.Nil(t, s.Update([]byte{0xaa}, []byte{0xa}))
- assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xa}), "repeat insert same key")
- assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key")
- assert.Nil(t, s.Update([]byte{0xab}, []byte{0xa}))
- assert.NotNil(t, s.Update([]byte{0x10}, []byte{0xb}), "out of order insert")
- assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key")
-}
diff --git a/trie/sync_test.go b/trie/sync_test.go
deleted file mode 100644
index ca4fcc9c26..0000000000
--- a/trie/sync_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "fmt"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
-)
-
-// makeTestTrie create a sample test trie to test node-wise reconstruction.
-func makeTestTrie(scheme string) (ethdb.Database, *testDb, *StateTrie, map[string][]byte) {
- // Create an empty trie
- db := rawdb.NewMemoryDatabase()
- triedb := newTestDatabase(db, scheme)
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)
-
- // Fill it with some arbitrary data
- content := make(map[string][]byte)
- for i := byte(0); i < 255; i++ {
- // Map the same data under multiple keys
- key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
-
- key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
-
- // Add some other data to inflate the trie
- for j := byte(3); j < 13; j++ {
- key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
- }
- }
- root, nodes, _ := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
- panic(fmt.Errorf("failed to commit db %v", err))
- }
- if err := triedb.Commit(root); err != nil {
- panic(err)
- }
- // Re-create the trie based on the new state
- trie, _ = NewStateTrie(TrieID(root), triedb)
- return db, triedb, trie, content
-}
-
-// checkTrieConsistency checks that all nodes in a trie are indeed present.
-func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error {
- ndb := newTestDatabase(db, scheme)
- var it NodeIterator
- if rawTrie {
- trie, err := New(TrieID(root), ndb)
- if err != nil {
- return nil // Consider a non existent state consistent
- }
- it = trie.MustNodeIterator(nil)
- } else {
- trie, err := NewStateTrie(TrieID(root), ndb)
- if err != nil {
- return nil // Consider a non existent state consistent
- }
- it = trie.MustNodeIterator(nil)
- }
- for it.Next(true) {
- }
- return it.Error()
-}
diff --git a/trie/testutil/utils.go b/trie/testutil/utils.go
deleted file mode 100644
index f3166c1ca9..0000000000
--- a/trie/testutil/utils.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package testutil
-
-import (
- crand "crypto/rand"
- "encoding/binary"
- mrand "math/rand"
-
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
-)
-
-// Prng is a pseudo random number generator seeded by strong randomness.
-// The randomness is printed on startup in order to make failures reproducible.
-var prng = initRand()
-
-func initRand() *mrand.Rand {
- var seed [8]byte
- crand.Read(seed[:])
- rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
- return rnd
-}
-
-// RandBytes generates a random byte slice with specified length.
-func RandBytes(n int) []byte {
- r := make([]byte, n)
- prng.Read(r)
- return r
-}
-
-// RandomHash generates a random blob of data and returns it as a hash.
-func RandomHash() common.Hash {
- return common.BytesToHash(RandBytes(common.HashLength))
-}
-
-// RandomAddress generates a random blob of data and returns it as an address.
-func RandomAddress() common.Address {
- return common.BytesToAddress(RandBytes(common.AddressLength))
-}
-
-// RandomNode generates a random node.
-func RandomNode() *trienode.Node {
- val := RandBytes(100)
- return trienode.New(crypto.Keccak256Hash(val), val)
-}
diff --git a/trie/tracer.go b/trie/tracer.go
deleted file mode 100644
index c2b88699a7..0000000000
--- a/trie/tracer.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/libevm/common"
-)
-
-// tracer tracks the changes of trie nodes. During the trie operations,
-// some nodes can be deleted from the trie, while these deleted nodes
-// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted
-// nodes won't be removed from the disk at all. Tracer is an auxiliary tool
-// used to track all insert and delete operations of trie and capture all
-// deleted nodes eventually.
-//
-// The changed nodes can be mainly divided into two categories: the leaf
-// node and intermediate node. The former is inserted/deleted by callers
-// while the latter is inserted/deleted in order to follow the rule of trie.
-// This tool can track all of them no matter the node is embedded in its
-// parent or not, but valueNode is never tracked.
-//
-// Besides, it's also used for recording the original value of the nodes
-// when they are resolved from the disk. The pre-value of the nodes will
-// be used to construct trie history in the future.
-//
-// Note tracer is not thread-safe, callers should be responsible for handling
-// the concurrency issues by themselves.
-type tracer struct {
- inserts map[string]struct{}
- deletes map[string]struct{}
- accessList map[string][]byte
-}
-
-// newTracer initializes the tracer for capturing trie changes.
-func newTracer() *tracer {
- return &tracer{
- inserts: make(map[string]struct{}),
- deletes: make(map[string]struct{}),
- accessList: make(map[string][]byte),
- }
-}
-
-// onRead tracks the newly loaded trie node and caches the rlp-encoded
-// blob internally. Don't change the value outside of function since
-// it's not deep-copied.
-func (t *tracer) onRead(path []byte, val []byte) {
- t.accessList[string(path)] = val
-}
-
-// onInsert tracks the newly inserted trie node. If it's already
-// in the deletion set (resurrected node), then just wipe it from
-// the deletion set as it's "untouched".
-func (t *tracer) onInsert(path []byte) {
- if _, present := t.deletes[string(path)]; present {
- delete(t.deletes, string(path))
- return
- }
- t.inserts[string(path)] = struct{}{}
-}
-
-// onDelete tracks the newly deleted trie node. If it's already
-// in the addition set, then just wipe it from the addition set
-// as it's untouched.
-func (t *tracer) onDelete(path []byte) {
- if _, present := t.inserts[string(path)]; present {
- delete(t.inserts, string(path))
- return
- }
- t.deletes[string(path)] = struct{}{}
-}
-
-// reset clears the content tracked by tracer.
-func (t *tracer) reset() {
- t.inserts = make(map[string]struct{})
- t.deletes = make(map[string]struct{})
- t.accessList = make(map[string][]byte)
-}
-
-// copy returns a deep copied tracer instance.
-func (t *tracer) copy() *tracer {
- var (
- inserts = make(map[string]struct{})
- deletes = make(map[string]struct{})
- accessList = make(map[string][]byte)
- )
- for path := range t.inserts {
- inserts[path] = struct{}{}
- }
- for path := range t.deletes {
- deletes[path] = struct{}{}
- }
- for path, blob := range t.accessList {
- accessList[path] = common.CopyBytes(blob)
- }
- return &tracer{
- inserts: inserts,
- deletes: deletes,
- accessList: accessList,
- }
-}
-
-// deletedNodes returns a list of node paths which are deleted from the trie.
-func (t *tracer) deletedNodes() []string {
- var paths []string
- for path := range t.deletes {
- // It's possible a few deleted nodes were embedded
- // in their parent before, the deletions can be no
- // effect by deleting nothing, filter them out.
- _, ok := t.accessList[path]
- if !ok {
- continue
- }
- paths = append(paths, path)
- }
- return paths
-}
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
deleted file mode 100644
index f0c1cfc88d..0000000000
--- a/trie/tracer_test.go
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
-)
-
-var (
- tiny = []struct{ k, v string }{
- {"k1", "v1"},
- {"k2", "v2"},
- {"k3", "v3"},
- }
- nonAligned = []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- standard = []struct{ k, v string }{
- {string(randBytes(32)), "verb"},
- {string(randBytes(32)), "wookiedoo"},
- {string(randBytes(32)), "stallion"},
- {string(randBytes(32)), "horse"},
- {string(randBytes(32)), "coin"},
- {string(randBytes(32)), "puppy"},
- {string(randBytes(32)), "myothernodedata"},
- }
-)
-
-func TestTrieTracer(t *testing.T) {
- testTrieTracer(t, tiny)
- testTrieTracer(t, nonAligned)
- testTrieTracer(t, standard)
-}
-
-// Tests if the trie diffs are tracked correctly. Tracer should capture
-// all non-leaf dirty nodes, no matter the node is embedded or not.
-func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
-
- // Determine all new nodes are tracked
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- insertSet := copySet(trie.tracer.inserts) // copy before commit
- deleteSet := copySet(trie.tracer.deletes) // copy before commit
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- seen := setKeys(iterNodes(db, root))
- if !compareSet(insertSet, seen) {
- t.Fatal("Unexpected insertion set")
- }
- if !compareSet(deleteSet, nil) {
- t.Fatal("Unexpected deletion set")
- }
-
- // Determine all deletions are tracked
- trie, _ = New(TrieID(root), db)
- for _, val := range vals {
- trie.MustDelete([]byte(val.k))
- }
- insertSet, deleteSet = copySet(trie.tracer.inserts), copySet(trie.tracer.deletes)
- if !compareSet(insertSet, nil) {
- t.Fatal("Unexpected insertion set")
- }
- if !compareSet(deleteSet, seen) {
- t.Fatal("Unexpected deletion set")
- }
-}
-
-// Test that after inserting a new batch of nodes and deleting them immediately,
-// the trie tracer should be cleared normally as no operation happened.
-func TestTrieTracerNoop(t *testing.T) {
- testTrieTracerNoop(t, tiny)
- testTrieTracerNoop(t, nonAligned)
- testTrieTracerNoop(t, standard)
-}
-
-func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- for _, val := range vals {
- trie.MustDelete([]byte(val.k))
- }
- if len(trie.tracer.inserts) != 0 {
- t.Fatal("Unexpected insertion set")
- }
- if len(trie.tracer.deletes) != 0 {
- t.Fatal("Unexpected deletion set")
- }
-}
-
-// Tests if the accessList is correctly tracked.
-func TestAccessList(t *testing.T) {
- testAccessList(t, tiny)
- testAccessList(t, nonAligned)
- testAccessList(t, standard)
-}
-
-func testAccessList(t *testing.T, vals []struct{ k, v string }) {
- var (
- db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie = NewEmpty(db)
- orig = trie.Copy()
- )
- // Create trie from scratch
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-
- // Update trie
- parent := root
- trie, _ = New(TrieID(root), db)
- orig = trie.Copy()
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), randBytes(32))
- }
- root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-
- // Add more new nodes
- parent = root
- trie, _ = New(TrieID(root), db)
- orig = trie.Copy()
- var keys []string
- for i := 0; i < 30; i++ {
- key := randBytes(32)
- keys = append(keys, string(key))
- trie.MustUpdate(key, randBytes(32))
- }
- root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-
- // Partial deletions
- parent = root
- trie, _ = New(TrieID(root), db)
- orig = trie.Copy()
- for _, key := range keys {
- trie.MustUpdate([]byte(key), nil)
- }
- root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-
- // Delete all
- parent = root
- trie, _ = New(TrieID(root), db)
- orig = trie.Copy()
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), nil)
- }
- root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-}
-
-// Tests origin values won't be tracked in Iterator or Prover
-func TestAccessListLeak(t *testing.T) {
- var (
- db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie = NewEmpty(db)
- )
- // Create trie from scratch
- for _, val := range standard {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- var cases = []struct {
- op func(tr *Trie)
- }{
- {
- func(tr *Trie) {
- it := tr.MustNodeIterator(nil)
- for it.Next(true) {
- }
- },
- },
- {
- func(tr *Trie) {
- it := NewIterator(tr.MustNodeIterator(nil))
- for it.Next() {
- }
- },
- },
- {
- func(tr *Trie) {
- for _, val := range standard {
- tr.Prove([]byte(val.k), rawdb.NewMemoryDatabase())
- }
- },
- },
- }
- for _, c := range cases {
- trie, _ = New(TrieID(root), db)
- n1 := len(trie.tracer.accessList)
- c.op(trie)
- n2 := len(trie.tracer.accessList)
-
- if n1 != n2 {
- t.Fatalf("AccessList is leaked, prev %d after %d", n1, n2)
- }
- }
-}
-
-// Tests whether the original tree node is correctly deleted after being embedded
-// in its parent due to the smaller size of the original tree node.
-func TestTinyTree(t *testing.T) {
- var (
- db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie = NewEmpty(db)
- )
- for _, val := range tiny {
- trie.MustUpdate([]byte(val.k), randBytes(32))
- }
- root, set, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
-
- parent := root
- trie, _ = New(TrieID(root), db)
- orig := trie.Copy()
- for _, val := range tiny {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, set, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(set))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, set); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-}
-
-func compareSet(setA, setB map[string]struct{}) bool {
- if len(setA) != len(setB) {
- return false
- }
- for key := range setA {
- if _, ok := setB[key]; !ok {
- return false
- }
- }
- return true
-}
-
-func forNodes(tr *Trie) map[string][]byte {
- var (
- it = tr.MustNodeIterator(nil)
- nodes = make(map[string][]byte)
- )
- for it.Next(true) {
- if it.Leaf() {
- continue
- }
- nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob())
- }
- return nodes
-}
-
-func iterNodes(db *testDb, root common.Hash) map[string][]byte {
- tr, _ := New(TrieID(root), db)
- return forNodes(tr)
-}
-
-func forHashedNodes(tr *Trie) map[string][]byte {
- var (
- it = tr.MustNodeIterator(nil)
- nodes = make(map[string][]byte)
- )
- for it.Next(true) {
- if it.Hash() == (common.Hash{}) {
- continue
- }
- nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob())
- }
- return nodes
-}
-
-func diffTries(trieA, trieB *Trie) (map[string][]byte, map[string][]byte, map[string][]byte) {
- var (
- nodesA = forHashedNodes(trieA)
- nodesB = forHashedNodes(trieB)
- inA = make(map[string][]byte) // hashed nodes in trie a but not b
- inB = make(map[string][]byte) // hashed nodes in trie b but not a
- both = make(map[string][]byte) // hashed nodes in both tries but different value
- )
- for path, blobA := range nodesA {
- if blobB, ok := nodesB[path]; ok {
- if bytes.Equal(blobA, blobB) {
- continue
- }
- both[path] = blobA
- continue
- }
- inA[path] = blobA
- }
- for path, blobB := range nodesB {
- if _, ok := nodesA[path]; ok {
- continue
- }
- inB[path] = blobB
- }
- return inA, inB, both
-}
-
-func setKeys(set map[string][]byte) map[string]struct{} {
- keys := make(map[string]struct{})
- for k := range set {
- keys[k] = struct{}{}
- }
- return keys
-}
-
-func copySet(set map[string]struct{}) map[string]struct{} {
- copied := make(map[string]struct{})
- for k := range set {
- copied[k] = struct{}{}
- }
- return copied
-}
diff --git a/trie/trie.go b/trie/trie.go
deleted file mode 100644
index c3927cc517..0000000000
--- a/trie/trie.go
+++ /dev/null
@@ -1,683 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Package trie implements Merkle Patricia Tries.
-package trie
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/log"
-)
-
-// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
-// top of a database. Whenever trie performs a commit operation, the generated
-// nodes will be gathered and returned in a set. Once the trie is committed,
-// it's not usable anymore. Callers have to re-create the trie with new root
-// based on the updated trie database.
-//
-// Trie is not safe for concurrent use.
-type Trie struct {
- root node
- owner common.Hash
-
- // Flag whether the commit operation is already performed. If so the
- // trie is not usable(latest states is invisible).
- committed bool
-
- // Keep track of the number leaves which have been inserted since the last
- // hashing operation. This number will not directly map to the number of
- // actually unhashed nodes.
- unhashed int
-
- // reader is the handler trie can retrieve nodes from.
- reader *trieReader
-
- // tracer is the tool to track the trie changes.
- // It will be reset after each commit operation.
- tracer *tracer
-}
-
-// newFlag returns the cache flag value for a newly created node.
-func (t *Trie) newFlag() nodeFlag {
- return nodeFlag{dirty: true}
-}
-
-// Copy returns a copy of Trie.
-func (t *Trie) Copy() *Trie {
- return &Trie{
- root: t.root,
- owner: t.owner,
- committed: t.committed,
- unhashed: t.unhashed,
- reader: t.reader,
- tracer: t.tracer.copy(),
- }
-}
-
-// New creates the trie instance with provided trie id and the read-only
-// database. The state specified by trie id must be available, otherwise
-// an error will be returned. The trie root specified by trie id can be
-// zero hash or the sha3 hash of an empty string, then trie is initially
-// empty, otherwise, the root node must be present in database or returns
-// a MissingNodeError if not.
-func New(id *ID, db database.Database) (*Trie, error) {
- reader, err := newTrieReader(id.StateRoot, id.Owner, db)
- if err != nil {
- return nil, err
- }
- trie := &Trie{
- owner: id.Owner,
- reader: reader,
- tracer: newTracer(),
- }
- if id.Root != (common.Hash{}) && id.Root != types.EmptyRootHash {
- rootnode, err := trie.resolveAndTrack(id.Root[:], nil)
- if err != nil {
- return nil, err
- }
- trie.root = rootnode
- }
- return trie, nil
-}
-
-// NewEmpty is a shortcut to create empty tree. It's mostly used in tests.
-func NewEmpty(db database.Database) *Trie {
- tr, _ := New(TrieID(types.EmptyRootHash), db)
- return tr
-}
-
-// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
-// error but just print out an error message.
-func (t *Trie) MustNodeIterator(start []byte) NodeIterator {
- it, err := t.NodeIterator(start)
- if err != nil {
- log.Error("Unhandled trie error in Trie.NodeIterator", "err", err)
- }
- return it
-}
-
-// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
-// the key after the given start key.
-func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return nil, ErrCommitted
- }
- return newNodeIterator(t, start), nil
-}
-
-// MustGet is a wrapper of Get and will omit any encountered error but just
-// print out an error message.
-func (t *Trie) MustGet(key []byte) []byte {
- res, err := t.Get(key)
- if err != nil {
- log.Error("Unhandled trie error in Trie.Get", "err", err)
- }
- return res
-}
-
-// Get returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
-//
-// If the requested node is not present in trie, no error will be returned.
-// If the trie is corrupted, a MissingNodeError is returned.
-func (t *Trie) Get(key []byte) ([]byte, error) {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return nil, ErrCommitted
- }
- value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0)
- if err == nil && didResolve {
- t.root = newroot
- }
- return value, err
-}
-
-func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) {
- switch n := (origNode).(type) {
- case nil:
- return nil, nil, false, nil
- case valueNode:
- return n, n, false, nil
- case *shortNode:
- if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) {
- // key not found in trie
- return nil, n, false, nil
- }
- value, newnode, didResolve, err = t.get(n.Val, key, pos+len(n.Key))
- if err == nil && didResolve {
- n = n.copy()
- n.Val = newnode
- }
- return value, n, didResolve, err
- case *fullNode:
- value, newnode, didResolve, err = t.get(n.Children[key[pos]], key, pos+1)
- if err == nil && didResolve {
- n = n.copy()
- n.Children[key[pos]] = newnode
- }
- return value, n, didResolve, err
- case hashNode:
- child, err := t.resolveAndTrack(n, key[:pos])
- if err != nil {
- return nil, n, true, err
- }
- value, newnode, _, err := t.get(child, key, pos)
- return value, newnode, true, err
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode))
- }
-}
-
-// MustGetNode is a wrapper of GetNode and will omit any encountered error but
-// just print out an error message.
-func (t *Trie) MustGetNode(path []byte) ([]byte, int) {
- item, resolved, err := t.GetNode(path)
- if err != nil {
- log.Error("Unhandled trie error in Trie.GetNode", "err", err)
- }
- return item, resolved
-}
-
-// GetNode retrieves a trie node by compact-encoded path. It is not possible
-// to use keybyte-encoding as the path might contain odd nibbles.
-//
-// If the requested node is not present in trie, no error will be returned.
-// If the trie is corrupted, a MissingNodeError is returned.
-func (t *Trie) GetNode(path []byte) ([]byte, int, error) {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return nil, 0, ErrCommitted
- }
- item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0)
- if err != nil {
- return nil, resolved, err
- }
- if resolved > 0 {
- t.root = newroot
- }
- if item == nil {
- return nil, resolved, nil
- }
- return item, resolved, nil
-}
-
-func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) {
- // If non-existent path requested, abort
- if origNode == nil {
- return nil, nil, 0, nil
- }
- // If we reached the requested path, return the current node
- if pos >= len(path) {
- // Although we most probably have the original node expanded, encoding
- // that into consensus form can be nasty (needs to cascade down) and
- // time consuming. Instead, just pull the hash up from disk directly.
- var hash hashNode
- if node, ok := origNode.(hashNode); ok {
- hash = node
- } else {
- hash, _ = origNode.cache()
- }
- if hash == nil {
- return nil, origNode, 0, errors.New("non-consensus node")
- }
- blob, err := t.reader.node(path, common.BytesToHash(hash))
- return blob, origNode, 1, err
- }
- // Path still needs to be traversed, descend into children
- switch n := (origNode).(type) {
- case valueNode:
- // Path prematurely ended, abort
- return nil, nil, 0, nil
-
- case *shortNode:
- if len(path)-pos < len(n.Key) || !bytes.Equal(n.Key, path[pos:pos+len(n.Key)]) {
- // Path branches off from short node
- return nil, n, 0, nil
- }
- item, newnode, resolved, err = t.getNode(n.Val, path, pos+len(n.Key))
- if err == nil && resolved > 0 {
- n = n.copy()
- n.Val = newnode
- }
- return item, n, resolved, err
-
- case *fullNode:
- item, newnode, resolved, err = t.getNode(n.Children[path[pos]], path, pos+1)
- if err == nil && resolved > 0 {
- n = n.copy()
- n.Children[path[pos]] = newnode
- }
- return item, n, resolved, err
-
- case hashNode:
- child, err := t.resolveAndTrack(n, path[:pos])
- if err != nil {
- return nil, n, 1, err
- }
- item, newnode, resolved, err := t.getNode(child, path, pos)
- return item, newnode, resolved + 1, err
-
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode))
- }
-}
-
-// MustUpdate is a wrapper of Update and will omit any encountered error but
-// just print out an error message.
-func (t *Trie) MustUpdate(key, value []byte) {
- if err := t.Update(key, value); err != nil {
- log.Error("Unhandled trie error in Trie.Update", "err", err)
- }
-}
-
-// Update associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
-//
-// If the requested node is not present in trie, no error will be returned.
-// If the trie is corrupted, a MissingNodeError is returned.
-func (t *Trie) Update(key, value []byte) error {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return ErrCommitted
- }
- return t.update(key, value)
-}
-
-func (t *Trie) update(key, value []byte) error {
- t.unhashed++
- k := keybytesToHex(key)
- if len(value) != 0 {
- _, n, err := t.insert(t.root, nil, k, valueNode(value))
- if err != nil {
- return err
- }
- t.root = n
- } else {
- _, n, err := t.delete(t.root, nil, k)
- if err != nil {
- return err
- }
- t.root = n
- }
- return nil
-}
-
-func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error) {
- if len(key) == 0 {
- if v, ok := n.(valueNode); ok {
- return !bytes.Equal(v, value.(valueNode)), value, nil
- }
- return true, value, nil
- }
- switch n := n.(type) {
- case *shortNode:
- matchlen := prefixLen(key, n.Key)
- // If the whole key matches, keep this short node as is
- // and only update the value.
- if matchlen == len(n.Key) {
- dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value)
- if !dirty || err != nil {
- return false, n, err
- }
- return true, &shortNode{n.Key, nn, t.newFlag()}, nil
- }
- // Otherwise branch out at the index where they differ.
- branch := &fullNode{flags: t.newFlag()}
- var err error
- _, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val)
- if err != nil {
- return false, nil, err
- }
- _, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value)
- if err != nil {
- return false, nil, err
- }
- // Replace this shortNode with the branch if it occurs at index 0.
- if matchlen == 0 {
- return true, branch, nil
- }
- // New branch node is created as a child of the original short node.
- // Track the newly inserted node in the tracer. The node identifier
- // passed is the path from the root node.
- t.tracer.onInsert(append(prefix, key[:matchlen]...))
-
- // Replace it with a short node leading up to the branch.
- return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil
-
- case *fullNode:
- dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value)
- if !dirty || err != nil {
- return false, n, err
- }
- n = n.copy()
- n.flags = t.newFlag()
- n.Children[key[0]] = nn
- return true, n, nil
-
- case nil:
- // New short node is created and track it in the tracer. The node identifier
- // passed is the path from the root node. Note the valueNode won't be tracked
- // since it's always embedded in its parent.
- t.tracer.onInsert(prefix)
-
- return true, &shortNode{key, value, t.newFlag()}, nil
-
- case hashNode:
- // We've hit a part of the trie that isn't loaded yet. Load
- // the node and insert into it. This leaves all child nodes on
- // the path to the value in the trie.
- rn, err := t.resolveAndTrack(n, prefix)
- if err != nil {
- return false, nil, err
- }
- dirty, nn, err := t.insert(rn, prefix, key, value)
- if !dirty || err != nil {
- return false, rn, err
- }
- return true, nn, nil
-
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", n, n))
- }
-}
-
-// MustDelete is a wrapper of Delete and will omit any encountered error but
-// just print out an error message.
-func (t *Trie) MustDelete(key []byte) {
- if err := t.Delete(key); err != nil {
- log.Error("Unhandled trie error in Trie.Delete", "err", err)
- }
-}
-
-// Delete removes any existing value for key from the trie.
-//
-// If the requested node is not present in trie, no error will be returned.
-// If the trie is corrupted, a MissingNodeError is returned.
-func (t *Trie) Delete(key []byte) error {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return ErrCommitted
- }
- t.unhashed++
- k := keybytesToHex(key)
- _, n, err := t.delete(t.root, nil, k)
- if err != nil {
- return err
- }
- t.root = n
- return nil
-}
-
-// delete returns the new root of the trie with key deleted.
-// It reduces the trie to minimal form by simplifying
-// nodes on the way up after deleting recursively.
-func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
- switch n := n.(type) {
- case *shortNode:
- matchlen := prefixLen(key, n.Key)
- if matchlen < len(n.Key) {
- return false, n, nil // don't replace n on mismatch
- }
- if matchlen == len(key) {
- // The matched short node is deleted entirely and track
- // it in the deletion set. The same the valueNode doesn't
- // need to be tracked at all since it's always embedded.
- t.tracer.onDelete(prefix)
-
- return true, nil, nil // remove n entirely for whole matches
- }
- // The key is longer than n.Key. Remove the remaining suffix
- // from the subtrie. Child can never be nil here since the
- // subtrie must contain at least two other values with keys
- // longer than n.Key.
- dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):])
- if !dirty || err != nil {
- return false, n, err
- }
- switch child := child.(type) {
- case *shortNode:
- // The child shortNode is merged into its parent, track
- // is deleted as well.
- t.tracer.onDelete(append(prefix, n.Key...))
-
- // Deleting from the subtrie reduced it to another
- // short node. Merge the nodes to avoid creating a
- // shortNode{..., shortNode{...}}. Use concat (which
- // always creates a new slice) instead of append to
- // avoid modifying n.Key since it might be shared with
- // other nodes.
- return true, &shortNode{concat(n.Key, child.Key...), child.Val, t.newFlag()}, nil
- default:
- return true, &shortNode{n.Key, child, t.newFlag()}, nil
- }
-
- case *fullNode:
- dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:])
- if !dirty || err != nil {
- return false, n, err
- }
- n = n.copy()
- n.flags = t.newFlag()
- n.Children[key[0]] = nn
-
- // Because n is a full node, it must've contained at least two children
- // before the delete operation. If the new child value is non-nil, n still
- // has at least two children after the deletion, and cannot be reduced to
- // a short node.
- if nn != nil {
- return true, n, nil
- }
- // Reduction:
- // Check how many non-nil entries are left after deleting and
- // reduce the full node to a short node if only one entry is
- // left. Since n must've contained at least two children
- // before deletion (otherwise it would not be a full node) n
- // can never be reduced to nil.
- //
- // When the loop is done, pos contains the index of the single
- // value that is left in n or -2 if n contains at least two
- // values.
- pos := -1
- for i, cld := range &n.Children {
- if cld != nil {
- if pos == -1 {
- pos = i
- } else {
- pos = -2
- break
- }
- }
- }
- if pos >= 0 {
- if pos != 16 {
- // If the remaining entry is a short node, it replaces
- // n and its key gets the missing nibble tacked to the
- // front. This avoids creating an invalid
- // shortNode{..., shortNode{...}}. Since the entry
- // might not be loaded yet, resolve it just for this
- // check.
- cnode, err := t.resolve(n.Children[pos], append(prefix, byte(pos)))
- if err != nil {
- return false, nil, err
- }
- if cnode, ok := cnode.(*shortNode); ok {
- // Replace the entire full node with the short node.
- // Mark the original short node as deleted since the
- // value is embedded into the parent now.
- t.tracer.onDelete(append(prefix, byte(pos)))
-
- k := append([]byte{byte(pos)}, cnode.Key...)
- return true, &shortNode{k, cnode.Val, t.newFlag()}, nil
- }
- }
- // Otherwise, n is replaced by a one-nibble short node
- // containing the child.
- return true, &shortNode{[]byte{byte(pos)}, n.Children[pos], t.newFlag()}, nil
- }
- // n still contains at least two values and cannot be reduced.
- return true, n, nil
-
- case valueNode:
- return true, nil, nil
-
- case nil:
- return false, nil, nil
-
- case hashNode:
- // We've hit a part of the trie that isn't loaded yet. Load
- // the node and delete from it. This leaves all child nodes on
- // the path to the value in the trie.
- rn, err := t.resolveAndTrack(n, prefix)
- if err != nil {
- return false, nil, err
- }
- dirty, nn, err := t.delete(rn, prefix, key)
- if !dirty || err != nil {
- return false, rn, err
- }
- return true, nn, nil
-
- default:
- panic(fmt.Sprintf("%T: invalid node: %v (%v)", n, n, key))
- }
-}
-
-func concat(s1 []byte, s2 ...byte) []byte {
- r := make([]byte, len(s1)+len(s2))
- copy(r, s1)
- copy(r[len(s1):], s2)
- return r
-}
-
-func (t *Trie) resolve(n node, prefix []byte) (node, error) {
- if n, ok := n.(hashNode); ok {
- return t.resolveAndTrack(n, prefix)
- }
- return n, nil
-}
-
-// resolveAndTrack loads node from the underlying store with the given node hash
-// and path prefix and also tracks the loaded node blob in tracer treated as the
-// node's original value. The rlp-encoded blob is preferred to be loaded from
-// database because it's easy to decode node while complex to encode node to blob.
-func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
- blob, err := t.reader.node(prefix, common.BytesToHash(n))
- if err != nil {
- return nil, err
- }
- t.tracer.onRead(prefix, blob)
- return mustDecodeNode(n, blob), nil
-}
-
-// Hash returns the root hash of the trie. It does not write to the
-// database and can be used even if the trie doesn't have one.
-func (t *Trie) Hash() common.Hash {
- hash, cached := t.hashRoot()
- t.root = cached
- return common.BytesToHash(hash.(hashNode))
-}
-
-// Commit collects all dirty nodes in the trie and replaces them with the
-// corresponding node hash. All collected nodes (including dirty leaves if
-// collectLeaf is true) will be encapsulated into a nodeset for return.
-// The returned nodeset can be nil if the trie is clean (nothing to commit).
-// Once the trie is committed, it's not usable anymore. A new trie must
-// be created with new root and updated trie database for following usage
-func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
- defer t.tracer.reset()
- defer func() {
- t.committed = true
- }()
- // Trie is empty and can be classified into two types of situations:
- // (a) The trie was empty and no update happens => return nil
- // (b) The trie was non-empty and all nodes are dropped => return
- // the node set includes all deleted nodes
- if t.root == nil {
- paths := t.tracer.deletedNodes()
- if len(paths) == 0 {
- return types.EmptyRootHash, nil, nil // case (a)
- }
- nodes := trienode.NewNodeSet(t.owner)
- for _, path := range paths {
- nodes.AddNode([]byte(path), trienode.NewDeleted())
- }
- return types.EmptyRootHash, nodes, nil // case (b)
- }
- // Derive the hash for all dirty nodes first. We hold the assumption
- // in the following procedure that all nodes are hashed.
- rootHash := t.Hash()
-
- // Do a quick check if we really need to commit. This can happen e.g.
- // if we load a trie for reading storage values, but don't write to it.
- if hashedNode, dirty := t.root.cache(); !dirty {
- // Replace the root node with the origin hash in order to
- // ensure all resolved nodes are dropped after the commit.
- t.root = hashedNode
- return rootHash, nil, nil
- }
- nodes := trienode.NewNodeSet(t.owner)
- for _, path := range t.tracer.deletedNodes() {
- nodes.AddNode([]byte(path), trienode.NewDeleted())
- }
- t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root)
- return rootHash, nodes, nil
-}
-
-// hashRoot calculates the root hash of the given trie
-func (t *Trie) hashRoot() (node, node) {
- if t.root == nil {
- return hashNode(types.EmptyRootHash.Bytes()), nil
- }
- // If the number of changes is below 100, we let one thread handle it
- h := newHasher(t.unhashed >= 100)
- defer func() {
- returnHasherToPool(h)
- t.unhashed = 0
- }()
- hashed, cached := h.hash(t.root, true)
- return hashed, cached
-}
-
-// Reset drops the referenced root node and cleans all internal state.
-func (t *Trie) Reset() {
- t.root = nil
- t.owner = common.Hash{}
- t.unhashed = 0
- t.tracer.reset()
- t.committed = false
-}
diff --git a/trie/trie_id.go b/trie/trie_id.go
deleted file mode 100644
index 2cab016e4d..0000000000
--- a/trie/trie_id.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// (c) 2023, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see
-
-package trie
-
-import "github.com/ava-labs/libevm/common"
-
-// ID is the identifier for uniquely identifying a trie.
-type ID struct {
- StateRoot common.Hash // The root of the corresponding state(block.root)
- Owner common.Hash // The contract address hash which the trie belongs to
- Root common.Hash // The root hash of trie
-}
-
-// StateTrieID constructs an identifier for state trie with the provided state root.
-func StateTrieID(root common.Hash) *ID {
- return &ID{
- StateRoot: root,
- Owner: common.Hash{},
- Root: root,
- }
-}
-
-// StorageTrieID constructs an identifier for storage trie which belongs to a certain
-// state and contract specified by the stateRoot and owner.
-func StorageTrieID(stateRoot common.Hash, owner common.Hash, root common.Hash) *ID {
- return &ID{
- StateRoot: stateRoot,
- Owner: owner,
- Root: root,
- }
-}
-
-// TrieID constructs an identifier for a standard trie(not a second-layer trie)
-// with provided root. It's mostly used in tests and some other tries like CHT trie.
-func TrieID(root common.Hash) *ID {
- return &ID{
- StateRoot: root,
- Owner: common.Hash{},
- Root: root,
- }
-}
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
deleted file mode 100644
index 1341b9fe33..0000000000
--- a/trie/trie_reader.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// (c) 2023, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/triestate"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/log"
-)
-
-// trieReader is a wrapper of the underlying node reader. It's not safe
-// for concurrent usage.
-type trieReader struct {
- owner common.Hash
- reader database.Reader
- banned map[string]struct{} // Marker to prevent node from being accessed, for tests
-}
-
-// newTrieReader initializes the trie reader with the given node reader.
-func newTrieReader(stateRoot, owner common.Hash, db database.Database) (*trieReader, error) {
- if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
- if stateRoot == (common.Hash{}) {
- log.Error("Zero state root hash!")
- }
- return &trieReader{owner: owner}, nil
- }
- reader, err := db.Reader(stateRoot)
- if err != nil {
- return nil, &MissingNodeError{Owner: owner, NodeHash: stateRoot, err: err}
- }
- return &trieReader{owner: owner, reader: reader}, nil
-}
-
-// newEmptyReader initializes the pure in-memory reader. All read operations
-// should be forbidden and returns the MissingNodeError.
-func newEmptyReader() *trieReader {
- return &trieReader{}
-}
-
-// node retrieves the rlp-encoded trie node with the provided trie node
-// information. An MissingNodeError will be returned in case the node is
-// not found or any error is encountered.
-func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) {
- // Perform the logics in tests for preventing trie node access.
- if r.banned != nil {
- if _, ok := r.banned[string(path)]; ok {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
- }
- }
- if r.reader == nil {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
- }
- blob, err := r.reader.Node(r.owner, path, hash)
- if err != nil || len(blob) == 0 {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
- }
- return blob, nil
-}
-
-// MerkleLoader implements triestate.TrieLoader for constructing tries.
-type MerkleLoader struct {
- db database.Database
-}
-
-// NewMerkleLoader creates the merkle trie loader.
-func NewMerkleLoader(db database.Database) *MerkleLoader {
- return &MerkleLoader{db: db}
-}
-
-// OpenTrie opens the main account trie.
-func (l *MerkleLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
- return New(TrieID(root), l.db)
-}
-
-// OpenStorageTrie opens the storage trie of an account.
-func (l *MerkleLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
- return New(StorageTrieID(stateRoot, addrHash, root), l.db)
-}
diff --git a/trie/trie_test.go b/trie/trie_test.go
deleted file mode 100644
index ba62047709..0000000000
--- a/trie/trie_test.go
+++ /dev/null
@@ -1,1224 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "hash"
- "io"
- "math/rand"
- "reflect"
- "sort"
- "testing"
- "testing/quick"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/rlp"
- "github.com/davecgh/go-spew/spew"
- "github.com/holiman/uint256"
- "github.com/stretchr/testify/require"
- "golang.org/x/crypto/sha3"
-)
-
-func init() {
- spew.Config.Indent = " "
- spew.Config.DisableMethods = false
-}
-
-func TestEmptyTrie(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- res := trie.Hash()
- exp := types.EmptyRootHash
- if res != exp {
- t.Errorf("expected %x got %x", exp, res)
- }
-}
-
-func TestNull(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- key := make([]byte, 32)
- value := []byte("test")
- trie.MustUpdate(key, value)
- if !bytes.Equal(trie.MustGet(key), value) {
- t.Fatal("wrong value")
- }
-}
-
-func TestMissingRoot(t *testing.T) {
- testMissingRoot(t, rawdb.HashScheme)
- testMissingRoot(t, rawdb.PathScheme)
-}
-
-func testMissingRoot(t *testing.T, scheme string) {
- root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
- trie, err := New(TrieID(root), newTestDatabase(rawdb.NewMemoryDatabase(), scheme))
- if trie != nil {
- t.Error("New returned non-nil trie for invalid root")
- }
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("New returned wrong error: %v", err)
- }
-}
-
-func TestMissingNode(t *testing.T) {
- testMissingNode(t, false, rawdb.HashScheme)
- testMissingNode(t, false, rawdb.PathScheme)
- testMissingNode(t, true, rawdb.HashScheme)
- testMissingNode(t, true, rawdb.PathScheme)
-}
-
-func testMissingNode(t *testing.T, memonly bool, scheme string) {
- diskdb := rawdb.NewMemoryDatabase()
- triedb := newTestDatabase(diskdb, scheme)
-
- trie := NewEmpty(triedb)
- updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
- updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
- root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- if !memonly {
- require.NoError(t, triedb.Commit(root))
- }
-
- trie, _ = New(TrieID(root), triedb)
- _, err := trie.Get([]byte("120000"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- trie, _ = New(TrieID(root), triedb)
- _, err = trie.Get([]byte("120099"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- trie, _ = New(TrieID(root), triedb)
- _, err = trie.Get([]byte("123456"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- trie, _ = New(TrieID(root), triedb)
- err = trie.Update([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- trie, _ = New(TrieID(root), triedb)
- err = trie.Delete([]byte("123456"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
-
- var (
- path []byte
- hash = common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")
- )
- for p, n := range nodes.Nodes {
- if n.Hash == hash {
- path = common.CopyBytes([]byte(p))
- break
- }
- }
- trie, _ = New(TrieID(root), triedb)
- if memonly {
- trie.reader.banned = map[string]struct{}{string(path): {}}
- } else {
- rawdb.DeleteTrieNode(diskdb, common.Hash{}, path, hash, scheme)
- }
-
- _, err = trie.Get([]byte("120000"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
- _, err = trie.Get([]byte("120099"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
- _, err = trie.Get([]byte("123456"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- err = trie.Update([]byte("120099"), []byte("zxcv"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
- err = trie.Delete([]byte("123456"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
-}
-
-func TestInsert(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- updateString(trie, "doe", "reindeer")
- updateString(trie, "dog", "puppy")
- updateString(trie, "dogglesworth", "cat")
-
- exp := common.HexToHash("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3")
- root := trie.Hash()
- if root != exp {
- t.Errorf("case 1: exp %x got %x", exp, root)
- }
-
- trie = NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
-
- exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
- root, _, _ = trie.Commit(false)
- if root != exp {
- t.Errorf("case 2: exp %x got %x", exp, root)
- }
-}
-
-func TestGet(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- updateString(trie, "doe", "reindeer")
- updateString(trie, "dog", "puppy")
- updateString(trie, "dogglesworth", "cat")
-
- for i := 0; i < 2; i++ {
- res := getString(trie, "dog")
- if !bytes.Equal(res, []byte("puppy")) {
- t.Errorf("expected puppy got %x", res)
- }
- unknown := getString(trie, "unknown")
- if unknown != nil {
- t.Errorf("expected nil got %x", unknown)
- }
- if i == 1 {
- return
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- trie, _ = New(TrieID(root), db)
- }
-}
-
-func TestDelete(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"ether", ""},
- {"dog", "puppy"},
- {"shaman", ""},
- }
- for _, val := range vals {
- if val.v != "" {
- updateString(trie, val.k, val.v)
- } else {
- deleteString(trie, val.k)
- }
- }
-
- hash := trie.Hash()
- exp := common.HexToHash("5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84")
- if hash != exp {
- t.Errorf("expected %x got %x", exp, hash)
- }
-}
-
-func TestEmptyValues(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"ether", ""},
- {"dog", "puppy"},
- {"shaman", ""},
- }
- for _, val := range vals {
- updateString(trie, val.k, val.v)
- }
-
- hash := trie.Hash()
- exp := common.HexToHash("5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84")
- if hash != exp {
- t.Errorf("expected %x got %x", exp, hash)
- }
-}
-
-func TestReplication(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- for _, val := range vals {
- updateString(trie, val.k, val.v)
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- // create a new trie on top of the database and check that lookups work.
- trie2, err := New(TrieID(root), db)
- if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", root, err)
- }
- for _, kv := range vals {
- if string(getString(trie2, kv.k)) != kv.v {
- t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
- }
- }
- hash, nodes, _ := trie2.Commit(false)
- if hash != root {
- t.Errorf("root failure. expected %x got %x", root, hash)
- }
-
- // recreate the trie after commit
- if nodes != nil {
- db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- }
- trie2, err = New(TrieID(hash), db)
- if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", hash, err)
- }
- // perform some insertions on the new trie.
- vals2 := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- // {"shaman", "horse"},
- // {"doge", "coin"},
- // {"ether", ""},
- // {"dog", "puppy"},
- // {"somethingveryoddindeedthis is", "myothernodedata"},
- // {"shaman", ""},
- }
- for _, val := range vals2 {
- updateString(trie2, val.k, val.v)
- }
- if trie2.Hash() != hash {
- t.Errorf("root failure. expected %x got %x", hash, hash)
- }
-}
-
-func TestLargeValue(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99})
- trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32))
- trie.Hash()
-}
-
-// TestRandomCases tests some cases that were found via random fuzzing
-func TestRandomCases(t *testing.T) {
- var rt = []randTestStep{
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 0
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 1
- {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000002")}, // step 2
- {op: 2, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("")}, // step 3
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 4
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 5
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 6
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 7
- {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000008")}, // step 8
- {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000009")}, // step 9
- {op: 2, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("")}, // step 10
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 11
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 12
- {op: 0, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("000000000000000d")}, // step 13
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 14
- {op: 1, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("")}, // step 15
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 16
- {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000011")}, // step 17
- {op: 5, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 18
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 19
- {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000014")}, // step 20
- {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000015")}, // step 21
- {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000016")}, // step 22
- {op: 5, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 23
- {op: 1, key: common.Hex2Bytes("980c393656413a15c8da01978ed9f89feb80b502f58f2d640e3a2f5f7a99a7018f1b573befd92053ac6f78fca4a87268"), value: common.Hex2Bytes("")}, // step 24
- {op: 1, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("")}, // step 25
- }
- if err := runRandTest(rt); err != nil {
- t.Fatal(err)
- }
-}
-
-// randTest performs random trie operations.
-// Instances of this test are created by Generate.
-type randTest []randTestStep
-
-// compile-time interface check
-var _ quick.Generator = (randTest)(nil)
-
-type randTestStep struct {
- op int
- key []byte // for opUpdate, opDelete, opGet
- value []byte // for opUpdate
- err error // for debugging
-}
-
-const (
- opUpdate = iota
- opDelete
- opGet
- opHash
- opCommit
- opItercheckhash
- opNodeDiff
- opProve
- opMax // boundary value, not an actual op
-)
-
-func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
- var finishedFn = func() bool {
- size--
- return size == 0
- }
- return reflect.ValueOf(generateSteps(finishedFn, r))
-}
-
-func generateSteps(finished func() bool, r io.Reader) randTest {
- var allKeys [][]byte
- var one = []byte{0}
- genKey := func() []byte {
- r.Read(one)
- if len(allKeys) < 2 || one[0]%100 > 90 {
- // new key
- size := one[0] % 50
- key := make([]byte, size)
- r.Read(key)
- allKeys = append(allKeys, key)
- return key
- }
- // use existing key
- idx := int(one[0]) % len(allKeys)
- return allKeys[idx]
- }
- var steps randTest
- for !finished() {
- r.Read(one)
- step := randTestStep{op: int(one[0]) % opMax}
- switch step.op {
- case opUpdate:
- step.key = genKey()
- step.value = make([]byte, 8)
- binary.BigEndian.PutUint64(step.value, uint64(len(steps)))
- case opGet, opDelete, opProve:
- step.key = genKey()
- }
- steps = append(steps, step)
- }
- return steps
-}
-
-func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
- deletes, inserts, updates := diffTries(old, new)
-
- // Check insertion set
- for path := range inserts {
- n, ok := set.Nodes[path]
- if !ok || n.IsDeleted() {
- return errors.New("expect new node")
- }
- //if len(n.Prev) > 0 {
- // return errors.New("unexpected origin value")
- //}
- }
- // Check deletion set
- for path := range deletes {
- n, ok := set.Nodes[path]
- if !ok || !n.IsDeleted() {
- return errors.New("expect deleted node")
- }
- //if len(n.Prev) == 0 {
- // return errors.New("expect origin value")
- //}
- //if !bytes.Equal(n.Prev, blob) {
- // return errors.New("invalid origin value")
- //}
- }
- // Check update set
- for path := range updates {
- n, ok := set.Nodes[path]
- if !ok || n.IsDeleted() {
- return errors.New("expect updated node")
- }
- //if len(n.Prev) == 0 {
- // return errors.New("expect origin value")
- //}
- //if !bytes.Equal(n.Prev, blob) {
- // return errors.New("invalid origin value")
- //}
- }
- return nil
-}
-
-// runRandTestBool coerces error to boolean, for use in quick.Check
-func runRandTestBool(rt randTest) bool {
- return runRandTest(rt) == nil
-}
-
-func runRandTest(rt randTest) error {
- var scheme = rawdb.HashScheme
- if rand.Intn(2) == 0 {
- scheme = rawdb.PathScheme
- }
- var (
- origin = types.EmptyRootHash
- triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme)
- tr = NewEmpty(triedb)
- values = make(map[string]string) // tracks content of the trie
- origTrie = NewEmpty(triedb)
- )
- for i, step := range rt {
- // fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
- // step.op, step.key, step.value, i)
- switch step.op {
- case opUpdate:
- tr.MustUpdate(step.key, step.value)
- values[string(step.key)] = string(step.value)
- case opDelete:
- tr.MustDelete(step.key)
- delete(values, string(step.key))
- case opGet:
- v := tr.MustGet(step.key)
- want := values[string(step.key)]
- if string(v) != want {
- rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want)
- }
- case opProve:
- hash := tr.Hash()
- if hash == types.EmptyRootHash {
- continue
- }
- proofDb := rawdb.NewMemoryDatabase()
- err := tr.Prove(step.key, proofDb)
- if err != nil {
- rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err)
- }
- _, err = VerifyProof(hash, step.key, proofDb)
- if err != nil {
- rt[i].err = fmt.Errorf("failed for verifying key %#x, %v", step.key, err)
- }
- case opHash:
- tr.Hash()
- case opCommit:
- root, nodes, _ := tr.Commit(true)
- if nodes != nil {
- triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
- }
- newtr, err := New(TrieID(root), triedb)
- if err != nil {
- rt[i].err = err
- return err
- }
- if nodes != nil {
- if err := verifyAccessList(origTrie, newtr, nodes); err != nil {
- rt[i].err = err
- return err
- }
- }
- tr = newtr
- origTrie = tr.Copy()
- origin = root
- case opItercheckhash:
- checktr := NewEmpty(triedb)
- it := NewIterator(tr.MustNodeIterator(nil))
- for it.Next() {
- checktr.MustUpdate(it.Key, it.Value)
- }
- if tr.Hash() != checktr.Hash() {
- rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
- }
- case opNodeDiff:
- var (
- origIter = origTrie.MustNodeIterator(nil)
- curIter = tr.MustNodeIterator(nil)
- origSeen = make(map[string]struct{})
- curSeen = make(map[string]struct{})
- )
- for origIter.Next(true) {
- if origIter.Leaf() {
- continue
- }
- origSeen[string(origIter.Path())] = struct{}{}
- }
- for curIter.Next(true) {
- if curIter.Leaf() {
- continue
- }
- curSeen[string(curIter.Path())] = struct{}{}
- }
- var (
- insertExp = make(map[string]struct{})
- deleteExp = make(map[string]struct{})
- )
- for path := range curSeen {
- _, present := origSeen[path]
- if !present {
- insertExp[path] = struct{}{}
- }
- }
- for path := range origSeen {
- _, present := curSeen[path]
- if !present {
- deleteExp[path] = struct{}{}
- }
- }
- if len(insertExp) != len(tr.tracer.inserts) {
- rt[i].err = fmt.Errorf("insert set mismatch")
- }
- if len(deleteExp) != len(tr.tracer.deletes) {
- rt[i].err = fmt.Errorf("delete set mismatch")
- }
- for insert := range tr.tracer.inserts {
- if _, present := insertExp[insert]; !present {
- rt[i].err = fmt.Errorf("missing inserted node")
- }
- }
- for del := range tr.tracer.deletes {
- if _, present := deleteExp[del]; !present {
- rt[i].err = fmt.Errorf("missing deleted node")
- }
- }
- }
- // Abort the test on error.
- if rt[i].err != nil {
- return rt[i].err
- }
- }
- return nil
-}
-
-func TestRandom(t *testing.T) {
- if err := quick.Check(runRandTestBool, nil); err != nil {
- if cerr, ok := err.(*quick.CheckError); ok {
- t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
- }
- t.Fatal(err)
- }
-}
-
-func BenchmarkGet(b *testing.B) { benchGet(b) }
-func BenchmarkUpdateBE(b *testing.B) { benchUpdate(b, binary.BigEndian) }
-func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
-
-const benchElemCount = 20000
-
-func benchGet(b *testing.B) {
- triedb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(triedb)
- k := make([]byte, 32)
- for i := 0; i < benchElemCount; i++ {
- binary.LittleEndian.PutUint64(k, uint64(i))
- v := make([]byte, 32)
- binary.LittleEndian.PutUint64(v, uint64(i))
- trie.MustUpdate(k, v)
- }
- binary.LittleEndian.PutUint64(k, benchElemCount/2)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- trie.MustGet(k)
- }
- b.StopTimer()
-}
-
-func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- k := make([]byte, 32)
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- v := make([]byte, 32)
- e.PutUint64(k, uint64(i))
- e.PutUint64(v, uint64(i))
- trie.MustUpdate(k, v)
- }
- return trie
-}
-
-// Benchmarks the trie hashing. Since the trie caches the result of any operation,
-// we cannot use b.N as the number of hashing rounds, since all rounds apart from
-// the first one will be NOOP. As such, we'll use b.N as the number of account to
-// insert into the trie before measuring the hashing.
-// BenchmarkHash-6 288680 4561 ns/op 682 B/op 9 allocs/op
-// BenchmarkHash-6 275095 4800 ns/op 685 B/op 9 allocs/op
-// pure hasher:
-// BenchmarkHash-6 319362 4230 ns/op 675 B/op 9 allocs/op
-// BenchmarkHash-6 257460 4674 ns/op 689 B/op 9 allocs/op
-// With hashing in-between and pure hasher:
-// BenchmarkHash-6 225417 7150 ns/op 982 B/op 12 allocs/op
-// BenchmarkHash-6 220378 6197 ns/op 983 B/op 12 allocs/op
-// same with old hasher
-// BenchmarkHash-6 229758 6437 ns/op 981 B/op 12 allocs/op
-// BenchmarkHash-6 212610 7137 ns/op 986 B/op 12 allocs/op
-func BenchmarkHash(b *testing.B) {
- // Create a realistic account trie to hash. We're first adding and hashing N
- // entries, then adding N more.
- addresses, accounts := makeAccounts(2 * b.N)
- // Insert the accounts into the trie and hash it
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- i := 0
- for ; i < len(addresses)/2; i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- trie.Hash()
- for ; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- b.ResetTimer()
- b.ReportAllocs()
- //trie.hashRoot(nil, nil)
- trie.Hash()
-}
-
-// Benchmarks the trie Commit following a Hash. Since the trie caches the result of any operation,
-// we cannot use b.N as the number of hashing rounds, since all rounds apart from
-// the first one will be NOOP. As such, we'll use b.N as the number of account to
-// insert into the trie before measuring the hashing.
-func BenchmarkCommitAfterHash(b *testing.B) {
- b.Run("no-onleaf", func(b *testing.B) {
- benchmarkCommitAfterHash(b, false)
- })
- b.Run("with-onleaf", func(b *testing.B) {
- benchmarkCommitAfterHash(b, true)
- })
-}
-
-func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
- // Make the random benchmark deterministic
- addresses, accounts := makeAccounts(b.N)
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Insert the accounts into the trie and hash it
- trie.Hash()
- b.ResetTimer()
- b.ReportAllocs()
- trie.Commit(collectLeaf)
-}
-
-func TestTinyTrie(t *testing.T) {
- // Create a realistic account trie to hash
- _, accounts := makeAccounts(5)
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
- if exp, root := common.HexToHash("dfb9311ba769a2bdb9d4126d0ae49046f9551063c738d10b9021343fb6550b3f"), trie.Hash(); exp != root {
- t.Errorf("1: got %x, exp %x", root, exp)
- }
- trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4])
- if exp, root := common.HexToHash("21d0f2f4c72fed985d1196993a784d36321a44085bbe60990cb65b7bc478f52b"), trie.Hash(); exp != root {
- t.Errorf("2: got %x, exp %x", root, exp)
- }
- trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4])
- if exp, root := common.HexToHash("e71f7f0bbcd0daf37bc03a3389408eced206e796ed6d76186387847e2193ac4e"), trie.Hash(); exp != root {
- t.Errorf("3: got %x, exp %x", root, exp)
- }
- checktr := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- it := NewIterator(trie.MustNodeIterator(nil))
- for it.Next() {
- checktr.MustUpdate(it.Key, it.Value)
- }
- if troot, itroot := trie.Hash(), checktr.Hash(); troot != itroot {
- t.Fatalf("hash mismatch in opItercheckhash, trie: %x, check: %x", troot, itroot)
- }
-}
-
-func TestCommitAfterHash(t *testing.T) {
- // Create a realistic account trie to hash
- addresses, accounts := makeAccounts(1000)
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Insert the accounts into the trie and hash it
- trie.Hash()
- trie.Commit(false)
- root := trie.Hash()
- exp := common.HexToHash("6dcf62a0c1575866467426b55e3acab075312b38c6b112457c3cd23ab9b94fc1")
- if exp != root {
- t.Errorf("got %x, exp %x", root, exp)
- }
- root, _, _ = trie.Commit(false)
- if exp != root {
- t.Errorf("got %x, exp %x", root, exp)
- }
-}
-
-func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) {
- // Make the random benchmark deterministic
- random := rand.New(rand.NewSource(0))
- // Create a realistic account trie to hash
- addresses = make([][20]byte, size)
- for i := 0; i < len(addresses); i++ {
- data := make([]byte, 20)
- random.Read(data)
- copy(addresses[i][:], data)
- }
- accounts = make([][]byte, len(addresses))
- for i := 0; i < len(accounts); i++ {
- var (
- nonce = uint64(random.Int63())
- root = types.EmptyRootHash
- code = crypto.Keccak256(nil)
- )
- // The big.Rand function is not deterministic with regards to 64 vs 32 bit systems,
- // and will consume different amount of data from the rand source.
- //balance = new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil))
- // Therefore, we instead just read via byte buffer
- numBytes := random.Uint32() % 33 // [0, 32] bytes
- balanceBytes := make([]byte, numBytes)
- random.Read(balanceBytes)
- balance := new(uint256.Int).SetBytes(balanceBytes)
- data, _ := rlp.EncodeToBytes(&types.StateAccount{Nonce: nonce, Balance: balance, Root: root, CodeHash: code})
- accounts[i] = data
- }
- return addresses, accounts
-}
-
-// spongeDb is a dummy db backend which accumulates writes in a sponge
-type spongeDb struct {
- sponge hash.Hash
- id string
- journal []string
- keys []string
- values map[string]string
-}
-
-func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement me") }
-func (s *spongeDb) Get(key []byte) ([]byte, error) { return nil, errors.New("no such elem") }
-func (s *spongeDb) Delete(key []byte) error { panic("implement me") }
-func (s *spongeDb) NewBatch() ethdb.Batch { return &spongeBatch{s} }
-func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch { return &spongeBatch{s} }
-func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error) { panic("implement me") }
-func (s *spongeDb) Stat(property string) (string, error) { panic("implement me") }
-func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
-func (s *spongeDb) Close() error { return nil }
-func (s *spongeDb) Put(key []byte, value []byte) error {
- var (
- keybrief = key
- valbrief = value
- )
- if len(keybrief) > 8 {
- keybrief = keybrief[:8]
- }
- if len(valbrief) > 8 {
- valbrief = valbrief[:8]
- }
- s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief))
-
- if s.values == nil {
- s.sponge.Write(key)
- s.sponge.Write(value)
- } else {
- s.keys = append(s.keys, string(key))
- s.values[string(key)] = string(value)
- }
- return nil
-}
-func (s *spongeDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { panic("implement me") }
-
-func (s *spongeDb) Flush() {
- // Bottom-up, the longest path first
- sort.Sort(sort.Reverse(sort.StringSlice(s.keys)))
- for _, key := range s.keys {
- s.sponge.Write([]byte(key))
- s.sponge.Write([]byte(s.values[key]))
- }
-}
-
-// spongeBatch is a dummy batch which immediately writes to the underlying spongedb
-type spongeBatch struct {
- db *spongeDb
-}
-
-func (b *spongeBatch) Put(key, value []byte) error {
- b.db.Put(key, value)
- return nil
-}
-func (b *spongeBatch) Delete(key []byte) error { panic("implement me") }
-func (b *spongeBatch) ValueSize() int { return 100 }
-func (b *spongeBatch) Write() error { return nil }
-func (b *spongeBatch) Reset() {}
-func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }
-
-// TestCommitSequence tests that the trie.Commit operation writes the elements of the trie
-// in the expected order.
-// The test data was based on the 'master' code, and is basically random. It can be used
-// to check whether changes to the trie modifies the write order or data in any way.
-func TestCommitSequence(t *testing.T) {
- for i, tc := range []struct {
- count int
- expWriteSeqHash []byte
- }{
- {20, common.FromHex("2e4ec8744409f17d6a3fe1540282e3ba0cf434b3a11974a2a033e3caa476a83c")},
- {200, common.FromHex("f7abb2c93e89e7e68696d855fa8982cb454190dcd7e4e7f4c7d60fd5c9f465f3")},
- {2000, common.FromHex("226f735a06e25b5306216d52ce0652ba9df17341bb0d1ae8be5484d691e8fe5c")},
- } {
- addresses, accounts := makeAccounts(tc.count)
- // This spongeDb is used to check the sequence of disk-db-writes
- s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
- trie := NewEmpty(db)
- // Fill the trie with elements
- for i := 0; i < tc.count; i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Flush trie -> database
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- // Flush memdb -> disk (sponge)
- db.Commit(root)
- if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
- t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
- }
- }
-}
-
-// TestCommitSequenceRandomBlobs is identical to TestCommitSequence
-// but uses random blobs instead of 'accounts'
-func TestCommitSequenceRandomBlobs(t *testing.T) {
- for i, tc := range []struct {
- count int
- expWriteSeqHash []byte
- }{
- {20, common.FromHex("8016650c7a50cf88485fd06cde52d634a89711051107f00d21fae98234f2f13d")},
- {200, common.FromHex("dde92ca9812e068e6982d04b40846dc65a61a9fd4996fc0f55f2fde172a8e13c")},
- {2000, common.FromHex("ab553a7f9aff82e3929c382908e30ef7dd17a332933e92ba3fe873fc661ef382")},
- } {
- prng := rand.New(rand.NewSource(int64(i)))
- // This spongeDb is used to check the sequence of disk-db-writes
- s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
- trie := NewEmpty(db)
- // Fill the trie with elements
- for i := 0; i < tc.count; i++ {
- key := make([]byte, 32)
- var val []byte
- // 50% short elements, 50% large elements
- if prng.Intn(2) == 0 {
- val = make([]byte, 1+prng.Intn(32))
- } else {
- val = make([]byte, 1+prng.Intn(4096))
- }
- prng.Read(key)
- prng.Read(val)
- trie.MustUpdate(key, val)
- }
- // Flush trie -> database
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- // Flush memdb -> disk (sponge)
- db.Commit(root)
- if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
- t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
- }
- }
-}
-
-func TestCommitSequenceStackTrie(t *testing.T) {
- for count := 1; count < 200; count++ {
- prng := rand.New(rand.NewSource(int64(count)))
- // This spongeDb is used to check the sequence of disk-db-writes
- s := &spongeDb{
- sponge: sha3.NewLegacyKeccak256(),
- id: "a",
- values: make(map[string]string),
- }
- db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
- trie := NewEmpty(db)
-
- // Another sponge is used for the stacktrie commits
- stackTrieSponge := &spongeDb{
- sponge: sha3.NewLegacyKeccak256(),
- id: "b",
- values: make(map[string]string),
- }
- options := NewStackTrieOptions()
- options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
- })
- stTrie := NewStackTrie(options)
-
- // Fill the trie with elements
- for i := 0; i < count; i++ {
- // For the stack trie, we need to do inserts in proper order
- key := make([]byte, 32)
- binary.BigEndian.PutUint64(key, uint64(i))
- var val []byte
- // 50% short elements, 50% large elements
- if prng.Intn(2) == 0 {
- val = make([]byte, 1+prng.Intn(32))
- } else {
- val = make([]byte, 1+prng.Intn(1024))
- }
- prng.Read(val)
- trie.Update(key, val)
- stTrie.Update(key, val)
- }
- // Flush trie -> database
- root, nodes, _ := trie.Commit(false)
- // Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- db.Commit(root)
- s.Flush()
-
- // And flush stacktrie -> disk
- stRoot := stTrie.Commit()
- if stRoot != root {
- t.Fatalf("root wrong, got %x exp %x", stRoot, root)
- }
- stackTrieSponge.Flush()
- if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
- // Show the journal
- t.Logf("Expected:")
- for i, v := range s.journal {
- t.Logf("op %d: %v", i, v)
- }
- t.Logf("Stacktrie:")
- for i, v := range stackTrieSponge.journal {
- t.Logf("op %d: %v", i, v)
- }
- t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", count, got, exp)
- }
- }
-}
-
-// TestCommitSequenceSmallRoot tests that a trie which is essentially only a
-// small (<32 byte) shortnode with an included value is properly committed to a
-// database.
-// This case might not matter, since in practice, all keys are 32 bytes, which means
-// that even a small trie which contains a leaf will have an extension making it
-// not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do.
-func TestCommitSequenceSmallRoot(t *testing.T) {
- s := &spongeDb{
- sponge: sha3.NewLegacyKeccak256(),
- id: "a",
- values: make(map[string]string),
- }
- db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
- trie := NewEmpty(db)
-
- // Another sponge is used for the stacktrie commits
- stackTrieSponge := &spongeDb{
- sponge: sha3.NewLegacyKeccak256(),
- id: "b",
- values: make(map[string]string),
- }
- options := NewStackTrieOptions()
- options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
- })
- stTrie := NewStackTrie(options)
-
- // Add a single small-element to the trie(s)
- key := make([]byte, 5)
- key[0] = 1
- trie.Update(key, []byte{0x1})
- stTrie.Update(key, []byte{0x1})
-
- // Flush trie -> database
- root, nodes, _ := trie.Commit(false)
- // Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- db.Commit(root)
-
- // And flush stacktrie -> disk
- stRoot := stTrie.Commit()
- if stRoot != root {
- t.Fatalf("root wrong, got %x exp %x", stRoot, root)
- }
- t.Logf("root: %x\n", stRoot)
-
- s.Flush()
- stackTrieSponge.Flush()
- if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
- t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp)
- }
-}
-
-// BenchmarkCommitAfterHashFixedSize benchmarks the Commit (after Hash) of a fixed number of updates to a trie.
-// This benchmark is meant to capture the difference on efficiency of small versus large changes. Typically,
-// storage tries are small (a couple of entries), whereas the full post-block account trie update is large (a couple
-// of thousand entries)
-func BenchmarkHashFixedSize(b *testing.B) {
- b.Run("10", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(20)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
- b.Run("100", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
-
- b.Run("1K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(1000)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
- b.Run("10K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(10000)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
- b.Run("100K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100000)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
-}
-
-func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
- b.ReportAllocs()
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Insert the accounts into the trie and hash it
- b.StartTimer()
- trie.Hash()
- b.StopTimer()
-}
-
-func BenchmarkCommitAfterHashFixedSize(b *testing.B) {
- b.Run("10", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(20)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
- b.Run("100", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
-
- b.Run("1K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(1000)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
- b.Run("10K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(10000)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
- b.Run("100K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100000)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
-}
-
-func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
- b.ReportAllocs()
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Insert the accounts into the trie and hash it
- trie.Hash()
- b.StartTimer()
- trie.Commit(false)
- b.StopTimer()
-}
-
-func getString(trie *Trie, k string) []byte {
- return trie.MustGet([]byte(k))
-}
-
-func updateString(trie *Trie, k, v string) {
- trie.MustUpdate([]byte(k), []byte(v))
-}
-
-func deleteString(trie *Trie, k string) {
- trie.MustDelete([]byte(k))
-}
-
-func TestDecodeNode(t *testing.T) {
- t.Parallel()
-
- var (
- hash = make([]byte, 20)
- elems = make([]byte, 20)
- )
- for i := 0; i < 5000000; i++ {
- prng.Read(hash)
- prng.Read(elems)
- decodeNode(hash, elems)
- }
-}
-
-func FuzzTrie(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- var steps = 500
- var input = bytes.NewReader(data)
- var finishedFn = func() bool {
- steps--
- return steps < 0 || input.Len() == 0
- }
- if err := runRandTest(generateSteps(finishedFn, input)); err != nil {
- t.Fatal(err)
- }
- })
-}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
deleted file mode 100644
index 8bd0a18ba3..0000000000
--- a/trie/trienode/node.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see
-
-package trienode
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/ava-labs/libevm/common"
-)
-
-// Node is a wrapper which contains the encoded blob of the trie node and its
-// node hash. It is general enough that can be used to represent trie node
-// corresponding to different trie implementations.
-type Node struct {
- Hash common.Hash // Node hash, empty for deleted node
- Blob []byte // Encoded node blob, nil for the deleted node
-}
-
-// Size returns the total memory size used by this node.
-func (n *Node) Size() int {
- return len(n.Blob) + common.HashLength
-}
-
-// IsDeleted returns the indicator if the node is marked as deleted.
-func (n *Node) IsDeleted() bool {
- return len(n.Blob) == 0
-}
-
-// New constructs a node with provided node information.
-func New(hash common.Hash, blob []byte) *Node {
- return &Node{Hash: hash, Blob: blob}
-}
-
-// NewDeleted constructs a node which is deleted.
-func NewDeleted() *Node { return New(common.Hash{}, nil) }
-
-// leaf represents a trie leaf node
-type leaf struct {
- Blob []byte // raw blob of leaf
- Parent common.Hash // the hash of parent node
-}
-
-// NodeSet contains a set of nodes collected during the commit operation.
-// Each node is keyed by path. It's not thread-safe to use.
-type NodeSet struct {
- Owner common.Hash
- Leaves []*leaf
- Nodes map[string]*Node
- updates int // the count of updated and inserted nodes
- deletes int // the count of deleted nodes
-}
-
-// NewNodeSet initializes a node set. The owner is zero for the account trie and
-// the owning account address hash for storage tries.
-func NewNodeSet(owner common.Hash) *NodeSet {
- return &NodeSet{
- Owner: owner,
- Nodes: make(map[string]*Node),
- }
-}
-
-// ForEachWithOrder iterates the nodes with the order from bottom to top,
-// right to left, nodes with the longest path will be iterated first.
-func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
- var paths []string
- for path := range set.Nodes {
- paths = append(paths, path)
- }
- // Bottom-up, the longest path first
- sort.Sort(sort.Reverse(sort.StringSlice(paths)))
- for _, path := range paths {
- callback(path, set.Nodes[path])
- }
-}
-
-// AddNode adds the provided node into set.
-func (set *NodeSet) AddNode(path []byte, n *Node) {
- if n.IsDeleted() {
- set.deletes += 1
- } else {
- set.updates += 1
- }
- set.Nodes[string(path)] = n
-}
-
-// Merge adds a set of nodes into the set.
-func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
- if set.Owner != owner {
- return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner)
- }
- for path, node := range nodes {
- prev, ok := set.Nodes[path]
- if ok {
- // overwrite happens, revoke the counter
- if prev.IsDeleted() {
- set.deletes -= 1
- } else {
- set.updates -= 1
- }
- }
- set.AddNode([]byte(path), node)
- }
- return nil
-}
-
-// AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can
-// we get rid of it?
-func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
- set.Leaves = append(set.Leaves, &leaf{Blob: blob, Parent: parent})
-}
-
-// Size returns the number of dirty nodes in set.
-func (set *NodeSet) Size() (int, int) {
- return set.updates, set.deletes
-}
-
-// Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can
-// we get rid of it?
-func (set *NodeSet) Hashes() []common.Hash {
- var ret []common.Hash
- for _, node := range set.Nodes {
- ret = append(ret, node.Hash)
- }
- return ret
-}
-
-// Summary returns a string-representation of the NodeSet.
-func (set *NodeSet) Summary() string {
- var out = new(strings.Builder)
- fmt.Fprintf(out, "nodeset owner: %v\n", set.Owner)
- if set.Nodes != nil {
- for path, n := range set.Nodes {
- // Deletion
- if n.IsDeleted() {
- fmt.Fprintf(out, " [-]: %x\n", path)
- continue
- }
- // Insertion or update
- fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash)
- }
- }
- for _, n := range set.Leaves {
- fmt.Fprintf(out, "[leaf]: %v\n", n)
- }
- return out.String()
-}
-
-// MergedNodeSet represents a merged node set for a group of tries.
-type MergedNodeSet struct {
- Sets map[common.Hash]*NodeSet
-}
-
-// NewMergedNodeSet initializes an empty merged set.
-func NewMergedNodeSet() *MergedNodeSet {
- return &MergedNodeSet{Sets: make(map[common.Hash]*NodeSet)}
-}
-
-// NewWithNodeSet constructs a merged nodeset with the provided single set.
-func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
- merged := NewMergedNodeSet()
- merged.Merge(set)
- return merged
-}
-
-// Merge merges the provided dirty nodes of a trie into the set. The assumption
-// is held that no duplicated set belonging to the same trie will be merged twice.
-func (set *MergedNodeSet) Merge(other *NodeSet) error {
- subset, present := set.Sets[other.Owner]
- if present {
- return subset.Merge(other.Owner, other.Nodes)
- }
- set.Sets[other.Owner] = other
- return nil
-}
-
-// Flatten returns a two-dimensional map for internal nodes.
-func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node {
- nodes := make(map[common.Hash]map[string]*Node)
- for owner, set := range set.Sets {
- nodes[owner] = set.Nodes
- }
- return nodes
-}
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
deleted file mode 100644
index 9bdc7bb9e8..0000000000
--- a/trie/triestate/state.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see
-
-package triestate
-
-import (
- "errors"
- "fmt"
- "sync"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
- "golang.org/x/crypto/sha3"
-)
-
-// Trie is an Ethereum state trie, can be implemented by Ethereum Merkle Patricia
-// tree or Verkle tree.
-type Trie interface {
- // Get returns the value for key stored in the trie.
- Get(key []byte) ([]byte, error)
-
- // Update associates key with value in the trie.
- Update(key, value []byte) error
-
- // Delete removes any existing value for key from the trie.
- Delete(key []byte) error
-
- // Commit the trie and returns a set of dirty nodes generated along with
- // the new root hash.
- Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
-}
-
-// TrieLoader wraps functions to load tries.
-type TrieLoader interface {
- // OpenTrie opens the main account trie.
- OpenTrie(root common.Hash) (Trie, error)
-
- // OpenStorageTrie opens the storage trie of an account.
- OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
-}
-
-// Set represents a collection of mutated states during a state transition.
-// The value refers to the original content of state before the transition
-// is made. Nil means that the state was not present previously.
-type Set struct {
- Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
- Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
- Incomplete map[common.Address]struct{} // Indicator whether the storage is incomplete due to large deletion
- size common.StorageSize // Approximate size of set
-}
-
-// New constructs the state set with provided data.
-func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
- return &Set{
- Accounts: accounts,
- Storages: storages,
- Incomplete: incomplete,
- }
-}
-
-// Size returns the approximate memory size occupied by the set.
-func (s *Set) Size() common.StorageSize {
- if s.size != 0 {
- return s.size
- }
- for _, account := range s.Accounts {
- s.size += common.StorageSize(common.AddressLength + len(account))
- }
- for _, slots := range s.Storages {
- for _, val := range slots {
- s.size += common.StorageSize(common.HashLength + len(val))
- }
- s.size += common.StorageSize(common.AddressLength)
- }
- s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
- return s.size
-}
-
-// context wraps all fields for executing state diffs.
-type context struct {
- prevRoot common.Hash
- postRoot common.Hash
- accounts map[common.Address][]byte
- storages map[common.Address]map[common.Hash][]byte
- accountTrie Trie
- nodes *trienode.MergedNodeSet
-}
-
-// Apply traverses the provided state diffs, apply them in the associated
-// post-state and return the generated dirty trie nodes. The state can be
-// loaded via the provided trie loader.
-func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) {
- tr, err := loader.OpenTrie(postRoot)
- if err != nil {
- return nil, err
- }
- ctx := &context{
- prevRoot: prevRoot,
- postRoot: postRoot,
- accounts: accounts,
- storages: storages,
- accountTrie: tr,
- nodes: trienode.NewMergedNodeSet(),
- }
- for addr, account := range accounts {
- var err error
- if len(account) == 0 {
- err = deleteAccount(ctx, loader, addr)
- } else {
- err = updateAccount(ctx, loader, addr)
- }
- if err != nil {
- return nil, fmt.Errorf("failed to revert state, err: %w", err)
- }
- }
- root, result, err := tr.Commit(false)
- if err != nil {
- return nil, err
- }
- if root != prevRoot {
- return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
- }
- if err := ctx.nodes.Merge(result); err != nil {
- return nil, err
- }
- return ctx.nodes.Flatten(), nil
-}
-
-// updateAccount the account was present in prev-state, and may or may not
-// existent in post-state. Apply the reverse diff and verify if the storage
-// root matches the one in prev-state account.
-func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
- // The account was present in prev-state, decode it from the
- // 'slim-rlp' format bytes.
- h := newHasher()
- defer h.release()
-
- addrHash := h.hash(addr.Bytes())
- prev, err := types.FullAccount(ctx.accounts[addr])
- if err != nil {
- return err
- }
- // The account may or may not existent in post-state, try to
- // load it and decode if it's found.
- blob, err := ctx.accountTrie.Get(addrHash.Bytes())
- if err != nil {
- return err
- }
- post := types.NewEmptyStateAccount()
- if len(blob) != 0 {
- if err := rlp.DecodeBytes(blob, &post); err != nil {
- return err
- }
- }
- // Apply all storage changes into the post-state storage trie.
- st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
- if err != nil {
- return err
- }
- for key, val := range ctx.storages[addr] {
- var err error
- if len(val) == 0 {
- err = st.Delete(key.Bytes())
- } else {
- err = st.Update(key.Bytes(), val)
- }
- if err != nil {
- return err
- }
- }
- root, result, err := st.Commit(false)
- if err != nil {
- return err
- }
- if root != prev.Root {
- return errors.New("failed to reset storage trie")
- }
- // The returned set can be nil if storage trie is not changed
- // at all.
- if result != nil {
- if err := ctx.nodes.Merge(result); err != nil {
- return err
- }
- }
- // Write the prev-state account into the main trie
- full, err := rlp.EncodeToBytes(prev)
- if err != nil {
- return err
- }
- return ctx.accountTrie.Update(addrHash.Bytes(), full)
-}
-
-// deleteAccount the account was not present in prev-state, and is expected
-// to be existent in post-state. Apply the reverse diff and verify if the
-// account and storage is wiped out correctly.
-func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
- // The account must be existent in post-state, load the account.
- h := newHasher()
- defer h.release()
-
- addrHash := h.hash(addr.Bytes())
- blob, err := ctx.accountTrie.Get(addrHash.Bytes())
- if err != nil {
- return err
- }
- if len(blob) == 0 {
- return fmt.Errorf("account is non-existent %#x", addrHash)
- }
- var post types.StateAccount
- if err := rlp.DecodeBytes(blob, &post); err != nil {
- return err
- }
- st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
- if err != nil {
- return err
- }
- for key, val := range ctx.storages[addr] {
- if len(val) != 0 {
- return errors.New("expect storage deletion")
- }
- if err := st.Delete(key.Bytes()); err != nil {
- return err
- }
- }
- root, result, err := st.Commit(false)
- if err != nil {
- return err
- }
- if root != types.EmptyRootHash {
- return errors.New("failed to clear storage trie")
- }
- // The returned set can be nil if storage trie is not changed
- // at all.
- if result != nil {
- if err := ctx.nodes.Merge(result); err != nil {
- return err
- }
- }
- // Delete the post-state account from the main trie.
- return ctx.accountTrie.Delete(addrHash.Bytes())
-}
-
-// hasher is used to compute the sha256 hash of the provided data.
-type hasher struct{ sha crypto.KeccakState }
-
-var hasherPool = sync.Pool{
- New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
-}
-
-func newHasher() *hasher {
- return hasherPool.Get().(*hasher)
-}
-
-func (h *hasher) hash(data []byte) common.Hash {
- return crypto.HashData(h.sha, data)
-}
-
-func (h *hasher) release() {
- hasherPool.Put(h)
-}
diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go
deleted file mode 100644
index 0287a7d879..0000000000
--- a/trie/utils/verkle.go
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2023 go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package utils
-
-import (
- "encoding/binary"
- "sync"
-
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/libevm/common/lru"
- "github.com/crate-crypto/go-ipa/bandersnatch/fr"
- "github.com/gballet/go-verkle"
- "github.com/holiman/uint256"
-)
-
-const (
- // The spec of verkle key encoding can be found here.
- // https://notes.ethereum.org/@vbuterin/verkle_tree_eip#Tree-embedding
- VersionLeafKey = 0
- BalanceLeafKey = 1
- NonceLeafKey = 2
- CodeKeccakLeafKey = 3
- CodeSizeLeafKey = 4
-)
-
-var (
- zero = uint256.NewInt(0)
- verkleNodeWidthLog2 = 8
- headerStorageOffset = uint256.NewInt(64)
- mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(256), 31-uint(verkleNodeWidthLog2))
- codeOffset = uint256.NewInt(128)
- verkleNodeWidth = uint256.NewInt(256)
- codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset)
-
- index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64]
-
- // cacheHitGauge is the metric to track how many cache hit occurred.
- cacheHitGauge = metrics.NewRegisteredGauge("trie/verkle/cache/hit", nil)
-
- // cacheMissGauge is the metric to track how many cache miss occurred.
- cacheMissGauge = metrics.NewRegisteredGauge("trie/verkle/cache/miss", nil)
-)
-
-func init() {
- // The byte array is the Marshalled output of the point computed as such:
- //
- // var (
- // config = verkle.GetConfig()
- // fr verkle.Fr
- // )
- // verkle.FromLEBytes(&fr, []byte{2, 64})
- // point := config.CommitToPoly([]verkle.Fr{fr}, 1)
- index0Point = new(verkle.Point)
- err := index0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191})
- if err != nil {
- panic(err)
- }
-}
-
-// PointCache is the LRU cache for storing evaluated address commitment.
-type PointCache struct {
- lru lru.BasicLRU[string, *verkle.Point]
- lock sync.RWMutex
-}
-
-// NewPointCache returns the cache with specified size.
-func NewPointCache(maxItems int) *PointCache {
- return &PointCache{
- lru: lru.NewBasicLRU[string, *verkle.Point](maxItems),
- }
-}
-
-// Get returns the cached commitment for the specified address, or computing
-// it on the flight.
-func (c *PointCache) Get(addr []byte) *verkle.Point {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- p, ok := c.lru.Get(string(addr))
- if ok {
- cacheHitGauge.Inc(1)
- return p
- }
- cacheMissGauge.Inc(1)
- p = evaluateAddressPoint(addr)
- c.lru.Add(string(addr), p)
- return p
-}
-
-// GetStem returns the first 31 bytes of the tree key as the tree stem. It only
-// works for the account metadata whose treeIndex is 0.
-func (c *PointCache) GetStem(addr []byte) []byte {
- p := c.Get(addr)
- return pointToHash(p, 0)[:31]
-}
-
-// GetTreeKey performs both the work of the spec's get_tree_key function, and that
-// of pedersen_hash: it builds the polynomial in pedersen_hash without having to
-// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte
-// array. Since at most the first 5 coefficients of the polynomial will be non-zero,
-// these 5 coefficients are created directly.
-func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte {
- if len(address) < 32 {
- var aligned [32]byte
- address = append(aligned[:32-len(address)], address...)
- }
- // poly = [2+256*64, address_le_low, address_le_high, tree_index_le_low, tree_index_le_high]
- var poly [5]fr.Element
-
- // 32-byte address, interpreted as two little endian
- // 16-byte numbers.
- verkle.FromLEBytes(&poly[1], address[:16])
- verkle.FromLEBytes(&poly[2], address[16:])
-
- // treeIndex must be interpreted as a 32-byte aligned little-endian integer.
- // e.g: if treeIndex is 0xAABBCC, we need the byte representation to be 0xCCBBAA00...00.
- // poly[3] = LE({CC,BB,AA,00...0}) (16 bytes), poly[4]=LE({00,00,...}) (16 bytes).
- //
- // To avoid unnecessary endianness conversions for go-ipa, we do some trick:
- // - poly[3]'s byte representation is the same as the *top* 16 bytes (trieIndexBytes[16:]) of
- // 32-byte aligned big-endian representation (BE({00,...,AA,BB,CC})).
- // - poly[4]'s byte representation is the same as the *low* 16 bytes (trieIndexBytes[:16]) of
- // the 32-byte aligned big-endian representation (BE({00,00,...}).
- trieIndexBytes := treeIndex.Bytes32()
- verkle.FromBytes(&poly[3], trieIndexBytes[16:])
- verkle.FromBytes(&poly[4], trieIndexBytes[:16])
-
- cfg := verkle.GetConfig()
- ret := cfg.CommitToPoly(poly[:], 0)
-
- // add a constant point corresponding to poly[0]=[2+256*64].
- ret.Add(ret, index0Point)
-
- return pointToHash(ret, subIndex)
-}
-
-// GetTreeKeyWithEvaluatedAddress is basically identical to GetTreeKey, the only
-// difference is a part of polynomial is already evaluated.
-//
-// Specifically, poly = [2+256*64, address_le_low, address_le_high] is already
-// evaluated.
-func GetTreeKeyWithEvaluatedAddress(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte {
- var poly [5]fr.Element
-
- poly[0].SetZero()
- poly[1].SetZero()
- poly[2].SetZero()
-
- // little-endian, 32-byte aligned treeIndex
- var index [32]byte
- for i := 0; i < len(treeIndex); i++ {
- binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i])
- }
- verkle.FromLEBytes(&poly[3], index[:16])
- verkle.FromLEBytes(&poly[4], index[16:])
-
- cfg := verkle.GetConfig()
- ret := cfg.CommitToPoly(poly[:], 0)
-
- // add the pre-evaluated address
- ret.Add(ret, evaluated)
-
- return pointToHash(ret, subIndex)
-}
-
-// VersionKey returns the verkle tree key of the version field for the specified account.
-func VersionKey(address []byte) []byte {
- return GetTreeKey(address, zero, VersionLeafKey)
-}
-
-// BalanceKey returns the verkle tree key of the balance field for the specified account.
-func BalanceKey(address []byte) []byte {
- return GetTreeKey(address, zero, BalanceLeafKey)
-}
-
-// NonceKey returns the verkle tree key of the nonce field for the specified account.
-func NonceKey(address []byte) []byte {
- return GetTreeKey(address, zero, NonceLeafKey)
-}
-
-// CodeKeccakKey returns the verkle tree key of the code keccak field for
-// the specified account.
-func CodeKeccakKey(address []byte) []byte {
- return GetTreeKey(address, zero, CodeKeccakLeafKey)
-}
-
-// CodeSizeKey returns the verkle tree key of the code size field for the
-// specified account.
-func CodeSizeKey(address []byte) []byte {
- return GetTreeKey(address, zero, CodeSizeLeafKey)
-}
-
-func codeChunkIndex(chunk *uint256.Int) (*uint256.Int, byte) {
- var (
- chunkOffset = new(uint256.Int).Add(codeOffset, chunk)
- treeIndex = new(uint256.Int).Div(chunkOffset, verkleNodeWidth)
- subIndexMod = new(uint256.Int).Mod(chunkOffset, verkleNodeWidth)
- )
- var subIndex byte
- if len(subIndexMod) != 0 {
- subIndex = byte(subIndexMod[0])
- }
- return treeIndex, subIndex
-}
-
-// CodeChunkKey returns the verkle tree key of the code chunk for the
-// specified account.
-func CodeChunkKey(address []byte, chunk *uint256.Int) []byte {
- treeIndex, subIndex := codeChunkIndex(chunk)
- return GetTreeKey(address, treeIndex, subIndex)
-}
-
-func storageIndex(bytes []byte) (*uint256.Int, byte) {
- // If the storage slot is in the header, we need to add the header offset.
- var key uint256.Int
- key.SetBytes(bytes)
- if key.Cmp(codeStorageDelta) < 0 {
- // This addition is always safe; it can't ever overflow since pos
-
-package utils
-
-import (
- "bytes"
- "testing"
-
- "github.com/gballet/go-verkle"
- "github.com/holiman/uint256"
-)
-
-func TestTreeKey(t *testing.T) {
- var (
- address = []byte{0x01}
- addressEval = evaluateAddressPoint(address)
- smallIndex = uint256.NewInt(1)
- largeIndex = uint256.NewInt(10000)
- smallStorage = []byte{0x1}
- largeStorage = bytes.Repeat([]byte{0xff}, 16)
- )
- if !bytes.Equal(VersionKey(address), VersionKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched version key")
- }
- if !bytes.Equal(BalanceKey(address), BalanceKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched balance key")
- }
- if !bytes.Equal(NonceKey(address), NonceKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched nonce key")
- }
- if !bytes.Equal(CodeKeccakKey(address), CodeKeccakKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched code keccak key")
- }
- if !bytes.Equal(CodeSizeKey(address), CodeSizeKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched code size key")
- }
- if !bytes.Equal(CodeChunkKey(address, smallIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, smallIndex)) {
- t.Fatal("Unmatched code chunk key")
- }
- if !bytes.Equal(CodeChunkKey(address, largeIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, largeIndex)) {
- t.Fatal("Unmatched code chunk key")
- }
- if !bytes.Equal(StorageSlotKey(address, smallStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, smallStorage)) {
- t.Fatal("Unmatched storage slot key")
- }
- if !bytes.Equal(StorageSlotKey(address, largeStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, largeStorage)) {
- t.Fatal("Unmatched storage slot key")
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ava-labs/coreth/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkTreeKey
-// BenchmarkTreeKey-8 398731 2961 ns/op 32 B/op 1 allocs/op
-func BenchmarkTreeKey(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- BalanceKey([]byte{0x01})
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ava-labs/coreth/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkTreeKeyWithEvaluation
-// BenchmarkTreeKeyWithEvaluation-8 513855 2324 ns/op 32 B/op 1 allocs/op
-func BenchmarkTreeKeyWithEvaluation(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- addr := []byte{0x01}
- eval := evaluateAddressPoint(addr)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- BalanceKeyWithEvaluatedAddress(eval)
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ava-labs/coreth/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkStorageKey
-// BenchmarkStorageKey-8 230516 4584 ns/op 96 B/op 3 allocs/op
-func BenchmarkStorageKey(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- StorageSlotKey([]byte{0x01}, bytes.Repeat([]byte{0xff}, 32))
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ava-labs/coreth/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkStorageKeyWithEvaluation
-// BenchmarkStorageKeyWithEvaluation-8 320125 3753 ns/op 96 B/op 3 allocs/op
-func BenchmarkStorageKeyWithEvaluation(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- addr := []byte{0x01}
- eval := evaluateAddressPoint(addr)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- StorageSlotKeyWithEvaluatedAddress(eval, bytes.Repeat([]byte{0xff}, 32))
- }
-}
diff --git a/trie/verkle.go b/trie/verkle.go
deleted file mode 100644
index 7c6eed0f43..0000000000
--- a/trie/verkle.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2023 go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/utils"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/gballet/go-verkle"
- "github.com/holiman/uint256"
-)
-
-var (
- zero [32]byte
- errInvalidRootType = errors.New("invalid node type for root")
-)
-
-// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie
-// interface so that Verkle trees can be reused verbatim.
-type VerkleTrie struct {
- root verkle.VerkleNode
- cache *utils.PointCache
- reader *trieReader
-}
-
-// NewVerkleTrie constructs a verkle tree based on the specified root hash.
-func NewVerkleTrie(root common.Hash, db database.Database, cache *utils.PointCache) (*VerkleTrie, error) {
- reader, err := newTrieReader(root, common.Hash{}, db)
- if err != nil {
- return nil, err
- }
- // Parse the root verkle node if it's not empty.
- node := verkle.New()
- if root != types.EmptyVerkleHash && root != types.EmptyRootHash {
- blob, err := reader.node(nil, common.Hash{})
- if err != nil {
- return nil, err
- }
- node, err = verkle.ParseNode(blob, 0)
- if err != nil {
- return nil, err
- }
- }
- return &VerkleTrie{
- root: node,
- cache: cache,
- reader: reader,
- }, nil
-}
-
-// GetKey returns the sha3 preimage of a hashed key that was previously used
-// to store a value.
-func (t *VerkleTrie) GetKey(key []byte) []byte {
- return key
-}
-
-// GetAccount implements state.Trie, retrieving the account with the specified
-// account address. If the specified account is not in the verkle tree, nil will
-// be returned. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error) {
- var (
- acc = &types.StateAccount{}
- values [][]byte
- err error
- )
- switch n := t.root.(type) {
- case *verkle.InternalNode:
- values, err = n.GetValuesAtStem(t.cache.GetStem(addr[:]), t.nodeResolver)
- if err != nil {
- return nil, fmt.Errorf("GetAccount (%x) error: %v", addr, err)
- }
- default:
- return nil, errInvalidRootType
- }
- if values == nil {
- return nil, nil
- }
- // Decode nonce in little-endian
- if len(values[utils.NonceLeafKey]) > 0 {
- acc.Nonce = binary.LittleEndian.Uint64(values[utils.NonceLeafKey])
- }
- // Decode balance in little-endian
- var balance [32]byte
- copy(balance[:], values[utils.BalanceLeafKey])
- for i := 0; i < len(balance)/2; i++ {
- balance[len(balance)-i-1], balance[i] = balance[i], balance[len(balance)-i-1]
- }
- acc.Balance = new(uint256.Int).SetBytes32(balance[:])
-
- // Decode codehash
- acc.CodeHash = values[utils.CodeKeccakLeafKey]
-
- // TODO account.Root is leave as empty. How should we handle the legacy account?
- return acc, nil
-}
-
-// GetStorage implements state.Trie, retrieving the storage slot with the specified
-// account address and storage key. If the specified slot is not in the verkle tree,
-// nil will be returned. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key)
- val, err := t.root.Get(k, t.nodeResolver)
- if err != nil {
- return nil, err
- }
- return common.TrimLeftZeroes(val), nil
-}
-
-// UpdateAccount implements state.Trie, writing the provided account into the tree.
-// If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) error {
- var (
- err error
- nonce, balance [32]byte
- values = make([][]byte, verkle.NodeWidth)
- )
- values[utils.VersionLeafKey] = zero[:]
- values[utils.CodeKeccakLeafKey] = acc.CodeHash[:]
-
- // Encode nonce in little-endian
- binary.LittleEndian.PutUint64(nonce[:], acc.Nonce)
- values[utils.NonceLeafKey] = nonce[:]
-
- // Encode balance in little-endian
- bytes := acc.Balance.Bytes()
- if len(bytes) > 0 {
- for i, b := range bytes {
- balance[len(bytes)-i-1] = b
- }
- }
- values[utils.BalanceLeafKey] = balance[:]
-
- switch n := t.root.(type) {
- case *verkle.InternalNode:
- err = n.InsertValuesAtStem(t.cache.GetStem(addr[:]), values, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err)
- }
- default:
- return errInvalidRootType
- }
- // TODO figure out if the code size needs to be updated, too
- return nil
-}
-
-// UpdateStorage implements state.Trie, writing the provided storage slot into
-// the tree. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error {
- // Left padding the slot value to 32 bytes.
- var v [32]byte
- if len(value) >= 32 {
- copy(v[:], value[:32])
- } else {
- copy(v[32-len(value):], value[:])
- }
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(address.Bytes()), key)
- return t.root.Insert(k, v[:], t.nodeResolver)
-}
-
-// DeleteAccount implements state.Trie, deleting the specified account from the
-// trie. If the account was not existent in the trie, no error will be returned.
-// If the trie is corrupted, an error will be returned.
-func (t *VerkleTrie) DeleteAccount(addr common.Address) error {
- var (
- err error
- values = make([][]byte, verkle.NodeWidth)
- )
- for i := 0; i < verkle.NodeWidth; i++ {
- values[i] = zero[:]
- }
- switch n := t.root.(type) {
- case *verkle.InternalNode:
- err = n.InsertValuesAtStem(t.cache.GetStem(addr.Bytes()), values, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("DeleteAccount (%x) error: %v", addr, err)
- }
- default:
- return errInvalidRootType
- }
- return nil
-}
-
-// DeleteStorage implements state.Trie, deleting the specified storage slot from
-// the trie. If the storage slot was not existent in the trie, no error will be
-// returned. If the trie is corrupted, an error will be returned.
-func (t *VerkleTrie) DeleteStorage(addr common.Address, key []byte) error {
- var zero [32]byte
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key)
- return t.root.Insert(k, zero[:], t.nodeResolver)
-}
-
-// Hash returns the root hash of the tree. It does not write to the database and
-// can be used even if the tree doesn't have one.
-func (t *VerkleTrie) Hash() common.Hash {
- return t.root.Commit().Bytes()
-}
-
-// Commit writes all nodes to the tree's memory database.
-func (t *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet, error) {
- root, ok := t.root.(*verkle.InternalNode)
- if !ok {
- return common.Hash{}, nil, errors.New("unexpected root node type")
- }
- nodes, err := root.BatchSerialize()
- if err != nil {
- return common.Hash{}, nil, fmt.Errorf("serializing tree nodes: %s", err)
- }
- nodeset := trienode.NewNodeSet(common.Hash{})
- for _, node := range nodes {
- // hash parameter is not used in pathdb
- nodeset.AddNode(node.Path, trienode.New(common.Hash{}, node.SerializedBytes))
- }
- // Serialize root commitment form
- return t.Hash(), nodeset, nil
-}
-
-// NodeIterator implements state.Trie, returning an iterator that returns
-// nodes of the trie. Iteration starts at the key after the given start key.
-//
-// TODO(gballet, rjl493456442) implement it.
-func (t *VerkleTrie) NodeIterator(startKey []byte) (NodeIterator, error) {
- panic("not implemented")
-}
-
-// Prove implements state.Trie, constructing a Merkle proof for key. The result
-// contains all encoded nodes on the path to the value at key. The value itself
-// is also included in the last node and can be retrieved by verifying the proof.
-//
-// If the trie does not contain a value for key, the returned proof contains all
-// nodes of the longest existing prefix of the key (at least the root), ending
-// with the node that proves the absence of the key.
-//
-// TODO(gballet, rjl493456442) implement it.
-func (t *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
- panic("not implemented")
-}
-
-// Copy returns a deep-copied verkle tree.
-func (t *VerkleTrie) Copy() *VerkleTrie {
- return &VerkleTrie{
- root: t.root.Copy(),
- cache: t.cache,
- reader: t.reader,
- }
-}
-
-// IsVerkle indicates if the trie is a Verkle trie.
-func (t *VerkleTrie) IsVerkle() bool {
- return true
-}
-
-// ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which
-// are actual code, and 1 byte is the pushdata offset).
-type ChunkedCode []byte
-
-// Copy the values here so as to avoid an import cycle
-const (
- PUSH1 = byte(0x60)
- PUSH32 = byte(0x7f)
-)
-
-// ChunkifyCode generates the chunked version of an array representing EVM bytecode
-func ChunkifyCode(code []byte) ChunkedCode {
- var (
- chunkOffset = 0 // offset in the chunk
- chunkCount = len(code) / 31
- codeOffset = 0 // offset in the code
- )
- if len(code)%31 != 0 {
- chunkCount++
- }
- chunks := make([]byte, chunkCount*32)
- for i := 0; i < chunkCount; i++ {
- // number of bytes to copy, 31 unless the end of the code has been reached.
- end := 31 * (i + 1)
- if len(code) < end {
- end = len(code)
- }
- copy(chunks[i*32+1:], code[31*i:end]) // copy the code itself
-
- // chunk offset = taken from the last chunk.
- if chunkOffset > 31 {
- // skip offset calculation if push data covers the whole chunk
- chunks[i*32] = 31
- chunkOffset = 1
- continue
- }
- chunks[32*i] = byte(chunkOffset)
- chunkOffset = 0
-
- // Check each instruction and update the offset it should be 0 unless
- // a PUSH-N overflows.
- for ; codeOffset < end; codeOffset++ {
- if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 {
- codeOffset += int(code[codeOffset] - PUSH1 + 1)
- if codeOffset+1 >= 31*(i+1) {
- codeOffset++
- chunkOffset = codeOffset - 31*(i+1)
- break
- }
- }
- }
- }
- return chunks
-}
-
-// UpdateContractCode implements state.Trie, writing the provided contract code
-// into the trie.
-func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
- var (
- chunks = ChunkifyCode(code)
- values [][]byte
- key []byte
- err error
- )
- for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 {
- groupOffset := (chunknr + 128) % 256
- if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ {
- values = make([][]byte, verkle.NodeWidth)
- key = utils.CodeChunkKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), uint256.NewInt(chunknr))
- }
- values[groupOffset] = chunks[i : i+32]
-
- // Reuse the calculated key to also update the code size.
- if i == 0 {
- cs := make([]byte, 32)
- binary.LittleEndian.PutUint64(cs, uint64(len(code)))
- values[utils.CodeSizeLeafKey] = cs
- }
- if groupOffset == 255 || len(chunks)-i <= 32 {
- switch root := t.root.(type) {
- case *verkle.InternalNode:
- err = root.InsertValuesAtStem(key[:31], values, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err)
- }
- default:
- return errInvalidRootType
- }
- }
- }
- return nil
-}
-
-func (t *VerkleTrie) ToDot() string {
- return verkle.ToDot(t.root)
-}
-
-func (t *VerkleTrie) nodeResolver(path []byte) ([]byte, error) {
- return t.reader.node(path, common.Hash{})
-}
diff --git a/trie/verkle_test.go b/trie/verkle_test.go
deleted file mode 100644
index cd21b57d15..0000000000
--- a/trie/verkle_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2023 go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "reflect"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/utils"
- "github.com/ava-labs/libevm/common"
- "github.com/holiman/uint256"
-)
-
-var (
- accounts = map[common.Address]*types.StateAccount{
- {1}: {
- Nonce: 100,
- Balance: uint256.NewInt(100),
- CodeHash: common.Hash{0x1}.Bytes(),
- },
- {2}: {
- Nonce: 200,
- Balance: uint256.NewInt(200),
- CodeHash: common.Hash{0x2}.Bytes(),
- },
- }
- storages = map[common.Address]map[common.Hash][]byte{
- {1}: {
- common.Hash{10}: []byte{10},
- common.Hash{11}: []byte{11},
- common.MaxHash: []byte{0xff},
- },
- {2}: {
- common.Hash{20}: []byte{20},
- common.Hash{21}: []byte{21},
- common.MaxHash: []byte{0xff},
- },
- }
-)
-
-func TestVerkleTreeReadWrite(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
- tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
-
- for addr, acct := range accounts {
- if err := tr.UpdateAccount(addr, acct); err != nil {
- t.Fatalf("Failed to update account, %v", err)
- }
- for key, val := range storages[addr] {
- if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil {
- t.Fatalf("Failed to update account, %v", err)
- }
- }
- }
-
- for addr, acct := range accounts {
- stored, err := tr.GetAccount(addr)
- if err != nil {
- t.Fatalf("Failed to get account, %v", err)
- }
- if !reflect.DeepEqual(stored, acct) {
- t.Fatal("account is not matched")
- }
- for key, val := range storages[addr] {
- stored, err := tr.GetStorage(addr, key.Bytes())
- if err != nil {
- t.Fatalf("Failed to get storage, %v", err)
- }
- if !bytes.Equal(stored, val) {
- t.Fatal("storage is not matched")
- }
- }
- }
-}
diff --git a/triedb/database.go b/triedb/database.go
deleted file mode 100644
index 88d6add3a8..0000000000
--- a/triedb/database.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package triedb
-
-import (
- "errors"
-
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/coreth/triedb/hashdb"
- "github.com/ava-labs/coreth/triedb/pathdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-// Config defines all necessary options for database.
-type Config struct {
- Preimages bool // Flag whether the preimage of node key is recorded
- IsVerkle bool // Flag whether the db is holding a verkle tree
- HashDB *hashdb.Config // Configs for hash-based scheme
- PathDB *pathdb.Config // Configs for experimental path-based scheme
-}
-
-// HashDefaults represents a config for using hash-based scheme with
-// default settings.
-var HashDefaults = &Config{
- Preimages: false,
- HashDB: hashdb.Defaults,
-}
-
-// backend defines the methods needed to access/update trie nodes in different
-// state scheme.
-type backend interface {
- // Scheme returns the identifier of used storage scheme.
- Scheme() string
-
- // Initialized returns an indicator if the state data is already initialized
- // according to the state scheme.
- Initialized(genesisRoot common.Hash) bool
-
- // Size returns the current storage size of the diff layers on top of the
- // disk layer and the storage size of the nodes cached in the disk layer.
- //
- // For hash scheme, there is no differentiation between diff layer nodes
- // and dirty disk layer nodes, so both are merged into the second return.
- Size() (common.StorageSize, common.StorageSize)
-
- // Update performs a state transition by committing dirty nodes contained
- // in the given set in order to update state from the specified parent to
- // the specified root.
- //
- // The passed in maps(nodes, states) will be retained to avoid copying
- // everything. Therefore, these maps must not be changed afterwards.
- Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error
-
- // Commit writes all relevant trie nodes belonging to the specified state
- // to disk. Report specifies whether logs will be displayed in info level.
- Commit(root common.Hash, report bool) error
-
- // Close closes the trie database backend and releases all held resources.
- Close() error
-}
-
-// Database is the wrapper of the underlying backend which is shared by different
-// types of node backend as an entrypoint. It's responsible for all interactions
-// relevant with trie nodes and node preimages.
-type Database struct {
- config *Config // Configuration for trie database
- diskdb ethdb.Database // Persistent database to store the snapshot
- preimages *preimageStore // The store for caching preimages
- backend backend // The backend for managing trie nodes
-}
-
-// NewDatabase initializes the trie database with default settings, note
-// the legacy hash-based scheme is used by default.
-func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
- // Sanitize the config and use the default one if it's not specified.
- if config == nil {
- config = HashDefaults
- }
- var preimages *preimageStore
- if config.Preimages {
- preimages = newPreimageStore(diskdb)
- }
- db := &Database{
- config: config,
- diskdb: diskdb,
- preimages: preimages,
- }
- if config.HashDB != nil && config.PathDB != nil {
- log.Crit("Both 'hash' and 'path' mode are configured")
- }
- if config.PathDB != nil {
- db.backend = pathdb.New(diskdb, config.PathDB)
- } else {
- var resolver hashdb.ChildResolver
- if config.IsVerkle {
- // TODO define verkle resolver
- log.Crit("Verkle node resolver is not defined")
- } else {
- resolver = trie.MerkleResolver{}
- }
- db.backend = hashdb.New(diskdb, config.HashDB, resolver)
- }
- return db
-}
-
-// Reader returns a reader for accessing all trie nodes with provided state root.
-// An error will be returned if the requested state is not available.
-func (db *Database) Reader(blockRoot common.Hash) (database.Reader, error) {
- switch b := db.backend.(type) {
- case *hashdb.Database:
- return b.Reader(blockRoot)
- case *pathdb.Database:
- return b.Reader(blockRoot)
- }
- return nil, errors.New("unknown backend")
-}
-
-// Update performs a state transition by committing dirty nodes contained in the
-// given set in order to update state from the specified parent to the specified
-// root. The held pre-images accumulated up to this point will be flushed in case
-// the size exceeds the threshold.
-//
-// The passed in maps(nodes, states) will be retained to avoid copying everything.
-// Therefore, these maps must not be changed afterwards.
-func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
- if db.preimages != nil {
- db.preimages.commit(false)
- }
- return db.backend.Update(root, parent, block, nodes, states)
-}
-
-func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
- if db.preimages != nil {
- db.preimages.commit(false)
- }
- hdb, ok := db.backend.(*hashdb.Database)
- if ok {
- return hdb.UpdateAndReferenceRoot(root, parent, block, nodes, states)
- }
- return db.backend.Update(root, parent, block, nodes, states)
-}
-
-// Commit iterates over all the children of a particular node, writes them out
-// to disk. As a side effect, all pre-images accumulated up to this point are
-// also written.
-func (db *Database) Commit(root common.Hash, report bool) error {
- if db.preimages != nil {
- db.preimages.commit(true)
- }
- return db.backend.Commit(root, report)
-}
-
-// Size returns the storage size of diff layer nodes above the persistent disk
-// layer, the dirty nodes buffered within the disk layer, and the size of cached
-// preimages.
-func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) {
- var (
- diffs, nodes common.StorageSize
- preimages common.StorageSize
- )
- diffs, nodes = db.backend.Size()
- if db.preimages != nil {
- preimages = db.preimages.size()
- }
- return diffs, nodes, preimages
-}
-
-// Initialized returns an indicator if the state data is already initialized
-// according to the state scheme.
-func (db *Database) Initialized(genesisRoot common.Hash) bool {
- return db.backend.Initialized(genesisRoot)
-}
-
-// Scheme returns the node scheme used in the database.
-func (db *Database) Scheme() string {
- return db.backend.Scheme()
-}
-
-// Close flushes the dangling preimages to disk and closes the trie database.
-// It is meant to be called when closing the blockchain object, so that all
-// resources held can be released correctly.
-func (db *Database) Close() error {
- db.WritePreimages()
- return db.backend.Close()
-}
-
-// WritePreimages flushes all accumulated preimages to disk forcibly.
-func (db *Database) WritePreimages() {
- if db.preimages != nil {
- db.preimages.commit(true)
- }
-}
-
-// Preimage retrieves a cached trie node pre-image from preimage store.
-func (db *Database) Preimage(hash common.Hash) []byte {
- if db.preimages == nil {
- return nil
- }
- return db.preimages.preimage(hash)
-}
-
-// InsertPreimage writes pre-images of trie node to the preimage store.
-func (db *Database) InsertPreimage(preimages map[common.Hash][]byte) {
- if db.preimages == nil {
- return
- }
- db.preimages.insertPreimage(preimages)
-}
-
-// Cap iteratively flushes old but still referenced trie nodes until the total
-// memory usage goes below the given threshold. The held pre-images accumulated
-// up to this point will be flushed in case the size exceeds the threshold.
-//
-// It's only supported by hash-based database and will return an error for others.
-func (db *Database) Cap(limit common.StorageSize) error {
- hdb, ok := db.backend.(*hashdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- if db.preimages != nil {
- db.preimages.commit(false)
- }
- return hdb.Cap(limit)
-}
-
-// Reference adds a new reference from a parent node to a child node. This function
-// is used to add reference between internal trie node and external node(e.g. storage
-// trie root), all internal trie nodes are referenced together by database itself.
-//
-// It's only supported by hash-based database and will return an error for others.
-func (db *Database) Reference(root common.Hash, parent common.Hash) error {
- hdb, ok := db.backend.(*hashdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- hdb.Reference(root, parent)
- return nil
-}
-
-// Dereference removes an existing reference from a root node. It's only
-// supported by hash-based database and will return an error for others.
-func (db *Database) Dereference(root common.Hash) error {
- hdb, ok := db.backend.(*hashdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- hdb.Dereference(root)
- return nil
-}
-
-// Recover rollbacks the database to a specified historical point. The state is
-// supported as the rollback destination only if it's canonical state and the
-// corresponding trie histories are existent. It's only supported by path-based
-// database and will return an error for others.
-func (db *Database) Recover(target common.Hash) error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- var loader triestate.TrieLoader
- if db.config.IsVerkle {
- // TODO define verkle loader
- log.Crit("Verkle loader is not defined")
- } else {
- loader = trie.NewMerkleLoader(db)
- }
- return pdb.Recover(target, loader)
-}
-
-// Recoverable returns the indicator if the specified state is enabled to be
-// recovered. It's only supported by path-based database and will return an
-// error for others.
-func (db *Database) Recoverable(root common.Hash) (bool, error) {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return false, errors.New("not supported")
- }
- return pdb.Recoverable(root), nil
-}
-
-// Disable deactivates the database and invalidates all available state layers
-// as stale to prevent access to the persistent state, which is in the syncing
-// stage.
-//
-// It's only supported by path-based database and will return an error for others.
-func (db *Database) Disable() error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- return pdb.Disable()
-}
-
-// Enable activates database and resets the state tree with the provided persistent
-// state root once the state sync is finished.
-func (db *Database) Enable(root common.Hash) error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- return pdb.Enable(root)
-}
-
-// Journal commits an entire diff hierarchy to disk into a single journal entry.
-// This is meant to be used during shutdown to persist the snapshot without
-// flattening everything down (bad for reorgs). It's only supported by path-based
-// database and will return an error for others.
-func (db *Database) Journal(root common.Hash) error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- return pdb.Journal(root)
-}
-
-// SetBufferSize sets the node buffer size to the provided value(in bytes).
-// It's only supported by path-based database and will return an error for
-// others.
-func (db *Database) SetBufferSize(size int) error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- return pdb.SetBufferSize(size)
-}
-
-// IsVerkle returns the indicator if the database is holding a verkle tree.
-func (db *Database) IsVerkle() bool {
- return db.config.IsVerkle
-}
diff --git a/triedb/database/database.go b/triedb/database/database.go
deleted file mode 100644
index 44d841016b..0000000000
--- a/triedb/database/database.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2024 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package database
-
-import (
- "github.com/ava-labs/libevm/common"
-)
-
-// Reader wraps the Node method of a backing trie reader.
-type Reader interface {
- // Node retrieves the trie node blob with the provided trie identifier,
- // node path and the corresponding node hash. No error will be returned
- // if the node is not found.
- Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
-}
-
-// PreimageStore wraps the methods of a backing store for reading and writing
-// trie node preimages.
-type PreimageStore interface {
- // Preimage retrieves the preimage of the specified hash.
- Preimage(hash common.Hash) []byte
-
- // InsertPreimage commits a set of preimages along with their hashes.
- InsertPreimage(preimages map[common.Hash][]byte)
-}
-
-// Database wraps the methods of a backing trie store.
-type Database interface {
- PreimageStore
-
- // Reader returns a node reader associated with the specific state.
- // An error will be returned if the specified state is not available.
- Reader(stateRoot common.Hash) (Reader, error)
-}
diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go
index 681675132a..7005a44966 100644
--- a/triedb/hashdb/database.go
+++ b/triedb/hashdb/database.go
@@ -36,13 +36,16 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
+ "github.com/ava-labs/libevm/triedb"
+ "github.com/ava-labs/libevm/triedb/database"
)
const (
@@ -99,6 +102,18 @@ type cache interface {
type Config struct {
CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
StatsPrefix string // Prefix for cache stats (disabled if empty)
+ ReferenceRoot bool // Whether to reference the root node on update
+}
+
+func (c Config) BackendConstructor(diskdb ethdb.Database, config *triedb.Config) triedb.DBOverride {
+ var resolver ChildResolver
+ if config.IsVerkle {
+ // TODO define verkle resolver
+ log.Crit("Verkle node resolver is not defined")
+ } else {
+ resolver = trie.MerkleResolver{}
+ }
+ return New(diskdb, &c, resolver)
}
// Defaults is the default setting for database if it's not specified.
@@ -137,6 +152,8 @@ type Database struct {
childrenSize common.StorageSize // Storage size of the external children tracking
lock sync.RWMutex
+
+ referenceRoot bool
}
// cachedNode is all the information we know about a single cached trie node
@@ -174,10 +191,11 @@ func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Databas
cleans = utils.NewMeteredCache(config.CleanCacheSize, config.StatsPrefix, cacheStatsUpdateFrequency)
}
return &Database{
- diskdb: diskdb,
- resolver: resolver,
- cleans: cleans,
- dirties: make(map[common.Hash]*cachedNode),
+ diskdb: diskdb,
+ resolver: resolver,
+ cleans: cleans,
+ dirties: make(map[common.Hash]*cachedNode),
+ referenceRoot: config.ReferenceRoot,
}
}
@@ -627,6 +645,8 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
// Update inserts the dirty nodes in provided nodeset into database and link the
// account trie with multiple storage tries if necessary.
+// If ReferenceRoot was enabled in the config, it will also add a reference from
+// the root to the metaroot while holding the db's lock.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
// Ensure the parent state is present and signal a warning if not.
if parent != types.EmptyRootHash {
@@ -637,26 +657,13 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n
db.lock.Lock()
defer db.lock.Unlock()
- return db.update(root, parent, nodes)
-}
-
-// UpdateAndReferenceRoot inserts the dirty nodes in provided nodeset into
-// database and links the account trie with multiple storage tries if necessary,
-// then adds a reference [from] root to the metaroot while holding the db's lock.
-func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
- // Ensure the parent state is present and signal a warning if not.
- if parent != types.EmptyRootHash {
- if blob, _ := db.node(parent); len(blob) == 0 {
- log.Error("parent state is not present")
- }
- }
- db.lock.Lock()
- defer db.lock.Unlock()
-
if err := db.update(root, parent, nodes); err != nil {
return err
}
- db.reference(root, common.Hash{})
+
+ if db.referenceRoot {
+ db.reference(root, common.Hash{})
+ }
return nil
}
@@ -733,7 +740,7 @@ func (db *Database) Scheme() string {
// Reader retrieves a node reader belonging to the given state root.
// An error will be returned if the requested state is not available.
-func (db *Database) Reader(root common.Hash) (*reader, error) {
+func (db *Database) Reader(root common.Hash) (database.Reader, error) {
if _, err := db.node(root); err != nil {
return nil, fmt.Errorf("state %#x is not available, %v", root, err)
}
diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index 88d63c8ec2..c38c54fca8 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -35,11 +35,13 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
+ "github.com/ava-labs/libevm/triedb"
+ "github.com/ava-labs/libevm/triedb/database"
)
const (
@@ -101,6 +103,10 @@ type Config struct {
ReadOnly bool // Flag whether the database is opened in read only mode.
}
+func (c Config) BackendConstructor(diskdb ethdb.Database, _ *triedb.Config) triedb.DBOverride {
+ return New(diskdb, &c)
+}
+
// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (c *Config) sanitize() *Config {
@@ -220,7 +226,7 @@ func New(diskdb ethdb.Database, config *Config) *Database {
}
// Reader retrieves a layer belonging to the given state root.
-func (db *Database) Reader(root common.Hash) (layer, error) {
+func (db *Database) Reader(root common.Hash) (database.Reader, error) {
l := db.tree.get(root)
if l == nil {
return nil, fmt.Errorf("state %#x is not available", root)
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index 83f32ff263..37be932816 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -35,12 +35,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/testutil"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie/testutil"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
)
diff --git a/triedb/pathdb/difflayer.go b/triedb/pathdb/difflayer.go
index dbe8dd63fc..eea8dc4126 100644
--- a/triedb/pathdb/difflayer.go
+++ b/triedb/pathdb/difflayer.go
@@ -30,10 +30,10 @@ import (
"fmt"
"sync"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
)
// diffLayer represents a collection of modifications made to the in-memory tries
diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go
index 05ffca3314..d7f975ffad 100644
--- a/triedb/pathdb/difflayer_test.go
+++ b/triedb/pathdb/difflayer_test.go
@@ -31,9 +31,9 @@ import (
"testing"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/trie/testutil"
- "github.com/ava-labs/coreth/trie/trienode"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie/testutil"
+ "github.com/ava-labs/libevm/trie/trienode"
)
func emptyLayer() *diskLayer {
diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
index 2d645c3e40..c13b46e710 100644
--- a/triedb/pathdb/disklayer.go
+++ b/triedb/pathdb/disklayer.go
@@ -33,11 +33,11 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
"golang.org/x/crypto/sha3"
)
diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go
index ab72fcf958..14e53383a3 100644
--- a/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -32,8 +32,8 @@ import (
"errors"
"fmt"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie/triestate"
"golang.org/x/exp/slices"
)
diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go
index 3bf5f7cf0e..4d30831c81 100644
--- a/triedb/pathdb/history_test.go
+++ b/triedb/pathdb/history_test.go
@@ -32,10 +32,10 @@ import (
"testing"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/testutil"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie/testutil"
+ "github.com/ava-labs/libevm/trie/triestate"
)
// randomStateSet generates a random state change set.
diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
index e7156157fa..a245e7f8c3 100644
--- a/triedb/pathdb/journal.go
+++ b/triedb/pathdb/journal.go
@@ -35,12 +35,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
)
var (
diff --git a/triedb/pathdb/layertree.go b/triedb/pathdb/layertree.go
index ec78876da1..a52eded0f6 100644
--- a/triedb/pathdb/layertree.go
+++ b/triedb/pathdb/layertree.go
@@ -32,9 +32,9 @@ import (
"sync"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
)
// layerTree is a group of state layers identified by the state root.
diff --git a/triedb/pathdb/nodebuffer.go b/triedb/pathdb/nodebuffer.go
index 71dc809021..7a461b60ad 100644
--- a/triedb/pathdb/nodebuffer.go
+++ b/triedb/pathdb/nodebuffer.go
@@ -32,11 +32,11 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/trie/trienode"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie/trienode"
)
// nodebuffer is a collection of modified trie nodes to aggregate the disk
diff --git a/triedb/pathdb/testutils.go b/triedb/pathdb/testutils.go
index 27abed8aad..a7ada9947e 100644
--- a/triedb/pathdb/testutils.go
+++ b/triedb/pathdb/testutils.go
@@ -31,10 +31,10 @@ import (
"fmt"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
"golang.org/x/exp/slices"
)
diff --git a/triedb/preimages.go b/triedb/preimages.go
deleted file mode 100644
index 538c05163e..0000000000
--- a/triedb/preimages.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// (c) 2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package triedb
-
-import (
- "sync"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
-)
-
-const defaultPreimagesLimit = 4 * 1024 * 1024 // 4 MB
-
-// preimageStore is the store for caching preimages of node key.
-type preimageStore struct {
- lock sync.RWMutex
- disk ethdb.KeyValueStore
- preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
- preimagesSize common.StorageSize // Storage size of the preimages cache
-}
-
-// newPreimageStore initializes the store for caching preimages.
-func newPreimageStore(disk ethdb.KeyValueStore) *preimageStore {
- return &preimageStore{
- disk: disk,
- preimages: make(map[common.Hash][]byte),
- }
-}
-
-// insertPreimage writes a new trie node pre-image to the memory database if it's
-// yet unknown. The method will NOT make a copy of the slice, only use if the
-// preimage will NOT be changed later on.
-func (store *preimageStore) insertPreimage(preimages map[common.Hash][]byte) {
- store.lock.Lock()
- defer store.lock.Unlock()
-
- for hash, preimage := range preimages {
- if _, ok := store.preimages[hash]; ok {
- continue
- }
- store.preimages[hash] = preimage
- store.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
- }
-}
-
-// preimage retrieves a cached trie node pre-image from memory. If it cannot be
-// found cached, the method queries the persistent database for the content.
-func (store *preimageStore) preimage(hash common.Hash) []byte {
- store.lock.RLock()
- preimage := store.preimages[hash]
- store.lock.RUnlock()
-
- if preimage != nil {
- return preimage
- }
- return rawdb.ReadPreimage(store.disk, hash)
-}
-
-// commit flushes the cached preimages into the disk.
-func (store *preimageStore) commit(force bool) error {
- store.lock.Lock()
- defer store.lock.Unlock()
-
- if store.preimagesSize <= defaultPreimagesLimit && !force {
- return nil
- }
- batch := store.disk.NewBatch()
- rawdb.WritePreimages(batch, store.preimages)
- if err := batch.Write(); err != nil {
- return err
- }
- store.preimages, store.preimagesSize = make(map[common.Hash][]byte), 0
- return nil
-}
-
-// size returns the current storage size of accumulated preimages.
-func (store *preimageStore) size() common.StorageSize {
- store.lock.RLock()
- defer store.lock.RUnlock()
-
- return store.preimagesSize
-}