diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 5303d432fb6b..44f15c322c4c 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -18,6 +18,7 @@ package t8ntool import ( "fmt" + stdmath "math" "math/big" "github.com/ethereum/go-ethereum/common" @@ -43,8 +44,9 @@ import ( ) type Prestate struct { - Env stEnv `json:"env"` - Pre types.GenesisAlloc `json:"pre"` + Env stEnv `json:"env"` + Pre types.GenesisAlloc `json:"pre"` + TreeLeaves map[common.Hash]hexutil.Bytes `json:"vkt,omitempty"` } //go:generate go run github.com/fjl/gencodec -type ExecutionResult -field-override executionResultMarshaling -out gen_execresult.go @@ -142,7 +144,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, return h } var ( - statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre) + isEIP4762 = chainConfig.IsVerkle(big.NewInt(int64(pre.Env.Number)), pre.Env.Timestamp) + statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre, isEIP4762) signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp) gaspool = new(core.GasPool) blockHash = common.Hash{0x13, 0x37} @@ -301,6 +304,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, // Amount is in gwei, turn into wei amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei)) statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal) + + if isEIP4762 { + statedb.AccessEvents().AddAccount(w.Address, true, stdmath.MaxUint64) + } } // Gather the execution-layer triggered requests. @@ -361,8 +368,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, execRs.Requests = requests } - // Re-create statedb instance with new root upon the updated database - // for accessing latest states. + // Re-create statedb instance with new root for MPT mode statedb, err = state.New(root, statedb.Database()) if err != nil { return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err)) @@ -371,12 +377,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, return statedb, execRs, body, nil } -func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB { - tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true}) +func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, isBintrie bool) *state.StateDB { + tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true, IsVerkle: isBintrie}) sdb := state.NewDatabase(tdb, nil) - statedb, err := state.New(types.EmptyRootHash, sdb) + + root := types.EmptyRootHash + if isBintrie { + root = types.EmptyBinaryHash + } + statedb, err := state.New(root, sdb) if err != nil { - panic(fmt.Errorf("failed to create initial state: %v", err)) + panic(fmt.Errorf("failed to create initial statedb: %v", err)) } for addr, a := range accounts { statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified) @@ -387,10 +398,15 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB } } // Commit and re-open to start with a clean state. 
- root, err := statedb.Commit(0, false, false)
+ root, err = statedb.Commit(0, false, false)
 if err != nil {
 panic(fmt.Errorf("failed to commit initial state: %v", err))
 }
+ // In bintrie mode, return the statedb as-is; no reopen is needed.
+ if isBintrie {
+ return statedb
+ }
+ // For MPT mode, reopen the state with the committed root
 statedb, err = state.New(root, sdb)
 if err != nil {
 panic(fmt.Errorf("failed to reopen state after commit: %v", err))
 }
@@ -398,7 +414,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
 return statedb
}
-func rlpHash(x interface{}) (h common.Hash) {
+func rlpHash(x any) (h common.Hash) {
 hw := sha3.NewLegacyKeccak256()
 rlp.Encode(hw, x)
 hw.Sum(h[:0])
diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go
index f2606c86d18b..a6ec33eacf04 100644
--- a/cmd/evm/internal/t8ntool/flags.go
+++ b/cmd/evm/internal/t8ntool/flags.go
@@ -88,6 +88,14 @@ var (
 "\t<file> - into the file <file>",
 Value: "block.json",
 }
+ OutputBTFlag = &cli.StringFlag{
+ Name: "output.vkt",
+ Usage: "Determines where to put the `BT` of the post-state.\n" +
+ "\t`stdout` - into the stdout output\n" +
+ "\t`stderr` - into the stderr output\n" +
+ "\t<file> - into the file <file>",
+ Value: "vkt.json",
+ }
 InputAllocFlag = &cli.StringFlag{
 Name: "input.alloc",
 Usage: "`stdin` or file name of where to find the prestate alloc to use.",
@@ -123,6 +131,11 @@ var (
 Usage: "`stdin` or file name of where to find the transactions list in RLP form.",
 Value: "txs.rlp",
 }
+ // TODO(@CPerezz): rename the flag's `Name` in a follow-up PR (relies on EEST -> https://github.com/ethereum/execution-spec-tests/tree/verkle/main)
+ InputBTFlag = &cli.StringFlag{
+ Name: "input.vkt",
+ Usage: "`stdin` or file name of where to find the prestate BT.",
+ }
 SealCliqueFlag = &cli.StringFlag{
 Name: "seal.clique",
 Usage: "Seal block with Clique. 
`stdin` or file name of where to find the Clique sealing data.", diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index e946ccddd567..af60333cbdbc 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -28,15 +28,22 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/tests" + "github.com/ethereum/go-ethereum/trie/bintrie" + "github.com/ethereum/go-ethereum/triedb" + "github.com/ethereum/go-ethereum/triedb/database" + "github.com/holiman/uint256" "github.com/urfave/cli/v2" ) @@ -75,10 +82,11 @@ var ( ) type input struct { - Alloc types.GenesisAlloc `json:"alloc,omitempty"` - Env *stEnv `json:"env,omitempty"` - Txs []*txWithKey `json:"txs,omitempty"` - TxRlp string `json:"txsRlp,omitempty"` + Alloc types.GenesisAlloc `json:"alloc,omitempty"` + Env *stEnv `json:"env,omitempty"` + BT map[common.Hash]hexutil.Bytes `json:"vkt,omitempty"` + Txs []*txWithKey `json:"txs,omitempty"` + TxRlp string `json:"txsRlp,omitempty"` } func Transition(ctx *cli.Context) error { @@ -90,16 +98,16 @@ func Transition(ctx *cli.Context) error { // stdin input or in files. // Check if anything needs to be read from stdin var ( - prestate Prestate - txIt txIterator // txs to apply - allocStr = ctx.String(InputAllocFlag.Name) - + prestate Prestate + txIt txIterator // txs to apply + allocStr = ctx.String(InputAllocFlag.Name) + btStr = ctx.String(InputBTFlag.Name) envStr = ctx.String(InputEnvFlag.Name) txStr = ctx.String(InputTxsFlag.Name) inputData = &input{} ) // Figure out the prestate alloc - if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { + if allocStr == stdinSelector || btStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { decoder := json.NewDecoder(os.Stdin) if err := decoder.Decode(inputData); err != nil { return NewError(ErrorJson, fmt.Errorf("failed unmarshalling stdin: %v", err)) @@ -112,6 +120,13 @@ func Transition(ctx *cli.Context) error { } prestate.Pre = inputData.Alloc + if btStr != stdinSelector && btStr != "" { + if err := readFile(btStr, "BT", &inputData.BT); err != nil { + return err + } + } + prestate.TreeLeaves = inputData.BT + // Set the block environment if envStr != stdinSelector { var env stEnv @@ -182,9 +197,21 @@ func Transition(ctx *cli.Context) error { return err } // Dump the execution result - collector := make(Alloc) - s.DumpToCollector(collector, nil) - return dispatchOutput(ctx, baseDir, result, collector, body) + var ( + collector = make(Alloc) + btleaves map[common.Hash]hexutil.Bytes + ) + isBinary := chainConfig.IsVerkle(big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp) + if !isBinary { + s.DumpToCollector(collector, nil) + } else { + btleaves = make(map[common.Hash]hexutil.Bytes) + if err := s.DumpBinTrieLeaves(btleaves); err != nil { + return err + } + } + + return dispatchOutput(ctx, baseDir, result, 
collector, body, btleaves) } func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error { @@ -306,7 +333,7 @@ func saveFile(baseDir, filename string, data interface{}) error { // dispatchOutput writes the output data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes, bt map[common.Hash]hexutil.Bytes) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) dispatch := func(baseDir, fName, name string, obj interface{}) error { @@ -333,6 +360,13 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a if err := dispatch(baseDir, ctx.String(OutputBodyFlag.Name), "body", body); err != nil { return err } + // Only write bt output if we actually have binary trie leaves + if bt != nil { + if err := dispatch(baseDir, ctx.String(OutputBTFlag.Name), "vkt", bt); err != nil { + return err + } + } + if len(stdOutObject) > 0 { b, err := json.MarshalIndent(stdOutObject, "", " ") if err != nil { @@ -351,3 +385,168 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a } return nil } + +// BinKey computes the tree key given an address and an optional slot number. +func BinKey(ctx *cli.Context) error { + if ctx.Args().Len() == 0 || ctx.Args().Len() > 2 { + return errors.New("invalid number of arguments: expecting an address and an optional slot number") + } + + addr, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + return fmt.Errorf("error decoding address: %w", err) + } + + if ctx.Args().Len() == 2 { + slot, err := hexutil.Decode(ctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("error decoding slot: %w", err) + } + fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyStorageSlot(common.BytesToAddress(addr), slot)) + } else { + fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyBasicData(common.BytesToAddress(addr))) + } + return nil +} + +// BinKeys computes a set of tree keys given a genesis alloc. +func BinKeys(ctx *cli.Context) error { + var allocStr = ctx.String(InputAllocFlag.Name) + var alloc core.GenesisAlloc + // Figure out the prestate alloc + if allocStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + if err := decoder.Decode(&alloc); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } + } + if allocStr != stdinSelector { + if err := readFile(allocStr, "alloc", &alloc); err != nil { + return err + } + } + db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults) + defer db.Close() + + bt, err := genBinTrieFromAlloc(alloc, db) + if err != nil { + return fmt.Errorf("error generating bt: %w", err) + } + + collector := make(map[common.Hash]hexutil.Bytes) + it, err := bt.NodeIterator(nil) + if err != nil { + panic(err) + } + for it.Next(true) { + if it.Leaf() { + collector[common.BytesToHash(it.LeafKey())] = it.LeafBlob() + } + } + + output, err := json.MarshalIndent(collector, "", "") + if err != nil { + return fmt.Errorf("error outputting tree: %w", err) + } + + fmt.Println(string(output)) + + return nil +} + +// BinTrieRoot computes the root of a Binary Trie from a genesis alloc. 
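+// It backs the `evm verkle state-root` (alias `vsr`) subcommand: the alloc is read from
+// the file given by --input.alloc, or from stdin when that flag is set to `stdin`.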
+func BinTrieRoot(ctx *cli.Context) error {
+ var allocStr = ctx.String(InputAllocFlag.Name)
+ var alloc core.GenesisAlloc
+ if allocStr == stdinSelector {
+ decoder := json.NewDecoder(os.Stdin)
+ if err := decoder.Decode(&alloc); err != nil {
+ return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
+ }
+ }
+ if allocStr != stdinSelector {
+ if err := readFile(allocStr, "alloc", &alloc); err != nil {
+ return err
+ }
+ }
+ db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults)
+ defer db.Close()
+
+ bt, err := genBinTrieFromAlloc(alloc, db)
+ if err != nil {
+ return fmt.Errorf("error generating bt: %w", err)
+ }
+ fmt.Println(bt.Hash().Hex())
+
+ return nil
+}
+
+// TODO(@CPerezz): Should this go to the `bintrie` package?
+func genBinTrieFromAlloc(alloc core.GenesisAlloc, db database.NodeDatabase) (*bintrie.BinaryTrie, error) {
+ bt, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, db)
+ if err != nil {
+ return nil, err
+ }
+ for addr, acc := range alloc {
+ for slot, value := range acc.Storage {
+ err := bt.UpdateStorage(addr, slot.Bytes(), value.Big().Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("error inserting storage: %w", err)
+ }
+ }
+ account := &types.StateAccount{
+ Balance: uint256.MustFromBig(acc.Balance),
+ Nonce: acc.Nonce,
+ CodeHash: crypto.Keccak256Hash(acc.Code).Bytes(),
+ Root: common.Hash{},
+ }
+ err := bt.UpdateAccount(addr, account, len(acc.Code))
+ if err != nil {
+ return nil, fmt.Errorf("error inserting account: %w", err)
+ }
+ err = bt.UpdateContractCode(addr, common.BytesToHash(account.CodeHash), acc.Code)
+ if err != nil {
+ return nil, fmt.Errorf("error inserting code: %w", err)
+ }
+ }
+ return bt, nil
+}
+
+// BinaryCodeChunkKey computes the tree key of a code-chunk for a given address.
+func BinaryCodeChunkKey(ctx *cli.Context) error {
+ if ctx.Args().Len() == 0 || ctx.Args().Len() > 2 {
+ return errors.New("invalid number of arguments: expecting an address and a code-chunk number")
+ }
+
+ addr, err := hexutil.Decode(ctx.Args().Get(0))
+ if err != nil {
+ return fmt.Errorf("error decoding address: %w", err)
+ }
+ chunkNumberBytes, err := hexutil.Decode(ctx.Args().Get(1))
+ if err != nil {
+ return fmt.Errorf("error decoding chunk number: %w", err)
+ }
+ var chunkNumber uint256.Int
+ chunkNumber.SetBytes(chunkNumberBytes)
+
+ fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyCodeChunk(common.BytesToAddress(addr), &chunkNumber))
+
+ return nil
+}
+
+// BinaryCodeChunkCode returns the chunkified version of the given bytecode.
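+// It backs the `evm verkle chunkify-code` (alias `vcc`) subcommand, which takes the
+// bytecode as a single hex argument and prints the resulting chunks as hex.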
+func BinaryCodeChunkCode(ctx *cli.Context) error {
+ if ctx.Args().Len() == 0 || ctx.Args().Len() > 1 {
+ return errors.New("invalid number of arguments: expecting a bytecode")
+ }
+
+ bytecode, err := hexutil.Decode(ctx.Args().Get(0))
+ if err != nil {
+ return fmt.Errorf("error decoding bytecode: %w", err)
+ }
+
+ chunkedCode := bintrie.ChunkifyCode(bytecode)
+ fmt.Printf("%#x\n", chunkedCode)
+
+ return nil
+}
diff --git a/cmd/evm/main.go b/cmd/evm/main.go
index bf5be9a35924..5238d5920ca5 100644
--- a/cmd/evm/main.go
+++ b/cmd/evm/main.go
@@ -146,16 +146,63 @@ var (
 t8ntool.TraceEnableCallFramesFlag,
 t8ntool.OutputBasedir,
 t8ntool.OutputAllocFlag,
+ t8ntool.OutputBTFlag,
 t8ntool.OutputResultFlag,
 t8ntool.OutputBodyFlag,
 t8ntool.InputAllocFlag,
 t8ntool.InputEnvFlag,
+ t8ntool.InputBTFlag,
 t8ntool.InputTxsFlag,
 t8ntool.ForknameFlag,
 t8ntool.ChainIDFlag,
 t8ntool.RewardFlag,
 },
 }
+
+ verkleCommand = &cli.Command{
+ Name: "verkle",
+ Aliases: []string{"vkt"},
+ Usage: "Binary Trie helpers",
+ Subcommands: []*cli.Command{
+ {
+ Name: "tree-keys",
+ Aliases: []string{"v"},
+ Usage: "compute a set of binary trie keys, given their source addresses and optional slot numbers",
+ Action: t8ntool.BinKeys,
+ Flags: []cli.Flag{
+ t8ntool.InputAllocFlag,
+ },
+ },
+ {
+ Name: "single-key",
+ Aliases: []string{"vk"},
+ Usage: "compute the binary trie key given an address and optional slot number",
+ Action: t8ntool.BinKey,
+ },
+ {
+ Name: "code-chunk-key",
+ Aliases: []string{"vck"},
+ Usage: "compute the binary trie key given an address and chunk number",
+ Action: t8ntool.BinaryCodeChunkKey,
+ },
+ {
+ Name: "chunkify-code",
+ Aliases: []string{"vcc"},
+ Usage: "chunkify a given bytecode for a binary trie",
+ Action: t8ntool.BinaryCodeChunkCode,
+ },
+ {
+ Name: "state-root",
+ Aliases: []string{"vsr"},
+ Usage: "compute the state-root of a binary trie for the given alloc",
+ Action: t8ntool.BinTrieRoot,
+ Flags: []cli.Flag{
+ t8ntool.InputAllocFlag,
+ },
+ },
+ },
+ }
+
 transactionCommand = &cli.Command{
 Name: "transaction",
 Aliases: []string{"t9n"},
@@ -210,6 +257,7 @@ func init() {
 stateTransitionCommand,
 transactionCommand,
 blockBuilderCommand,
+ verkleCommand,
 }
 app.Before = func(ctx *cli.Context) error {
 flags.MigrateGlobalFlags(ctx)
diff --git a/core/bintrie_witness_test.go b/core/bintrie_witness_test.go
new file mode 100644
index 000000000000..7704ba41fb66
--- /dev/null
+++ b/core/bintrie_witness_test.go
@@ -0,0 +1,237 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package core + +import ( + "encoding/binary" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/beacon" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/triedb" +) + +var ( + testVerkleChainConfig = ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + VerkleTime: u64(0), + TerminalTotalDifficulty: common.Big0, + EnableVerkleAtGenesis: true, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Verkle: params.DefaultPragueBlobConfig, + }, + } +) + +func TestProcessVerkle(t *testing.T) { + var ( + code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`) + intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, nil, true, true, true, true) + // A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness + // will not contain that copied data. + // Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985 + codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d60
1f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`) + intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, nil, true, true, true, true) + signer = types.LatestSigner(testVerkleChainConfig) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + gspec = &Genesis{ + Config: testVerkleChainConfig, + Alloc: GenesisAlloc{ + coinbase: { + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0}, + params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0}, + params.WithdrawalQueueAddress: {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0}, + params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0}, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. 
+ // genesis := gspec.MustCommit(bcdb, triedb) + options := DefaultConfig().WithStateScheme(rawdb.PathScheme) + options.SnapshotLimit = 0 + blockchain, _ := NewBlockChain(bcdb, gspec, beacon.New(ethash.NewFaker()), options) + defer blockchain.Stop() + + txCost1 := params.TxGas + txCost2 := params.TxGas + contractCreationCost := intrinsicContractCreationGas + + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* creation with value */ + 739 /* execution costs */ + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (tx) */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at pc=0x20) */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */ + params.WitnessChunkReadCost + /* SLOAD in constructor */ + params.WitnessChunkWriteCost + /* SSTORE in constructor */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at PC=0x121) */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */ + params.WitnessChunkReadCost + /* SLOAD in constructor */ + params.WitnessChunkWriteCost + /* SSTORE in constructor */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash for tx creation */ + 15*(params.WitnessChunkReadCost+params.WitnessChunkWriteCost) + /* code chunks #0..#14 */ + uint64(4844) /* execution costs */ + blockGasUsagesExpected := []uint64{ + txCost1*2 + txCost2, + txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, + } + _, chain, _ := GenerateChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { + gen.SetPoS() + + // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over) + tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, 
common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + + // Add two contract creations in block #2 + if i == 1 { + tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 6, + Value: big.NewInt(16), + Gas: 3000000, + GasPrice: big.NewInt(875000000), + Data: code, + }) + gen.AddTx(tx) + + tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 7, + Value: big.NewInt(0), + Gas: 3000000, + GasPrice: big.NewInt(875000000), + Data: codeWithExtCodeCopy, + }) + gen.AddTx(tx) + } + }) + + for i, b := range chain { + fmt.Printf("%d %x\n", i, b.Root()) + } + endnum, err := blockchain.InsertChain(chain) + if err != nil { + t.Fatalf("block %d imported with error: %v", endnum, err) + } + + for i := range 2 { + b := blockchain.GetBlockByNumber(uint64(i) + 1) + if b == nil { + t.Fatalf("expected block %d to be present in chain", i+1) + } + if b.Hash() != chain[i].Hash() { + t.Fatalf("block #%d not found at expected height", b.NumberU64()) + } + if b.GasUsed() != blockGasUsagesExpected[i] { + t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed()) + } + } +} + +func TestProcessParentBlockHash(t *testing.T) { + // This test uses blocks where, + // block 1 parent hash is 0x0100.... + // block 2 parent hash is 0x0200.... + // etc + checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) { + statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified) + statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeUnspecified) + // Process n blocks, from 1 .. num + var num = 2 + for i := 1; i <= num; i++ { + header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)} + chainConfig := params.MergedTestChainConfig + if isVerkle { + chainConfig = testVerkleChainConfig + } + vmContext := NewEVMBlockContext(header, nil, new(common.Address)) + evm := vm.NewEVM(vmContext, statedb, chainConfig, vm.Config{}) + ProcessParentBlockHash(header.ParentHash, evm) + } + // Read block hashes for block 0 .. 
num-1 + for i := 0; i < num; i++ { + have, want := getContractStoredBlockHash(statedb, uint64(i), isVerkle), common.Hash{byte(i + 1)} + if have != want { + t.Errorf("block %d, verkle=%v, have parent hash %v, want %v", i, isVerkle, have, want) + } + } + } + t.Run("MPT", func(t *testing.T) { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) + checkBlockHashes(statedb, false) + }) + t.Run("Verkle", func(t *testing.T) { + db := rawdb.NewMemoryDatabase() + cacheConfig := DefaultConfig().WithStateScheme(rawdb.PathScheme) + cacheConfig.SnapshotLimit = 0 + triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true)) + statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabase(triedb, nil)) + checkBlockHashes(statedb, true) + }) +} + +// getContractStoredBlockHash is a utility method which reads the stored parent blockhash for block 'number' +func getContractStoredBlockHash(statedb *state.StateDB, number uint64, isVerkle bool) common.Hash { + ringIndex := number % params.HistoryServeWindow + var key common.Hash + binary.BigEndian.PutUint64(key[24:], ringIndex) + if isVerkle { + return statedb.GetState(params.HistoryStorageAddress, key) + } + return statedb.GetState(params.HistoryStorageAddress, key) +} diff --git a/core/chain_makers.go b/core/chain_makers.go index af55716cca3e..a1e07becba88 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) @@ -427,7 +426,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } // Forcibly use hash-based state scheme for retaining all nodes in disk. - triedb := triedb.NewDatabase(db, triedb.HashDefaults) + var triedbConfig *triedb.Config = triedb.HashDefaults + if config.IsVerkle(config.ChainID, 0) { + triedbConfig = triedb.VerkleDefaults + } + triedb := triedb.NewDatabase(db, triedbConfig) defer triedb.Close() for i := 0; i < n; i++ { @@ -472,7 +475,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse // then generate chain on top. 
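+// When the genesis config has verkle enabled, the in-memory trie database is opened
+// with the binary-trie (verkle) defaults instead of the hash-based defaults.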
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) { db := rawdb.NewMemoryDatabase() - triedb := triedb.NewDatabase(db, triedb.HashDefaults) + var triedbConfig *triedb.Config = triedb.HashDefaults + if genesis.Config != nil && genesis.Config.IsVerkle(genesis.Config.ChainID, 0) { + triedbConfig = triedb.VerkleDefaults + } + triedb := triedb.NewDatabase(db, triedbConfig) defer triedb.Close() _, err := genesis.Commit(db, triedb) if err != nil { @@ -482,117 +489,6 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, return db, blocks, receipts } -func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, trdb *triedb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) { - if config == nil { - config = params.TestChainConfig - } - proofs := make([]*verkle.VerkleProof, 0, n) - keyvals := make([]verkle.StateDiff, 0, n) - cm := newChainMaker(parent, config, engine) - - genblock := func(i int, parent *types.Block, triedb *triedb.Database, statedb *state.StateDB) (*types.Block, types.Receipts) { - b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine} - b.header = cm.makeHeader(parent, statedb, b.engine) - - // TODO uncomment when proof generation is merged - // Save pre state for proof generation - // preState := statedb.Copy() - - // EIP-2935 / 7709 - blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase) - blockContext.Random = &common.Hash{} // enable post-merge instruction set - evm := vm.NewEVM(blockContext, statedb, cm.config, vm.Config{}) - ProcessParentBlockHash(b.header.ParentHash, evm) - - // Execute any user modifications to the block. - if gen != nil { - gen(i, b) - } - - requests := b.collectRequests(false) - if requests != nil { - reqHash := types.CalcRequestsHash(requests) - b.header.RequestsHash = &reqHash - } - - body := &types.Body{ - Transactions: b.txs, - Uncles: b.uncles, - Withdrawals: b.withdrawals, - } - block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, body, b.receipts) - if err != nil { - panic(err) - } - - // Write state changes to DB. - root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time)) - if err != nil { - panic(fmt.Sprintf("state write error: %v", err)) - } - if err = triedb.Commit(root, false); err != nil { - panic(fmt.Sprintf("trie write error: %v", err)) - } - - proofs = append(proofs, block.ExecutionWitness().VerkleProof) - keyvals = append(keyvals, block.ExecutionWitness().StateDiff) - - return block, b.receipts - } - - sdb := state.NewDatabase(trdb, nil) - - for i := 0; i < n; i++ { - statedb, err := state.New(parent.Root(), sdb) - if err != nil { - panic(err) - } - block, receipts := genblock(i, parent, trdb, statedb) - - // Post-process the receipts. - // Here we assign the final block hash and other info into the receipt. - // In order for DeriveFields to work, the transaction and receipt lists need to be - // of equal length. If AddUncheckedTx or AddUncheckedReceipt are used, there will be - // extra ones, so we just trim the lists here. 
- receiptsCount := len(receipts) - txs := block.Transactions() - if len(receipts) > len(txs) { - receipts = receipts[:len(txs)] - } else if len(receipts) < len(txs) { - txs = txs[:len(receipts)] - } - var blobGasPrice *big.Int - if block.ExcessBlobGas() != nil { - blobGasPrice = eip4844.CalcBlobFee(cm.config, block.Header()) - } - if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil { - panic(err) - } - - // Re-expand to ensure all receipts are returned. - receipts = receipts[:receiptsCount] - - // Advance the chain. - cm.add(block, receipts) - parent = block - } - return cm.chain, cm.receipts, proofs, keyvals -} - -func GenerateVerkleChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (common.Hash, ethdb.Database, []*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) { - db := rawdb.NewMemoryDatabase() - cacheConfig := DefaultConfig().WithStateScheme(rawdb.PathScheme) - cacheConfig.SnapshotLimit = 0 - triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true)) - defer triedb.Close() - genesisBlock, err := genesis.Commit(db, triedb) - if err != nil { - panic(err) - } - blocks, receipts, proofs, keyvals := GenerateVerkleChain(genesis.Config, genesisBlock, engine, db, triedb, n, gen) - return genesisBlock.Hash(), db, blocks, receipts, proofs, keyvals -} - func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { time := parent.Time() + 10 // block time is fixed at 10 seconds parentHeader := parent.Header() diff --git a/core/genesis.go b/core/genesis.go index d0d490874d07..7d640c8caec1 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -190,7 +190,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e return common.Hash{}, err } // Commit newly generated states into disk if it's not empty. 
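+ // (emptyRoot is expected to be the scheme-specific empty root, so this check also covers the binary-trie case.)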
- if root != types.EmptyRootHash { + if root != emptyRoot { if err := triedb.Commit(root, true); err != nil { return common.Hash{}, err } diff --git a/core/genesis_test.go b/core/genesis_test.go index a41dfce5783e..1ed475695dfc 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -308,7 +308,7 @@ func TestVerkleGenesisCommit(t *testing.T) { }, } - expected := common.FromHex("018d20eebb130b5e2b796465fe36aafab650650729a92435aec071bf2386f080") + expected := common.FromHex("19056b480530799a4fdaa9fd9407043b965a3a5c37b4d2a1a9a4f3395a327561") got := genesis.ToBlock().Root().Bytes() if !bytes.Equal(got, expected) { t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) diff --git a/core/state/database.go b/core/state/database.go index 58d0ccfe8292..ae177d964f9a 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -28,6 +28,8 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/bintrie" + "github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb" @@ -239,10 +241,12 @@ func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) { if db.triedb.IsVerkle() { ts := overlay.LoadTransitionState(db.TrieDB().Disk(), root, db.triedb.IsVerkle()) if ts.InTransition() { - panic("transition isn't supported yet") + panic("state tree transition isn't supported yet") } if ts.Transitioned() { - return trie.NewVerkleTrie(root, db.triedb, db.pointCache) + // Use BinaryTrie instead of VerkleTrie when IsVerkle is set + // (IsVerkle actually means Binary Trie mode in this codebase) + return bintrie.NewBinaryTrie(root, db.triedb) } } tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) @@ -302,7 +306,7 @@ func mustCopyTrie(t Trie) Trie { return t.Copy() case *trie.VerkleTrie: return t.Copy() - case *trie.TransitionTrie: + case *transitiontrie.TransitionTrie: return t.Copy() default: panic(fmt.Errorf("unknown trie type %T", t)) diff --git a/core/state/dump.go b/core/state/dump.go index a4abc33733f1..829d106ed3ac 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -18,6 +18,7 @@ package state import ( "encoding/json" + "errors" "fmt" "time" @@ -27,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/bintrie" ) // DumpConfig is a set of options to control what portions of the state will be @@ -221,6 +223,28 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] return nextKey } +// DumpBinTrieLeaves collects all binary trie leaf nodes into the provided map. +func (s *StateDB) DumpBinTrieLeaves(collector map[common.Hash]hexutil.Bytes) error { + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + return err + } + btr, ok := tr.(*bintrie.BinaryTrie) + if !ok { + return errors.New("trie is not a binary trie") + } + it, err := btr.NodeIterator(nil) + if err != nil { + return err + } + for it.Next(true) { + if it.Leaf() { + collector[common.BytesToHash(it.LeafKey())] = it.LeafBlob() + } + } + return nil +} + // RawDump returns the state. If the processing is aborted e.g. due to options // reaching Max, the `Next` key is set on the returned Dump. 
func (s *StateDB) RawDump(opts *DumpConfig) Dump { diff --git a/core/state/reader.go b/core/state/reader.go index 3e8b31b6be3d..93083c8ae293 100644 --- a/core/state/reader.go +++ b/core/state/reader.go @@ -30,6 +30,8 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/bintrie" + "github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/database" @@ -242,7 +244,11 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach if !db.IsVerkle() { tr, err = trie.NewStateTrie(trie.StateTrieID(root), db) } else { - tr, err = trie.NewVerkleTrie(root, db, cache) + // When IsVerkle() is true, create a BinaryTrie wrapped in TransitionTrie + binTrie, binErr := bintrie.NewBinaryTrie(root, db) + if binErr != nil { + return nil, binErr + } // Based on the transition status, determine if the overlay // tree needs to be created, or if a single, target tree is @@ -253,7 +259,22 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach if err != nil { return nil, err } - tr = trie.NewTransitionTrie(mpt, tr.(*trie.VerkleTrie), false) + tr = transitiontrie.NewTransitionTrie(mpt, binTrie, false) + } else { + // HACK: Use TransitionTrie with nil base as a wrapper to make BinaryTrie + // satisfy the Trie interface. This works around the import cycle between + // trie and trie/bintrie packages. + // + // TODO: In future PRs, refactor the package structure to avoid this hack: + // - Option 1: Move common interfaces (Trie, NodeIterator) to a separate + // package that both trie and trie/bintrie can import + // - Option 2: Create a factory function in the trie package that returns + // BinaryTrie as a Trie interface without direct import + // - Option 3: Move BinaryTrie to the main trie package + // + // The current approach works but adds unnecessary overhead and complexity + // by using TransitionTrie when there's no actual transition happening. + tr = transitiontrie.NewTransitionTrie(nil, binTrie, false) } } if err != nil { diff --git a/core/state/state_object.go b/core/state/state_object.go index fdeb4254c1bc..8f2f323327dd 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/holiman/uint256" ) @@ -501,7 +502,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { // Verkle uses only one tree, and the copy has already been // made in mustCopyTrie. obj.trie = db.trie - case *trie.TransitionTrie: + case *transitiontrie.TransitionTrie: // Same thing for the transition tree, since the MPT is // read-only. obj.trie = db.trie diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go deleted file mode 100644 index 9495e325ca9d..000000000000 --- a/core/verkle_witness_test.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Copyright 2024 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "fmt" - "math/big" - "slices" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/tracing" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum/go-verkle" - "github.com/holiman/uint256" -) - -var ( - testVerkleChainConfig = ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - Ethash: new(params.EthashConfig), - ShanghaiTime: u64(0), - VerkleTime: u64(0), - TerminalTotalDifficulty: common.Big0, - EnableVerkleAtGenesis: true, - BlobScheduleConfig: ¶ms.BlobScheduleConfig{ - Verkle: params.DefaultPragueBlobConfig, - }, - // TODO uncomment when proof generation is merged - // ProofInBlocks: true, - } - testKaustinenLikeChainConfig = ¶ms.ChainConfig{ - ChainID: big.NewInt(69420), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - Ethash: new(params.EthashConfig), - ShanghaiTime: u64(0), - VerkleTime: u64(0), - TerminalTotalDifficulty: common.Big0, - EnableVerkleAtGenesis: true, - BlobScheduleConfig: ¶ms.BlobScheduleConfig{ - Verkle: params.DefaultPragueBlobConfig, - }, - } -) - -func TestProcessVerkle(t *testing.T) { - var ( - code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`) - intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, nil, true, true, true, true) - // A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness - // will not contain that copied data. 
- // Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985 - codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602
d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`) - intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, nil, true, true, true, true) - signer = types.LatestSigner(testVerkleChainConfig) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain - coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") - gspec = &Genesis{ - Config: testVerkleChainConfig, - Alloc: GenesisAlloc{ - coinbase: { - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0}, - params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0}, - params.WithdrawalQueueAddress: {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0}, - params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0}, - }, - } - ) - // Verkle trees use the snapshot, which must be enabled before the - // data is saved into the tree+database. - // genesis := gspec.MustCommit(bcdb, triedb) - options := DefaultConfig().WithStateScheme(rawdb.PathScheme) - options.SnapshotLimit = 0 - blockchain, _ := NewBlockChain(bcdb, gspec, beacon.New(ethash.NewFaker()), options) - defer blockchain.Stop() - - txCost1 := params.TxGas - txCost2 := params.TxGas - contractCreationCost := intrinsicContractCreationGas + - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* creation with value */ - 739 /* execution costs */ - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (tx) */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at pc=0x20) */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */ - params.WitnessChunkReadCost + /* SLOAD in constructor */ - params.WitnessChunkWriteCost + /* SSTORE in constructor */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at PC=0x121) */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code 
chunk #2 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */ - params.WitnessChunkReadCost + /* SLOAD in constructor */ - params.WitnessChunkWriteCost + /* SSTORE in constructor */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash for tx creation */ - 15*(params.WitnessChunkReadCost+params.WitnessChunkWriteCost) + /* code chunks #0..#14 */ - uint64(4844) /* execution costs */ - blockGasUsagesExpected := []uint64{ - txCost1*2 + txCost2, - txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, - } - _, _, chain, _, proofs, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - - // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over) - tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - - // Add two contract creations in block #2 - if i == 1 { - tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 6, - Value: big.NewInt(16), - Gas: 3000000, - GasPrice: big.NewInt(875000000), - Data: code, - }) - gen.AddTx(tx) - - tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 7, - Value: big.NewInt(0), - Gas: 3000000, - GasPrice: big.NewInt(875000000), - Data: codeWithExtCodeCopy, - }) - gen.AddTx(tx) - } - }) - - // Check proof for both blocks - err := verkle.Verify(proofs[0], gspec.ToBlock().Root().Bytes(), chain[0].Root().Bytes(), statediffs[0]) - if err != nil { - t.Fatal(err) - } - err = verkle.Verify(proofs[1], chain[0].Root().Bytes(), chain[1].Root().Bytes(), statediffs[1]) - if err != nil { - t.Fatal(err) - } - - t.Log("verified verkle proof, inserting blocks into the chain") - - for i, b := range chain { - fmt.Printf("%d %x\n", i, b.Root()) - } - endnum, err := blockchain.InsertChain(chain) - if err != nil { - t.Fatalf("block %d imported with error: %v", endnum, err) - } - - for i := range 2 { - b := blockchain.GetBlockByNumber(uint64(i) + 1) - if b == nil { - t.Fatalf("expected block %d to be present in chain", i+1) - } - if b.Hash() != chain[i].Hash() { - t.Fatalf("block #%d not found at expected height", b.NumberU64()) - } - if b.GasUsed() != blockGasUsagesExpected[i] { - t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed()) - } - } -} - -func TestProcessParentBlockHash(t *testing.T) { - // This test uses blocks where, - // block 1 parent hash is 0x0100.... - // block 2 parent hash is 0x0200.... - // etc - checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) { - statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified) - statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeUnspecified) - // Process n blocks, from 1 .. 
num - var num = 2 - for i := 1; i <= num; i++ { - header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)} - chainConfig := params.MergedTestChainConfig - if isVerkle { - chainConfig = testVerkleChainConfig - } - vmContext := NewEVMBlockContext(header, nil, new(common.Address)) - evm := vm.NewEVM(vmContext, statedb, chainConfig, vm.Config{}) - ProcessParentBlockHash(header.ParentHash, evm) - } - // Read block hashes for block 0 .. num-1 - for i := 0; i < num; i++ { - have, want := getContractStoredBlockHash(statedb, uint64(i), isVerkle), common.Hash{byte(i + 1)} - if have != want { - t.Errorf("block %d, verkle=%v, have parent hash %v, want %v", i, isVerkle, have, want) - } - } - } - t.Run("MPT", func(t *testing.T) { - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) - checkBlockHashes(statedb, false) - }) - t.Run("Verkle", func(t *testing.T) { - db := rawdb.NewMemoryDatabase() - cacheConfig := DefaultConfig().WithStateScheme(rawdb.PathScheme) - cacheConfig.SnapshotLimit = 0 - triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true)) - statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabase(triedb, nil)) - checkBlockHashes(statedb, true) - }) -} - -// getContractStoredBlockHash is a utility method which reads the stored parent blockhash for block 'number' -func getContractStoredBlockHash(statedb *state.StateDB, number uint64, isVerkle bool) common.Hash { - ringIndex := number % params.HistoryServeWindow - var key common.Hash - binary.BigEndian.PutUint64(key[24:], ringIndex) - if isVerkle { - return statedb.GetState(params.HistoryStorageAddress, key) - } - return statedb.GetState(params.HistoryStorageAddress, key) -} - -// TestProcessVerkleInvalidContractCreation checks for several modes of contract creation failures -func TestProcessVerkleInvalidContractCreation(t *testing.T) { - var ( - account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - gspec = verkleTestGenesis(testKaustinenLikeChainConfig) - ) - // slightly modify it to suit the live txs from the testnet - gspec.Alloc[account2] = types.Account{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 1, - } - - // Create two blocks that reproduce what is happening on kaustinen. - // - The first block contains two failing contract creation transactions, that - // write to storage before they revert. - // - // - The second block contains a single failing contract creation transaction, - // that fails right off the bat. 
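// For reference, an illustrative sketch of the slot key that getContractStoredBlockHash
// above reads: per EIP-2935 the ring index is the block number modulo the serve window
// (params.HistoryServeWindow in the real code), written big-endian into the last eight
// bytes of an otherwise zero 32-byte storage key (sketch uses encoding/binary):
//
//	func historySlotKey(number, serveWindow uint64) (key [32]byte) {
//		binary.BigEndian.PutUint64(key[24:], number%serveWindow)
//		return key
//	}
//
// So the parent hash recorded while processing block 1 (0x01...) lands in slot 0,
// block 2's parent hash in slot 1, and so on, which is exactly what the loop above asserts.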
- genesisH, _, chain, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - - if i == 0 { - for _, rlpData := range []string{ - // SSTORE at slot 41 and reverts - "f8d48084479c2c18830186a08080b8806000602955bda3f9600060ca55600060695523b360006039551983576000601255b0620c2fde2c592ac2600060bc55e0ac6000606455a63e22600060e655eb607e605c5360a2605d5360c7605e53601d605f5360eb606053606b606153608e60625360816063536079606453601e60655360fc60665360b7606753608b60685383021e7ca0cc20c65a97d2e526b8ec0f4266e8b01bdcde43b9aeb59d8bfb44e8eb8119c109a07a8e751813ae1b2ce734960dbc39a4f954917d7822a2c5d1dca18b06c584131f", - // SSTORE at slot 133 and reverts - "02f8db83010f2c01843b9aca0084479c2c18830186a08080b88060006085553fad6000600a55600060565555600060b55506600060cf557f1b8b38183e7bd1bdfaa7123c5a4976e54cce0e42049d841411978fd3595e25c66019527f0538943712953cf08900aae40222a40b2d5a4ac8075ad8cf0870e2be307edbb96039527f9f3174ff85024747041ae7a611acffb987c513c088d90ab288aec080a0cd6ac65ce2cb0a912371f6b5a551ba8caffc22ec55ad4d3cb53de41d05eb77b6a02e0dfe8513dfa6ec7bfd7eda6f5c0dac21b39b982436045e128cec46cfd3f960", - // this one is a simple transfer that succeeds, necessary to get the correct nonce in the other block. - "f8e80184479c2c18830186a094bbbbde4ca27f83fc18aa108170547ff57675936a80b8807ff71f7c15faadb969a76a5f54a81a0117e1e743cb7f24e378eda28442ea4c6eb6604a527fb5409e5718d44e23bfffac926e5ea726067f772772e7e19446acba0c853f62f5606a526020608a536088608b536039608c536004608d5360af608e537f7f7675d9f210e0a61564e6d11e7cd75f5bc9009ac9f6b94a0fc63035441a83021e7ba04a4a172d81ebb02847829b76a387ac09749c8b65668083699abe20c887fb9efca07c5b1a990702ec7b31a5e8e3935cd9a77649f8c25a84131229e24ab61aec6093", - } { - var tx = new(types.Transaction) - if err := tx.UnmarshalBinary(common.Hex2Bytes(rlpData)); err != nil { - t.Fatal(err) - } - gen.AddTx(tx) - } - } else { - var tx = new(types.Transaction) - // immediately reverts - if err := tx.UnmarshalBinary(common.Hex2Bytes("01f8d683010f2c028443ad7d0e830186a08080b880b00e7fa3c849dce891cce5fae8a4c46cbb313d6aec0c0ffe7863e05fb7b22d4807674c6055527ffbfcb0938f3e18f7937aa8fa95d880afebd5c4cec0d85186095832d03c85cf8a60755260ab60955360cf6096536066609753606e60985360fa609953609e609a53608e609b536024609c5360f6609d536072609e5360a4609fc080a08fc6f7101f292ff1fb0de8ac69c2d320fbb23bfe61cf327173786ea5daee6e37a044c42d91838ef06646294bf4f9835588aee66243b16a66a2da37641fae4c045f")); err != nil { - t.Fatal(err) - } - gen.AddTx(tx) - } - }) - - tx1ContractAddress := crypto.CreateAddress(account1, 0) - tx1ContractStem := utils.GetTreeKey(tx1ContractAddress[:], uint256.NewInt(0), 105) - tx1ContractStem = tx1ContractStem[:31] - - tx2ContractAddress := crypto.CreateAddress(account2, 1) - tx2SlotKey := [32]byte{} - tx2SlotKey[31] = 133 - tx2ContractStem := utils.StorageSlotKey(tx2ContractAddress[:], tx2SlotKey[:]) - tx2ContractStem = tx2ContractStem[:31] - - eip2935Stem := utils.GetTreeKey(params.HistoryStorageAddress[:], uint256.NewInt(0), 0) - eip2935Stem = eip2935Stem[:31] - - // Check that the witness contains what we expect: a storage entry for each of the two contract - // creations that failed: one at 133 for the 2nd tx, and one at 105 for the first tx. - for _, stemStateDiff := range statediffs[0] { - // Check that the slot number 133, which is overflowing the account header, - // is present. Note that the offset of the 2nd group (first group after the - // header) is skipping the first 64 values, hence we still have an offset - // of 133, and not 133 - 64. 
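// Illustrative sketch of the suffix arithmetic these witness checks rely on, assuming the
// EIP-6800/7864 account layout (HEADER_STORAGE_OFFSET = 64): storage slots 0..63 share the
// account-header stem at suffixes 64..127, while larger slots keep slot % 256 as their
// suffix inside a dedicated storage stem.
//
//	func storageSlotSuffix(slot uint64) byte {
//		if slot < 64 {
//			return byte(64 + slot) // header group
//		}
//		return byte(slot % 256) // main storage group
//	}
//
// Hence the slot-41 SSTORE of the first tx is expected at suffix 105, the slot-133 SSTORE
// of the second tx stays at suffix 133 (not 133-64), and the EIP-2935 ring slots 0 and 1
// appear as suffixes 64 and 65 in the BLOCKHASH checks below.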
- if bytes.Equal(stemStateDiff.Stem[:], tx2ContractStem[:]) { - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix != 133 { - t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - if suffixDiff.CurrentValue != nil { - t.Fatalf("invalid prestate value found for %x in block #1: %v != nil\n", stemStateDiff.Stem, suffixDiff.CurrentValue) - } - if suffixDiff.NewValue != nil { - t.Fatalf("invalid poststate value found for %x in block #1: %v != nil\n", stemStateDiff.Stem, suffixDiff.NewValue) - } - } - } else if bytes.Equal(stemStateDiff.Stem[:], tx1ContractStem) { - // For this contract creation, check that only the account header and storage slot 41 - // are found in the witness. - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix != 105 && suffixDiff.Suffix != 0 && suffixDiff.Suffix != 1 { - t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - } - } else if bytes.Equal(stemStateDiff.Stem[:], eip2935Stem) { - // Check the eip 2935 group of leaves. - // Check that only one leaf was accessed, and is present in the witness. - if len(stemStateDiff.SuffixDiffs) > 1 { - t.Fatalf("invalid suffix diff count found for BLOCKHASH contract: %d != 1", len(stemStateDiff.SuffixDiffs)) - } - // Check that this leaf is the first storage slot - if stemStateDiff.SuffixDiffs[0].Suffix != 64 { - t.Fatalf("invalid suffix diff value found for BLOCKHASH contract: %d != 64", stemStateDiff.SuffixDiffs[0].Suffix) - } - // check that the prestate value is nil and that the poststate value isn't. - if stemStateDiff.SuffixDiffs[0].CurrentValue != nil { - t.Fatalf("non-nil current value in BLOCKHASH contract insert: %x", stemStateDiff.SuffixDiffs[0].CurrentValue) - } - if stemStateDiff.SuffixDiffs[0].NewValue == nil { - t.Fatalf("nil new value in BLOCKHASH contract insert") - } - if *stemStateDiff.SuffixDiffs[0].NewValue != genesisH { - t.Fatalf("invalid BLOCKHASH value: %x != %x", *stemStateDiff.SuffixDiffs[0].NewValue, genesisH) - } - } else { - // For all other entries present in the witness, check that nothing beyond - // the account header was accessed. - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix > 2 { - t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - } - } - } - - // Check that no account has a value above 4 in the 2nd block as no storage nor - // code should make it to the witness. 
- for _, stemStateDiff := range statediffs[1] { - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if bytes.Equal(stemStateDiff.Stem[:], eip2935Stem) { - // BLOCKHASH contract stem - if len(stemStateDiff.SuffixDiffs) > 1 { - t.Fatalf("invalid suffix diff count found for BLOCKHASH contract at block #2: %d != 1", len(stemStateDiff.SuffixDiffs)) - } - if stemStateDiff.SuffixDiffs[0].Suffix != 65 { - t.Fatalf("invalid suffix diff value found for BLOCKHASH contract at block #2: %d != 65", stemStateDiff.SuffixDiffs[0].Suffix) - } - if stemStateDiff.SuffixDiffs[0].NewValue == nil { - t.Fatalf("missing post state value for BLOCKHASH contract at block #2") - } - if *stemStateDiff.SuffixDiffs[0].NewValue != chain[0].Hash() { - t.Fatalf("invalid post state value for BLOCKHASH contract at block #2: %x != %x", chain[0].Hash(), (*stemStateDiff.SuffixDiffs[0].NewValue)[:]) - } - } else if suffixDiff.Suffix > 4 { - t.Fatalf("invalid suffix diff found for %x in block #2: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - } - } -} - -func verkleTestGenesis(config *params.ChainConfig) *Genesis { - var ( - coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") - account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - ) - return &Genesis{ - Config: config, - Alloc: GenesisAlloc{ - coinbase: GenesisAccount{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - account1: GenesisAccount{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - account2: GenesisAccount{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 3, - }, - params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0}, - params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0}, - params.WithdrawalQueueAddress: {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0}, - params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0}, - }, - } -} - -// TestProcessVerkleContractWithEmptyCode checks that the witness contains all valid -// entries, if the initcode returns an empty code. 
-func TestProcessVerkleContractWithEmptyCode(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - gspec := verkleTestGenesis(&config) - - genesisH, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - var tx types.Transaction - // a transaction that does some PUSH1n but returns a 0-sized contract - txpayload := common.Hex2Bytes("02f8db83010f2d03843b9aca008444cf6a05830186a08080b8807fdfbbb59f2371a76485ce557fd0de00c298d3ede52a3eab56d35af674eb49ec5860335260826053536001605453604c60555360f3605653606060575360446058536096605953600c605a5360df605b5360f3605c5360fb605d53600c605e53609a605f53607f60605360fe606153603d60625360f4606353604b60645360cac001a0486b6dc55b8a311568b7239a2cae1d77e7446dba71df61eaafd53f73820a138fa010bd48a45e56133ac4c5645142c2ea48950d40eb35050e9510b6bad9e15c5865") - if err := tx.UnmarshalBinary(txpayload); err != nil { - t.Fatal(err) - } - gen.AddTx(&tx) - }) - - eip2935Stem := utils.GetTreeKey(params.HistoryStorageAddress[:], uint256.NewInt(0), 0) - eip2935Stem = eip2935Stem[:31] - - for _, stemStateDiff := range statediffs[0] { - // Handle the case of the history contract: make sure only the correct - // slots are added to the witness. - if bytes.Equal(stemStateDiff.Stem[:], eip2935Stem) { - // BLOCKHASH contract stem - if len(stemStateDiff.SuffixDiffs) > 1 { - t.Fatalf("invalid suffix diff count found for BLOCKHASH contract: %d != 1", len(stemStateDiff.SuffixDiffs)) - } - if stemStateDiff.SuffixDiffs[0].Suffix != 64 { - t.Fatalf("invalid suffix diff value found for BLOCKHASH contract: %d != 64", stemStateDiff.SuffixDiffs[0].Suffix) - } - // check that the "current value" is nil and that the new value isn't. - if stemStateDiff.SuffixDiffs[0].CurrentValue != nil { - t.Fatalf("non-nil current value in BLOCKHASH contract insert: %x", stemStateDiff.SuffixDiffs[0].CurrentValue) - } - if stemStateDiff.SuffixDiffs[0].NewValue == nil { - t.Fatalf("nil new value in BLOCKHASH contract insert") - } - if *stemStateDiff.SuffixDiffs[0].NewValue != genesisH { - t.Fatalf("invalid BLOCKHASH value: %x != %x", *stemStateDiff.SuffixDiffs[0].NewValue, genesisH) - } - } else { - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix > 2 { - // if d8898012c484fb48610ecb7963886339207dab004bce968b007b616ffa18e0 shows up, it means that the PUSHn - // in the transaction above added entries into the witness, when they should not have since they are - // part of a contract deployment. - t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - } - } - } -} - -// TestProcessVerkleExtCodeHashOpcode verifies that calling EXTCODEHASH on another -// deployed contract, creates all the right entries in the witness. 
-func TestProcessVerkleExtCodeHashOpcode(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - gspec = verkleTestGenesis(&config) - ) - dummyContract := []byte{ - byte(vm.PUSH1), 2, - byte(vm.PUSH1), 12, - byte(vm.PUSH1), 0x00, - byte(vm.CODECOPY), - - byte(vm.PUSH1), 2, - byte(vm.PUSH1), 0x00, - byte(vm.RETURN), - - byte(vm.PUSH1), 42, - } - deployer := crypto.PubkeyToAddress(testKey.PublicKey) - dummyContractAddr := crypto.CreateAddress(deployer, 0) - - // contract that calls EXTCODEHASH on the dummy contract - extCodeHashContract := []byte{ - byte(vm.PUSH1), 22, - byte(vm.PUSH1), 12, - byte(vm.PUSH1), 0x00, - byte(vm.CODECOPY), - - byte(vm.PUSH1), 22, - byte(vm.PUSH1), 0x00, - byte(vm.RETURN), - - byte(vm.PUSH20), - 0x3a, 0x22, 0x0f, 0x35, 0x12, 0x52, 0x08, 0x9d, 0x38, 0x5b, 0x29, 0xbe, 0xca, 0x14, 0xe2, 0x7f, 0x20, 0x4c, 0x29, 0x6a, - byte(vm.EXTCODEHASH), - } - extCodeHashContractAddr := crypto.CreateAddress(deployer, 1) - - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - - if i == 0 { - // Create dummy contract. - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(0), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: dummyContract, - }) - gen.AddTx(tx) - - // Create contract with EXTCODEHASH opcode. - tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 1, - Value: big.NewInt(0), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: extCodeHashContract}) - gen.AddTx(tx) - } else { - tx, _ := types.SignTx(types.NewTransaction(2, extCodeHashContractAddr, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - } - }) - - contractKeccakTreeKey := utils.CodeHashKey(dummyContractAddr[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[1] { - if bytes.Equal(stemStateDiff.Stem[:], contractKeccakTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - codeHashStateDiff := statediffs[1][stateDiffIdx].SuffixDiffs[0] - // Check location of code hash was accessed - if codeHashStateDiff.Suffix != utils.CodeHashLeafKey { - t.Fatalf("code hash invalid suffix") - } - // check the code hash wasn't present in the prestate, as - // the contract was deployed in this block. - if codeHashStateDiff.CurrentValue == nil { - t.Fatalf("codeHash.CurrentValue must not be empty") - } - // check the poststate value corresponds to the code hash - // of the deployed contract. - expCodeHash := crypto.Keccak256Hash(dummyContract[12:]) - if *codeHashStateDiff.CurrentValue != expCodeHash { - t.Fatalf("codeHash.CurrentValue unexpected code hash") - } - if codeHashStateDiff.NewValue != nil { - t.Fatalf("codeHash.NewValue must be nil") - } -} - -// TestProcessVerkleBalanceOpcode checks that calling balance -// on another contract will add the correct entries to the witness. 
-func TestProcessVerkleBalanceOpcode(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - gspec = verkleTestGenesis(&config) - ) - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - txData := slices.Concat( - []byte{byte(vm.PUSH20)}, - common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d").Bytes(), - []byte{byte(vm.BALANCE)}) - - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(0), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: txData}) - gen.AddTx(tx) - }) - - account2BalanceTreeKey := utils.BasicDataKey(account2[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[0] { - if bytes.Equal(stemStateDiff.Stem[:], account2BalanceTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - var zero [32]byte - balanceStateDiff := statediffs[0][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("invalid suffix diff") - } - // check the prestate balance wasn't 0 or missing - if balanceStateDiff.CurrentValue == nil || *balanceStateDiff.CurrentValue == zero { - t.Fatalf("invalid current value %v", *balanceStateDiff.CurrentValue) - } - // check that the poststate witness value for the balance is nil, - // meaning that it didn't get updated. - if balanceStateDiff.NewValue != nil { - t.Fatalf("invalid new value") - } -} - -// TestProcessVerkleSelfDestructInSeparateTx controls the contents of the witness after -// a non-eip6780-compliant selfdestruct occurs. -func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - gspec = verkleTestGenesis(&config) - ) - - // runtime code: selfdestruct ( 0x6177843db3138ae69679A54b95cf345ED759450d ) - runtimeCode := slices.Concat( - []byte{byte(vm.PUSH20)}, - account2.Bytes(), - []byte{byte(vm.SELFDESTRUCT)}) - - //The goal of this test is to test SELFDESTRUCT that happens in a contract - // execution which is created in a previous transaction. - selfDestructContract := slices.Concat([]byte{ - byte(vm.PUSH1), byte(len(runtimeCode)), - byte(vm.PUSH1), 12, - byte(vm.PUSH1), 0x00, - byte(vm.CODECOPY), // Codecopy( to-offset: 0, code offset: 12, length: 22 ) - - byte(vm.PUSH1), byte(len(runtimeCode)), - byte(vm.PUSH1), 0x00, - byte(vm.RETURN), // Return ( 0 : len(runtimecode) - }, - runtimeCode) - - deployer := crypto.PubkeyToAddress(testKey.PublicKey) - contract := crypto.CreateAddress(deployer, 0) - - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - - if i == 0 { - // Create selfdestruct contract, sending 42 wei. 
- tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - } else { - // Call it. - tx, _ := types.SignTx(types.NewTransaction(1, contract, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - } - }) - - var zero [32]byte - { // Check self-destructed contract in the witness - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[1] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - balanceStateDiff := statediffs[1][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("balance invalid suffix") - } - - // The original balance was 42. - var oldBalance [16]byte - oldBalance[15] = 42 - if !bytes.Equal((*balanceStateDiff.CurrentValue)[utils.BasicDataBalanceOffset:], oldBalance[:]) { - t.Fatalf("the pre-state balance before self-destruct must be %x, got %x", oldBalance, *balanceStateDiff.CurrentValue) - } - - // The new balance must be 0. - if !bytes.Equal((*balanceStateDiff.NewValue)[utils.BasicDataBalanceOffset:], zero[utils.BasicDataBalanceOffset:]) { - t.Fatalf("the post-state balance after self-destruct must be 0") - } - } - { // Check self-destructed target in the witness. - selfDestructTargetTreeKey := utils.CodeHashKey(account2[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[1] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructTargetTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - balanceStateDiff := statediffs[1][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("balance invalid suffix") - } - if balanceStateDiff.CurrentValue == nil { - t.Fatalf("codeHash.CurrentValue must not be empty") - } - if balanceStateDiff.NewValue == nil { - t.Fatalf("codeHash.NewValue must not be empty") - } - preStateBalance := binary.BigEndian.Uint64(balanceStateDiff.CurrentValue[utils.BasicDataBalanceOffset+8:]) - postStateBalance := binary.BigEndian.Uint64(balanceStateDiff.NewValue[utils.BasicDataBalanceOffset+8:]) - if postStateBalance-preStateBalance != 42 { - t.Fatalf("the post-state balance after self-destruct must be 42, got %d-%d=%d", postStateBalance, preStateBalance, postStateBalance-preStateBalance) - } - } -} - -// TestProcessVerkleSelfDestructInSameTx controls the contents of the witness after -// a eip6780-compliant selfdestruct occurs. -func TestProcessVerkleSelfDestructInSameTx(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - gspec = verkleTestGenesis(&config) - ) - - // The goal of this test is to test SELFDESTRUCT that happens in a contract - // execution which is created in **the same** transaction sending the remaining - // balance to an external (i.e: not itself) account. 
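// Illustrative sketch of how the balance assertions in these selfdestruct tests read the
// account balance: they compare a 16-byte value against leaf[utils.BasicDataBalanceOffset:],
// so the balance is the big-endian 16-byte field at the end of the 32-byte basic-data leaf
// (offset 16 under the EIP-6800/7864 layout assumed here), decodable with encoding/binary:
//
//	func basicDataBalance(leaf [32]byte) (hi, lo uint64) {
//		hi = binary.BigEndian.Uint64(leaf[16:24]) // upper half of the 16-byte balance
//		lo = binary.BigEndian.Uint64(leaf[24:32]) // lower half; the 42 wei end up here
//		return hi, lo
//	}
//
// This is why the checks read only leaf[utils.BasicDataBalanceOffset+8:] whenever the
// balances involved fit in a uint64.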
- - selfDestructContract := slices.Concat( - []byte{byte(vm.PUSH20)}, - account2.Bytes(), - []byte{byte(vm.SELFDESTRUCT)}) - deployer := crypto.PubkeyToAddress(testKey.PublicKey) - contract := crypto.CreateAddress(deployer, 0) - - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - }) - - { // Check self-destructed contract in the witness - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[0] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - balanceStateDiff := statediffs[0][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("balance invalid suffix") - } - - if balanceStateDiff.CurrentValue != nil { - t.Fatalf("the pre-state balance before must be nil, since the contract didn't exist") - } - - if balanceStateDiff.NewValue != nil { - t.Fatalf("the post-state balance after self-destruct must be nil since the contract shouldn't be created at all") - } - } - { // Check self-destructed target in the witness. - selfDestructTargetTreeKey := utils.CodeHashKey(account2[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[0] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructTargetTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - balanceStateDiff := statediffs[0][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("balance invalid suffix") - } - if balanceStateDiff.CurrentValue == nil { - t.Fatalf("codeHash.CurrentValue must not be empty") - } - if balanceStateDiff.NewValue == nil { - t.Fatalf("codeHash.NewValue must not be empty") - } - preStateBalance := binary.BigEndian.Uint64(balanceStateDiff.CurrentValue[utils.BasicDataBalanceOffset+8:]) - postStateBalance := binary.BigEndian.Uint64(balanceStateDiff.NewValue[utils.BasicDataBalanceOffset+8:]) - if postStateBalance-preStateBalance != 42 { - t.Fatalf("the post-state balance after self-destruct must be 42. got %d", postStateBalance) - } - } -} - -// TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary checks the content of the witness -// if a selfdestruct occurs in a different tx than the one that created it, but the beneficiary -// is the selfdestructed account. -func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - gspec = verkleTestGenesis(&config) - ) - // The goal of this test is to test SELFDESTRUCT that happens in a contract - // execution which is created in a *previous* transaction sending the remaining - // balance to itself. 
- selfDestructContract := []byte{ - byte(vm.PUSH1), 2, // PUSH1 2 - byte(vm.PUSH1), 10, // PUSH1 12 - byte(vm.PUSH0), // PUSH0 - byte(vm.CODECOPY), // Codecopy ( to offset 0, code@offset: 10, length: 2) - - byte(vm.PUSH1), 22, - byte(vm.PUSH0), - byte(vm.RETURN), // RETURN( memory[0:2] ) - - // Deployed code - byte(vm.ADDRESS), - byte(vm.SELFDESTRUCT), - } - deployer := crypto.PubkeyToAddress(testKey.PublicKey) - contract := crypto.CreateAddress(deployer, 0) - - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - if i == 0 { - // Create self-destruct contract, sending 42 wei. - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - } else { - // Call it. - tx, _ := types.SignTx(types.NewTransaction(1, contract, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - } - }) - - { - // Check self-destructed contract in the witness. - // The way 6780 is implemented today, it always SubBalance from the self-destructed contract, and AddBalance - // to the beneficiary. In this case both addresses are the same, thus this might be optimizable from a gas - // perspective. But until that happens, we need to honor this "balance reading" adding it to the witness. - - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[1] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatal("no state diff found for stem") - } - - balanceStateDiff := statediffs[1][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatal("balance invalid suffix") - } - - // The original balance was 42. - var oldBalance [16]byte - oldBalance[15] = 42 - if !bytes.Equal((*balanceStateDiff.CurrentValue)[utils.BasicDataBalanceOffset:], oldBalance[:]) { - t.Fatal("the pre-state balance before self-destruct must be 42") - } - - // Note that the SubBalance+AddBalance net effect is a 0 change, so NewValue - // must be nil. - if balanceStateDiff.NewValue != nil { - t.Fatal("the post-state balance after self-destruct must be empty") - } - } -} - -// TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary checks the content of the witness -// if a selfdestruct occurs in the same tx as the one that created it, but the beneficiary -// is the selfdestructed account. -func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - gspec = verkleTestGenesis(&config) - deployer = crypto.PubkeyToAddress(testKey.PublicKey) - contract = crypto.CreateAddress(deployer, 0) - ) - - // The goal of this test is to test SELFDESTRUCT that happens while executing - // the init code of a contract creation, that occurs in **the same** transaction. - // The balance is sent to itself. 
- t.Logf("Contract: %v", contract.String()) - - selfDestructContract := []byte{byte(vm.ADDRESS), byte(vm.SELFDESTRUCT)} - - _, _, _, _, _, stateDiffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - }) - stateDiff := stateDiffs[0] // state difference of block 1 - - { // Check self-destructed contract in the witness - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range stateDiff { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatal("no state diff found for stem") - } - balanceStateDiff := stateDiff[stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatal("balance invalid suffix") - } - if balanceStateDiff.CurrentValue != nil { - t.Fatal("the pre-state balance before must be nil, since the contract didn't exist") - } - // Ensure that the value is burnt, and therefore that the balance of the self-destructed - // contract isn't modified (it should remain missing from the state) - if balanceStateDiff.NewValue != nil { - t.Fatal("the post-state balance after self-destruct must be nil since the contract shouldn't be created at all") - } - } -} - -// TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiaryAndPrefundedAccount checks the -// content of the witness if a selfdestruct occurs in the same tx as the one that created it, -// it, but the beneficiary is the selfdestructed account. The difference with the test above, -// is that the created account is prefunded and so the final value should be 0. -func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiaryAndPrefundedAccount(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - gspec = verkleTestGenesis(&config) - deployer = crypto.PubkeyToAddress(testKey.PublicKey) - contract = crypto.CreateAddress(deployer, 0) - ) - // Prefund the account, at an address that the contract will be deployed at, - // before it selfdestrucs. We can therefore check that the account itseld is - // NOT destroyed, which is what the current version of the spec requires. - // TODO(gballet) revisit after the spec has been modified. 
- gspec.Alloc[contract] = types.Account{ - Balance: big.NewInt(100), - } - - selfDestructContract := []byte{byte(vm.ADDRESS), byte(vm.SELFDESTRUCT)} - - _, _, _, _, _, stateDiffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - }) - stateDiff := stateDiffs[0] // state difference of block 1 - - { // Check self-destructed contract in the witness - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range stateDiff { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatal("no state diff found for stem") - } - balanceStateDiff := stateDiff[stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatal("balance invalid suffix") - } - expected, _ := hex.DecodeString("0000000000000000000000000000000000000000000000000000000000000064") - if balanceStateDiff.CurrentValue == nil || !bytes.Equal(balanceStateDiff.CurrentValue[:], expected) { - t.Fatalf("incorrect prestate balance: %x != %x", *balanceStateDiff.CurrentValue, expected) - } - // Ensure that the value is burnt, and therefore that the balance of the self-destructed - // contract isn't modified (it should remain missing from the state) - expected = make([]byte, 32) - if balanceStateDiff.NewValue == nil { - t.Fatal("incorrect nil poststate balance") - } - if !bytes.Equal(balanceStateDiff.NewValue[:], expected[:]) { - t.Fatalf("incorrect poststate balance: %x != %x", *balanceStateDiff.NewValue, expected[:]) - } - } -} diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 3b88753b1c82..2ced18787a24 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -117,19 +117,20 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t return UnsupportedForkError{t.json.Network} } // import pre accounts & construct test genesis block & state root + // Commit genesis state var ( + gspec = t.genesis(config) db = rawdb.NewMemoryDatabase() tconf = &triedb.Config{ Preimages: true, + IsVerkle: gspec.Config.VerkleTime != nil && *gspec.Config.VerkleTime <= gspec.Timestamp, } ) - if scheme == rawdb.PathScheme { + if scheme == rawdb.PathScheme || tconf.IsVerkle { tconf.PathDB = pathdb.Defaults } else { tconf.HashDB = hashdb.Defaults } - // Commit genesis state - gspec := t.genesis(config) // if ttd is not specified, set an arbitrary huge value if gspec.Config.TerminalTotalDifficulty == nil { diff --git a/tests/init.go b/tests/init.go index 705e929ae9d7..d10b47986cd4 100644 --- a/tests/init.go +++ b/tests/init.go @@ -720,6 +720,25 @@ var Forks = map[string]*params.ChainConfig{ BPO4: params.DefaultBPO4BlobConfig, }, }, + "Verkle": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + VerkleTime: 
u64(0), + }, } var bpo1BlobConfig = ¶ms.BlobConfig{ diff --git a/trie/bintrie/binary_node.go b/trie/bintrie/binary_node.go index 1c003a6c8fdf..690489b2aa16 100644 --- a/trie/bintrie/binary_node.go +++ b/trie/bintrie/binary_node.go @@ -31,8 +31,11 @@ type ( var zero [32]byte const ( - NodeWidth = 256 // Number of child per leaf node - StemSize = 31 // Number of bytes to travel before reaching a group of leaves + StemNodeWidth = 256 // Number of child per leaf node + StemSize = 31 // Number of bytes to travel before reaching a group of leaves + NodeTypeBytes = 1 // Size of node type prefix in serialization + HashSize = 32 // Size of a hash in bytes + BitmapSize = 32 // Size of the bitmap in a stem node ) const ( @@ -58,25 +61,28 @@ type BinaryNode interface { func SerializeNode(node BinaryNode) []byte { switch n := (node).(type) { case *InternalNode: - var serialized [65]byte + // InternalNode: 1 byte type + 32 bytes left hash + 32 bytes right hash + var serialized [NodeTypeBytes + HashSize + HashSize]byte serialized[0] = nodeTypeInternal copy(serialized[1:33], n.left.Hash().Bytes()) copy(serialized[33:65], n.right.Hash().Bytes()) return serialized[:] case *StemNode: - var serialized [32 + 32 + 256*32]byte + // StemNode: 1 byte type + 31 bytes stem + 32 bytes bitmap + 256*32 bytes values + var serialized [NodeTypeBytes + StemSize + BitmapSize + StemNodeWidth*HashSize]byte serialized[0] = nodeTypeStem - copy(serialized[1:32], node.(*StemNode).Stem) - bitmap := serialized[32:64] - offset := 64 - for i, v := range node.(*StemNode).Values { + copy(serialized[NodeTypeBytes:NodeTypeBytes+StemSize], n.Stem) + bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize] + offset := NodeTypeBytes + StemSize + BitmapSize + for i, v := range n.Values { if v != nil { bitmap[i/8] |= 1 << (7 - (i % 8)) - copy(serialized[offset:offset+32], v) - offset += 32 + copy(serialized[offset:offset+HashSize], v) + offset += HashSize } } - return serialized[:] + // Only return the actual data, not the entire array + return serialized[:offset] default: panic("invalid node type") } @@ -104,21 +110,21 @@ func DeserializeNode(serialized []byte, depth int) (BinaryNode, error) { if len(serialized) < 64 { return nil, invalidSerializedLength } - var values [256][]byte - bitmap := serialized[32:64] - offset := 64 + var values [StemNodeWidth][]byte + bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize] + offset := NodeTypeBytes + StemSize + BitmapSize - for i := range 256 { + for i := range StemNodeWidth { if bitmap[i/8]>>(7-(i%8))&1 == 1 { - if len(serialized) < offset+32 { + if len(serialized) < offset+HashSize { return nil, invalidSerializedLength } - values[i] = serialized[offset : offset+32] - offset += 32 + values[i] = serialized[offset : offset+HashSize] + offset += HashSize } } return &StemNode{ - Stem: serialized[1:32], + Stem: serialized[NodeTypeBytes : NodeTypeBytes+StemSize], Values: values[:], depth: depth, }, nil diff --git a/trie/bintrie/binary_node_test.go b/trie/bintrie/binary_node_test.go index b21daaab6974..242743ba53bb 100644 --- a/trie/bintrie/binary_node_test.go +++ b/trie/bintrie/binary_node_test.go @@ -77,12 +77,12 @@ func TestSerializeDeserializeInternalNode(t *testing.T) { // TestSerializeDeserializeStemNode tests serialization and deserialization of StemNode func TestSerializeDeserializeStemNode(t *testing.T) { // Create a stem node with some values - stem := make([]byte, 31) + stem := make([]byte, StemSize) for i := range stem { stem[i] = byte(i) } - 
var values [256][]byte + var values [StemNodeWidth][]byte // Add some values at different indices values[0] = common.HexToHash("0x0101010101010101010101010101010101010101010101010101010101010101").Bytes() values[10] = common.HexToHash("0x0202020202020202020202020202020202020202020202020202020202020202").Bytes() @@ -103,7 +103,7 @@ func TestSerializeDeserializeStemNode(t *testing.T) { } // Check the stem is correctly serialized - if !bytes.Equal(serialized[1:32], stem) { + if !bytes.Equal(serialized[1:1+StemSize], stem) { t.Errorf("Stem mismatch in serialized data") } @@ -136,7 +136,7 @@ func TestSerializeDeserializeStemNode(t *testing.T) { } // Check that other values are nil - for i := range NodeWidth { + for i := range StemNodeWidth { if i == 0 || i == 10 || i == 255 { continue } @@ -218,15 +218,15 @@ func TestKeyToPath(t *testing.T) { }, { name: "max valid depth", - depth: 31 * 8, - key: make([]byte, 32), - expected: make([]byte, 31*8+1), + depth: StemSize * 8, + key: make([]byte, HashSize), + expected: make([]byte, StemSize*8+1), wantErr: false, }, { name: "depth too large", - depth: 31*8 + 1, - key: make([]byte, 32), + depth: StemSize*8 + 1, + key: make([]byte, HashSize), wantErr: true, }, } diff --git a/trie/bintrie/hashed_node.go b/trie/bintrie/hashed_node.go index 8f9fd66a59a0..e4d8c2e7ac7d 100644 --- a/trie/bintrie/hashed_node.go +++ b/trie/bintrie/hashed_node.go @@ -46,8 +46,31 @@ func (h HashedNode) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error return nil, errors.New("attempted to get values from an unresolved node") } -func (h HashedNode) InsertValuesAtStem(key []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) { - return nil, errors.New("insertValuesAtStem not implemented for hashed node") +func (h HashedNode) InsertValuesAtStem(stem []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) { + // Step 1: Generate the path for this node's position in the tree + path, err := keyToPath(depth, stem) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem path generation error: %w", err) + } + + if resolver == nil { + return nil, errors.New("InsertValuesAtStem resolve error: resolver is nil") + } + + // Step 2: Resolve the hashed node to get the actual node data + data, err := resolver(path, common.Hash(h)) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + + // Step 3: Deserialize the resolved data into a concrete node + node, err := DeserializeNode(data, depth) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem node deserialization error: %w", err) + } + + // Step 4: Call InsertValuesAtStem on the resolved concrete node + return node.InsertValuesAtStem(stem, values, resolver, depth) } func (h HashedNode) toDot(parent string, path string) string { @@ -58,7 +81,8 @@ func (h HashedNode) toDot(parent string, path string) string { } func (h HashedNode) CollectNodes([]byte, NodeFlushFn) error { - return errors.New("collectNodes not implemented for hashed node") + // HashedNodes are already persisted in the database and don't need to be collected. 
+ return nil } func (h HashedNode) GetHeight() int { diff --git a/trie/bintrie/hashed_node_test.go b/trie/bintrie/hashed_node_test.go index 0c19ae0c57d4..f9e69848884a 100644 --- a/trie/bintrie/hashed_node_test.go +++ b/trie/bintrie/hashed_node_test.go @@ -17,6 +17,7 @@ package bintrie import ( + "bytes" "testing" "github.com/ethereum/go-ethereum/common" @@ -59,8 +60,8 @@ func TestHashedNodeCopy(t *testing.T) { func TestHashedNodeInsert(t *testing.T) { node := HashedNode(common.HexToHash("0x1234")) - key := make([]byte, 32) - value := make([]byte, 32) + key := make([]byte, HashSize) + value := make([]byte, HashSize) _, err := node.Insert(key, value, nil, 0) if err == nil { @@ -76,7 +77,7 @@ func TestHashedNodeInsert(t *testing.T) { func TestHashedNodeGetValuesAtStem(t *testing.T) { node := HashedNode(common.HexToHash("0x1234")) - stem := make([]byte, 31) + stem := make([]byte, StemSize) _, err := node.GetValuesAtStem(stem, nil) if err == nil { t.Fatal("Expected error for GetValuesAtStem on HashedNode") @@ -91,17 +92,85 @@ func TestHashedNodeGetValuesAtStem(t *testing.T) { func TestHashedNodeInsertValuesAtStem(t *testing.T) { node := HashedNode(common.HexToHash("0x1234")) - stem := make([]byte, 31) - values := make([][]byte, 256) + stem := make([]byte, StemSize) + values := make([][]byte, StemNodeWidth) + // Test 1: nil resolver should return an error _, err := node.InsertValuesAtStem(stem, values, nil, 0) if err == nil { - t.Fatal("Expected error for InsertValuesAtStem on HashedNode") + t.Fatal("Expected error for InsertValuesAtStem on HashedNode with nil resolver") } - if err.Error() != "insertValuesAtStem not implemented for hashed node" { + if err.Error() != "InsertValuesAtStem resolve error: resolver is nil" { t.Errorf("Unexpected error message: %v", err) } + + // Test 2: mock resolver returning invalid data should return deserialization error + mockResolver := func(path []byte, hash common.Hash) ([]byte, error) { + // Return invalid/nonsense data that cannot be deserialized + return []byte{0xff, 0xff, 0xff}, nil + } + + _, err = node.InsertValuesAtStem(stem, values, mockResolver, 0) + if err == nil { + t.Fatal("Expected error for InsertValuesAtStem on HashedNode with invalid resolver data") + } + + expectedPrefix := "InsertValuesAtStem node deserialization error:" + if len(err.Error()) < len(expectedPrefix) || err.Error()[:len(expectedPrefix)] != expectedPrefix { + t.Errorf("Expected deserialization error, got: %v", err) + } + + // Test 3: mock resolver returning valid serialized node should succeed + stem = make([]byte, StemSize) + stem[0] = 0xaa + var originalValues [StemNodeWidth][]byte + originalValues[0] = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes() + originalValues[1] = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222").Bytes() + + originalNode := &StemNode{ + Stem: stem, + Values: originalValues[:], + depth: 0, + } + + // Serialize the node + serialized := SerializeNode(originalNode) + + // Create a mock resolver that returns the serialized node + validResolver := func(path []byte, hash common.Hash) ([]byte, error) { + return serialized, nil + } + + var newValues [StemNodeWidth][]byte + newValues[2] = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333").Bytes() + + resolvedNode, err := node.InsertValuesAtStem(stem, newValues[:], validResolver, 0) + if err != nil { + t.Fatalf("Expected successful resolution and insertion, got error: %v", err) + } + + 
resultStem, ok := resolvedNode.(*StemNode) + if !ok { + t.Fatalf("Expected resolved node to be *StemNode, got %T", resolvedNode) + } + + if !bytes.Equal(resultStem.Stem, stem) { + t.Errorf("Stem mismatch: expected %x, got %x", stem, resultStem.Stem) + } + + // Verify the original values are preserved + if !bytes.Equal(resultStem.Values[0], originalValues[0]) { + t.Errorf("Original value at index 0 not preserved: expected %x, got %x", originalValues[0], resultStem.Values[0]) + } + if !bytes.Equal(resultStem.Values[1], originalValues[1]) { + t.Errorf("Original value at index 1 not preserved: expected %x, got %x", originalValues[1], resultStem.Values[1]) + } + + // Verify the new value was inserted + if !bytes.Equal(resultStem.Values[2], newValues[2]) { + t.Errorf("New value at index 2 not inserted correctly: expected %x, got %x", newValues[2], resultStem.Values[2]) + } } // TestHashedNodeToDot tests the toDot method for visualization diff --git a/trie/bintrie/internal_node.go b/trie/bintrie/internal_node.go index f3ddd1aab023..0a7bece521fd 100644 --- a/trie/bintrie/internal_node.go +++ b/trie/bintrie/internal_node.go @@ -49,14 +49,26 @@ func (bt *InternalNode) GetValuesAtStem(stem []byte, resolver NodeResolverFn) ([ } bit := stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1 - var child *BinaryNode if bit == 0 { - child = &bt.left - } else { - child = &bt.right + if hn, ok := bt.left.(HashedNode); ok { + path, err := keyToPath(bt.depth, stem) + if err != nil { + return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err) + } + data, err := resolver(path, common.Hash(hn)) + if err != nil { + return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err) + } + node, err := DeserializeNode(data, bt.depth+1) + if err != nil { + return nil, fmt.Errorf("GetValuesAtStem node deserialization error: %w", err) + } + bt.left = node + } + return bt.left.GetValuesAtStem(stem, resolver) } - if hn, ok := (*child).(HashedNode); ok { + if hn, ok := bt.right.(HashedNode); ok { path, err := keyToPath(bt.depth, stem) if err != nil { return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err) @@ -69,9 +81,9 @@ func (bt *InternalNode) GetValuesAtStem(stem []byte, resolver NodeResolverFn) ([ if err != nil { return nil, fmt.Errorf("GetValuesAtStem node deserialization error: %w", err) } - *child = node + bt.right = node } - return (*child).GetValuesAtStem(stem, resolver) + return bt.right.GetValuesAtStem(stem, resolver) } // Get retrieves the value for the given key. @@ -80,6 +92,9 @@ func (bt *InternalNode) Get(key []byte, resolver NodeResolverFn) ([]byte, error) if err != nil { return nil, fmt.Errorf("get error: %w", err) } + if values == nil { + return nil, nil + } return values[key[31]], nil } @@ -118,17 +133,54 @@ func (bt *InternalNode) Hash() common.Hash { // InsertValuesAtStem inserts a full value group at the given stem in the internal node. // Already-existing values will be overwritten. 
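// Illustrative sketch of the traversal step used below (and in GetValuesAtStem above):
// at depth d the child choice is bit (7 - d%8) of stem[d/8], most-significant bit first.
//
//	func childBit(stem []byte, depth int) byte {
//		return stem[depth/8] >> (7 - depth%8) & 1
//	}
//
// For a stem starting with 0xA0 (0b1010_0000): depth 0 selects the right child,
// depth 1 the left child, and depth 2 the right child again.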
func (bt *InternalNode) InsertValuesAtStem(stem []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) { - var ( - child *BinaryNode - err error - ) + var err error bit := stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1 if bit == 0 { - child = &bt.left - } else { - child = &bt.right + if bt.left == nil { + bt.left = Empty{} + } + + if hn, ok := bt.left.(HashedNode); ok { + path, err := keyToPath(bt.depth, stem) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + data, err := resolver(path, common.Hash(hn)) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + node, err := DeserializeNode(data, bt.depth+1) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem node deserialization error: %w", err) + } + bt.left = node + } + + bt.left, err = bt.left.InsertValuesAtStem(stem, values, resolver, depth+1) + return bt, err + } + + if bt.right == nil { + bt.right = Empty{} } - *child, err = (*child).InsertValuesAtStem(stem, values, resolver, depth+1) + + if hn, ok := bt.right.(HashedNode); ok { + path, err := keyToPath(bt.depth, stem) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + data, err := resolver(path, common.Hash(hn)) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + node, err := DeserializeNode(data, bt.depth+1) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem node deserialization error: %w", err) + } + bt.right = node + } + + bt.right, err = bt.right.InsertValuesAtStem(stem, values, resolver, depth+1) return bt, err } diff --git a/trie/bintrie/iterator.go b/trie/bintrie/iterator.go index a6bab2bcfa9f..9b863ed1e3f8 100644 --- a/trie/bintrie/iterator.go +++ b/trie/bintrie/iterator.go @@ -108,6 +108,11 @@ func (it *binaryNodeIterator) Next(descend bool) bool { } // go back to parent to get the next leaf + // Check if we're at the root before popping + if len(it.stack) == 1 { + it.lastErr = errIteratorEnd + return false + } it.stack = it.stack[:len(it.stack)-1] it.current = it.stack[len(it.stack)-1].Node it.stack[len(it.stack)-1].Index++ @@ -183,9 +188,31 @@ func (it *binaryNodeIterator) NodeBlob() []byte { } // Leaf returns true iff the current node is a leaf node. +// In a Binary Trie, a StemNode contains up to 256 leaf values. +// The iterator is only considered to be "at a leaf" when it's positioned +// at a specific non-nil value within the StemNode, not just at the StemNode itself. func (it *binaryNodeIterator) Leaf() bool { - _, ok := it.current.(*StemNode) - return ok + sn, ok := it.current.(*StemNode) + if !ok { + return false + } + + // Check if we have a valid stack position + if len(it.stack) == 0 { + return false + } + + // The Index in the stack state points to the NEXT position after the current value. + // So if Index is 0, we haven't started iterating through the values yet. + // If Index is 5, we're currently at value[4] (the 5th value, 0-indexed). + idx := it.stack[len(it.stack)-1].Index + if idx == 0 || idx > 256 { + return false + } + + // Check if there's actually a value at the current position + currentValueIndex := idx - 1 + return sn.Values[currentValueIndex] != nil } // LeafKey returns the key of the leaf. 
The method panics if the iterator is not @@ -219,7 +246,7 @@ func (it *binaryNodeIterator) LeafProof() [][]byte { panic("LeafProof() called on an binary node iterator not at a leaf location") } - proof := make([][]byte, 0, len(it.stack)+NodeWidth) + proof := make([][]byte, 0, len(it.stack)+StemNodeWidth) // Build proof by walking up the stack and collecting sibling hashes for i := range it.stack[:len(it.stack)-2] { diff --git a/trie/bintrie/iterator_test.go b/trie/bintrie/iterator_test.go deleted file mode 100644 index 8773e9e0c54c..000000000000 --- a/trie/bintrie/iterator_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2025 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bintrie - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum/go-ethereum/triedb/hashdb" - "github.com/ethereum/go-ethereum/triedb/pathdb" - "github.com/holiman/uint256" -) - -func newTestDatabase(diskdb ethdb.Database, scheme string) *triedb.Database { - config := &triedb.Config{Preimages: true} - if scheme == rawdb.HashScheme { - config.HashDB = &hashdb.Config{CleanCacheSize: 0} - } else { - config.PathDB = &pathdb.Config{TrieCleanSize: 0, StateCleanSize: 0} - } - return triedb.NewDatabase(diskdb, config) -} - -func TestBinaryIterator(t *testing.T) { - trie, err := NewBinaryTrie(types.EmptyVerkleHash, newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)) - if err != nil { - t.Fatal(err) - } - account0 := &types.StateAccount{ - Nonce: 1, - Balance: uint256.NewInt(2), - Root: types.EmptyRootHash, - CodeHash: nil, - } - // NOTE: the code size isn't written to the trie via TryUpdateAccount - // so it will be missing from the test nodes. - trie.UpdateAccount(common.Address{}, account0, 0) - account1 := &types.StateAccount{ - Nonce: 1337, - Balance: uint256.NewInt(2000), - Root: types.EmptyRootHash, - CodeHash: nil, - } - // This address is meant to hash to a value that has the same first byte as 0xbf - var clash = common.HexToAddress("69fd8034cdb20934dedffa7dccb4fb3b8062a8be") - trie.UpdateAccount(clash, account1, 0) - - // Manually go over every node to check that we get all - // the correct nodes. 
- it, err := trie.NodeIterator(nil) - if err != nil { - t.Fatal(err) - } - var leafcount int - for it.Next(true) { - t.Logf("Node: %x", it.Path()) - if it.Leaf() { - leafcount++ - t.Logf("\tLeaf: %x", it.LeafKey()) - } - } - if leafcount != 2 { - t.Fatalf("invalid leaf count: %d != 6", leafcount) - } -} diff --git a/trie/bintrie/key_encoding.go b/trie/bintrie/key_encoding.go index 13c20573710b..cda797521a66 100644 --- a/trie/bintrie/key_encoding.go +++ b/trie/bintrie/key_encoding.go @@ -47,6 +47,12 @@ func GetBinaryTreeKey(addr common.Address, key []byte) []byte { return k } +func GetBinaryTreeKeyBasicData(addr common.Address) []byte { + var k [32]byte + k[31] = BasicDataLeafKey + return GetBinaryTreeKey(addr, k[:]) +} + func GetBinaryTreeKeyCodeHash(addr common.Address) []byte { var k [32]byte k[31] = CodeHashLeafKey diff --git a/trie/bintrie/stem_node.go b/trie/bintrie/stem_node.go index 50c06c9761e7..60856b42ce60 100644 --- a/trie/bintrie/stem_node.go +++ b/trie/bintrie/stem_node.go @@ -28,7 +28,7 @@ import ( // StemNode represents a group of `NodeWith` values sharing the same stem. type StemNode struct { - Stem []byte // Stem path to get to 256 values + Stem []byte // Stem path to get to StemNodeWidth values Values [][]byte // All values, indexed by the last byte of the key. depth int // Depth of the node } @@ -40,7 +40,7 @@ func (bt *StemNode) Get(key []byte, _ NodeResolverFn) ([]byte, error) { // Insert inserts a new key-value pair into the node. func (bt *StemNode) Insert(key []byte, value []byte, _ NodeResolverFn, depth int) (BinaryNode, error) { - if !bytes.Equal(bt.Stem, key[:31]) { + if !bytes.Equal(bt.Stem, key[:StemSize]) { bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1 n := &InternalNode{depth: bt.depth} @@ -65,26 +65,26 @@ func (bt *StemNode) Insert(key []byte, value []byte, _ NodeResolverFn, depth int } *other = Empty{} } else { - var values [256][]byte - values[key[31]] = value + var values [StemNodeWidth][]byte + values[key[StemSize]] = value *other = &StemNode{ - Stem: slices.Clone(key[:31]), + Stem: slices.Clone(key[:StemSize]), Values: values[:], depth: depth + 1, } } return n, nil } - if len(value) != 32 { + if len(value) != HashSize { return bt, errors.New("invalid insertion: value length") } - bt.Values[key[31]] = value + bt.Values[key[StemSize]] = value return bt, nil } // Copy creates a deep copy of the node. func (bt *StemNode) Copy() BinaryNode { - var values [256][]byte + var values [StemNodeWidth][]byte for i, v := range bt.Values { values[i] = slices.Clone(v) } @@ -102,7 +102,7 @@ func (bt *StemNode) GetHeight() int { // Hash returns the hash of the node. func (bt *StemNode) Hash() common.Hash { - var data [NodeWidth]common.Hash + var data [StemNodeWidth]common.Hash for i, v := range bt.Values { if v != nil { h := sha256.Sum256(v) @@ -112,7 +112,7 @@ func (bt *StemNode) Hash() common.Hash { h := sha256.New() for level := 1; level <= 8; level++ { - for i := range NodeWidth / (1 << level) { + for i := range StemNodeWidth / (1 << level) { h.Reset() if data[i*2] == (common.Hash{}) && data[i*2+1] == (common.Hash{}) { @@ -141,14 +141,17 @@ func (bt *StemNode) CollectNodes(path []byte, flush NodeFlushFn) error { } // GetValuesAtStem retrieves the group of values located at the given stem key. 
-func (bt *StemNode) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error) { +func (bt *StemNode) GetValuesAtStem(stem []byte, _ NodeResolverFn) ([][]byte, error) { + if !bytes.Equal(bt.Stem, stem) { + return nil, nil + } return bt.Values[:], nil } // InsertValuesAtStem inserts a full value group at the given stem in the internal node. // Already-existing values will be overwritten. func (bt *StemNode) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolverFn, depth int) (BinaryNode, error) { - if !bytes.Equal(bt.Stem, key[:31]) { + if !bytes.Equal(bt.Stem, key[:StemSize]) { bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1 n := &InternalNode{depth: bt.depth} @@ -174,7 +177,7 @@ func (bt *StemNode) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolv *other = Empty{} } else { *other = &StemNode{ - Stem: slices.Clone(key[:31]), + Stem: slices.Clone(key[:StemSize]), Values: values, depth: n.depth + 1, } @@ -206,7 +209,7 @@ func (bt *StemNode) toDot(parent, path string) string { // Key returns the full key for the given index. func (bt *StemNode) Key(i int) []byte { - var ret [32]byte + var ret [HashSize]byte copy(ret[:], bt.Stem) ret[StemSize] = byte(i) return ret[:] diff --git a/trie/bintrie/stem_node_test.go b/trie/bintrie/stem_node_test.go index e0ffd5c3c844..d8d6844427de 100644 --- a/trie/bintrie/stem_node_test.go +++ b/trie/bintrie/stem_node_test.go @@ -251,27 +251,23 @@ func TestStemNodeGetValuesAtStem(t *testing.T) { } // Check that all values match - for i := 0; i < 256; i++ { + for i := range 256 { if !bytes.Equal(retrievedValues[i], values[i]) { t.Errorf("Value mismatch at index %d", i) } } - // GetValuesAtStem with different stem also returns the same values - // (implementation ignores the stem parameter) + // GetValuesAtStem with different stem should return nil differentStem := make([]byte, 31) differentStem[0] = 0xFF - retrievedValues2, err := node.GetValuesAtStem(differentStem, nil) + shouldBeNil, err := node.GetValuesAtStem(differentStem, nil) if err != nil { t.Fatalf("Failed to get values with different stem: %v", err) } - // Should still return the same values (stem is ignored) - for i := 0; i < 256; i++ { - if !bytes.Equal(retrievedValues2[i], values[i]) { - t.Errorf("Value mismatch at index %d with different stem", i) - } + if shouldBeNil != nil { + t.Error("Expected nil for different stem, got non-nil") } } diff --git a/trie/bintrie/trie.go b/trie/bintrie/trie.go index 0a8bd325f58d..a7ee342b7452 100644 --- a/trie/bintrie/trie.go +++ b/trie/bintrie/trie.go @@ -33,6 +33,84 @@ import ( var errInvalidRootType = errors.New("invalid root type") +// ChunkedCode represents a sequence of HashSize-byte chunks of code (StemSize bytes of which +// are actual code, and NodeTypeBytes byte is the pushdata offset). +type ChunkedCode []byte + +// Copy the values here so as to avoid an import cycle +const ( + PUSH1 = byte(0x60) + PUSH32 = byte(0x7f) +) + +// ChunkifyCode generates the chunked version of an array representing EVM bytecode +// according to EIP-7864 specification. +// +// The code is divided into HashSize-byte chunks, where each chunk contains: +// - Byte 0: Metadata byte indicating the number of leading bytes that are PUSHDATA (0-StemSize) +// - Bytes 1-StemSize: Actual code bytes +// +// This format enables stateless clients to validate jump destinations within a chunk +// without requiring additional context. 
When a PUSH instruction's data spans multiple +// chunks, the metadata byte tells us how many bytes at the start of the chunk are +// part of the previous chunk's PUSH instruction data. +// +// For example: +// - If a chunk starts with regular code: metadata byte = 0 +// - If a PUSH32 instruction starts at byte 30 of chunk N: +// - Chunk N: normal, contains PUSH32 opcode + 1 byte of data +// - Chunk N+1: metadata = StemSize (entire chunk is PUSH data) +// - Chunk N+2: metadata = 1 (first byte is PUSH data, then normal code resumes) +// +// This chunking approach ensures that jump destination validity can be determined +// by examining only the chunk containing the potential JUMPDEST, making it ideal +// for stateless execution and verkle/binary tries. +// +// Reference: https://eips.ethereum.org/EIPS/eip-7864 +func ChunkifyCode(code []byte) ChunkedCode { + var ( + chunkOffset = 0 // offset in the chunk + chunkCount = len(code) / StemSize + codeOffset = 0 // offset in the code + ) + if len(code)%StemSize != 0 { + chunkCount++ + } + chunks := make([]byte, chunkCount*HashSize) + for i := 0; i < chunkCount; i++ { + // number of bytes to copy, StemSize unless the end of the code has been reached. + end := StemSize * (i + 1) + if len(code) < end { + end = len(code) + } + copy(chunks[i*HashSize+1:], code[StemSize*i:end]) // copy the code itself + + // chunk offset = taken from the last chunk. + if chunkOffset > StemSize { + // skip offset calculation if push data covers the whole chunk + chunks[i*HashSize] = StemSize + chunkOffset = 1 + continue + } + chunks[HashSize*i] = byte(chunkOffset) + chunkOffset = 0 + + // Check each instruction and update the offset it should be 0 unless + // a PUSH-N overflows. + for ; codeOffset < end; codeOffset++ { + if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 { + codeOffset += int(code[codeOffset] - PUSH1 + 1) + if codeOffset+1 >= StemSize*(i+1) { + codeOffset++ + chunkOffset = codeOffset - StemSize*(i+1) + break + } + } + } + } + return chunks +} + // NewBinaryNode creates a new empty binary trie func NewBinaryNode() BinaryNode { return Empty{} @@ -114,7 +192,7 @@ func (t *BinaryTrie) GetAccount(addr common.Address) (*types.StateAccount, error ) switch r := t.root.(type) { case *InternalNode: - values, err = r.GetValuesAtStem(key[:31], t.nodeResolver) + values, err = r.GetValuesAtStem(key[:StemSize], t.nodeResolver) case *StemNode: values = r.Values case Empty: @@ -168,8 +246,8 @@ func (t *BinaryTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error { var ( err error - basicData [32]byte - values = make([][]byte, NodeWidth) + basicData [HashSize]byte + values = make([][]byte, StemNodeWidth) stem = GetBinaryTreeKey(addr, zero[:]) ) binary.BigEndian.PutUint32(basicData[BasicDataCodeSizeOffset-1:], uint32(codeLen)) @@ -177,14 +255,14 @@ func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, // Because the balance is a max of 16 bytes, truncate // the extra values. This happens in devmode, where - // 0xff**32 is allocated to the developer account. + // 0xff**HashSize is allocated to the developer account. balanceBytes := acc.Balance.Bytes() // TODO: reduce the size of the allocation in devmode, then panic instead // of truncating. 
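// Editorial sketch, not part of this patch: a concrete trace of ChunkifyCode above,
// assuming StemSize = 31 and HashSize = 32 as used throughout this file. Thirty
// one-byte opcodes followed by PUSH32 leave all 32 bytes of push data straddling the
// next two chunks, so the per-chunk metadata bytes come out as 0, 31 and 1.
//
//	code := append(make([]byte, 30), PUSH32)  // PUSH32 is the last code byte of chunk 0
//	code = append(code, make([]byte, 32)...)  // its 32 bytes of push data
//	chunks := ChunkifyCode(code)              // 3 chunks of HashSize bytes each
//	_ = chunks[0*HashSize]                    // 0: chunk 0 starts with executable code
//	_ = chunks[1*HashSize]                    // 31 (StemSize): chunk 1 is entirely push data
//	_ = chunks[2*HashSize]                    // 1: one leading push-data byte, then code resumes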
if len(balanceBytes) > 16 { balanceBytes = balanceBytes[16:] } - copy(basicData[32-len(balanceBytes):], balanceBytes[:]) + copy(basicData[HashSize-len(balanceBytes):], balanceBytes[:]) values[BasicDataLeafKey] = basicData[:] values[CodeHashLeafKey] = acc.CodeHash[:] @@ -205,11 +283,11 @@ func (t *BinaryTrie) UpdateStem(key []byte, values [][]byte) error { // database, a trie.MissingNodeError is returned. func (t *BinaryTrie) UpdateStorage(address common.Address, key, value []byte) error { k := GetBinaryTreeKeyStorageSlot(address, key) - var v [32]byte - if len(value) >= 32 { - copy(v[:], value[:32]) + var v [HashSize]byte + if len(value) >= HashSize { + copy(v[:], value[:HashSize]) } else { - copy(v[32-len(value):], value[:]) + copy(v[HashSize-len(value):], value[:]) } root, err := t.root.Insert(k, v[:], t.nodeResolver, 0) if err != nil { @@ -228,7 +306,7 @@ func (t *BinaryTrie) DeleteAccount(addr common.Address) error { // found in the database, a trie.MissingNodeError is returned. func (t *BinaryTrie) DeleteStorage(addr common.Address, key []byte) error { k := GetBinaryTreeKey(addr, key) - var zero [32]byte + var zero [HashSize]byte root, err := t.root.Insert(k, zero[:], t.nodeResolver, 0) if err != nil { return fmt.Errorf("DeleteStorage (%x) error: %v", addr, err) @@ -246,12 +324,12 @@ func (t *BinaryTrie) Hash() common.Hash { // Commit writes all nodes to the trie's memory database, tracking the internal // and external (for account tries) references. func (t *BinaryTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) { - root := t.root.(*InternalNode) nodeset := trienode.NewNodeSet(common.Hash{}) - err := root.CollectNodes(nil, func(path []byte, node BinaryNode) { + // The root can be any type of BinaryNode (InternalNode, StemNode, etc.) + err := t.root.CollectNodes(nil, func(path []byte, node BinaryNode) { serialized := SerializeNode(node) - nodeset.AddNode(path, trienode.NewNodeWithPrev(common.Hash{}, serialized, t.tracer.Get(path))) + nodeset.AddNode(path, trienode.NewNodeWithPrev(node.Hash(), serialized, t.tracer.Get(path))) }) if err != nil { panic(fmt.Errorf("CollectNodes failed: %v", err)) @@ -299,23 +377,23 @@ func (t *BinaryTrie) IsVerkle() bool { // Note: the basic data leaf needs to have been previously created for this to work func (t *BinaryTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error { var ( - chunks = trie.ChunkifyCode(code) + chunks = ChunkifyCode(code) values [][]byte key []byte err error ) - for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 { - groupOffset := (chunknr + 128) % 256 + for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+HashSize, chunknr+1 { + groupOffset := (chunknr + 128) % StemNodeWidth if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ { - values = make([][]byte, NodeWidth) - var offset [32]byte + values = make([][]byte, StemNodeWidth) + var offset [HashSize]byte binary.LittleEndian.PutUint64(offset[24:], chunknr+128) key = GetBinaryTreeKey(addr, offset[:]) } - values[groupOffset] = chunks[i : i+32] + values[groupOffset] = chunks[i : i+HashSize] - if groupOffset == 255 || len(chunks)-i <= 32 { - err = t.UpdateStem(key[:31], values) + if groupOffset == StemNodeWidth-1 || len(chunks)-i <= HashSize { + err = t.UpdateStem(key[:StemSize], values) if err != nil { return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err) diff --git a/trie/bintrie/trie_test.go b/trie/bintrie/trie_test.go index 
84f76895494a..ca02cfaa1f3a 100644 --- a/trie/bintrie/trie_test.go +++ b/trie/bintrie/trie_test.go @@ -25,7 +25,7 @@ import ( ) var ( - zeroKey = [32]byte{} + zeroKey = [HashSize]byte{} oneKey = common.HexToHash("0101010101010101010101010101010101010101010101010101010101010101") twoKey = common.HexToHash("0202020202020202020202020202020202020202020202020202020202020202") threeKey = common.HexToHash("0303030303030303030303030303030303030303030303030303030303030303") @@ -158,8 +158,8 @@ func TestInsertDuplicateKey(t *testing.T) { func TestLargeNumberOfEntries(t *testing.T) { var err error tree := NewBinaryNode() - for i := range 256 { - var key [32]byte + for i := range StemNodeWidth { + var key [HashSize]byte key[0] = byte(i) tree, err = tree.Insert(key[:], ffKey[:], nil, 0) if err != nil { @@ -182,7 +182,7 @@ func TestMerkleizeMultipleEntries(t *testing.T) { common.HexToHash("8100000000000000000000000000000000000000000000000000000000000000").Bytes(), } for i, key := range keys { - var v [32]byte + var v [HashSize]byte binary.LittleEndian.PutUint64(v[:8], uint64(i)) tree, err = tree.Insert(key, v[:], nil, 0) if err != nil { diff --git a/trie/transition.go b/trie/transitiontrie/transition.go similarity index 87% rename from trie/transition.go rename to trie/transitiontrie/transition.go index c6eecd39376d..4c730220821f 100644 --- a/trie/transition.go +++ b/trie/transitiontrie/transition.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package trie +package transitiontrie import ( "fmt" @@ -22,8 +22,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/bintrie" "github.com/ethereum/go-ethereum/trie/trienode" - "github.com/ethereum/go-verkle" ) // TransitionTrie is a trie that implements a façade design pattern, presenting @@ -31,13 +32,16 @@ import ( // first from the overlay trie, and falls back to the base trie if the key isn't // found. All writes go to the overlay trie. type TransitionTrie struct { - overlay *VerkleTrie - base *SecureTrie + overlay *bintrie.BinaryTrie + base *trie.SecureTrie storage bool } // NewTransitionTrie creates a new TransitionTrie. -func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *TransitionTrie { +// Note: base can be nil when using TransitionTrie as a wrapper for BinaryTrie +// to work around import cycles. This is a temporary hack that should be +// refactored in future PRs (see core/state/reader.go for details). +func NewTransitionTrie(base *trie.SecureTrie, overlay *bintrie.BinaryTrie, st bool) *TransitionTrie { return &TransitionTrie{ overlay: overlay, base: base, @@ -46,12 +50,12 @@ func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *Transiti } // Base returns the base trie. -func (t *TransitionTrie) Base() *SecureTrie { +func (t *TransitionTrie) Base() *trie.SecureTrie { return t.base } // Overlay returns the overlay trie. -func (t *TransitionTrie) Overlay() *VerkleTrie { +func (t *TransitionTrie) Overlay() *bintrie.BinaryTrie { return t.overlay } @@ -61,7 +65,10 @@ func (t *TransitionTrie) GetKey(key []byte) []byte { if key := t.overlay.GetKey(key); key != nil { return key } - return t.base.GetKey(key) + if t.base != nil { + return t.base.GetKey(key) + } + return nil } // GetStorage returns the value for key stored in the trie. 
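// Editorial sketch, not part of this patch: the overlay-first read order that the
// hunks below implement for a *TransitionTrie t whose base may now be nil.
//
//	if acct, err := t.overlay.GetAccount(addr); err != nil || acct != nil {
//		return acct, err // served from the binary overlay
//	}
//	if t.base != nil {
//		return t.base.GetAccount(addr) // fall back to the MPT base
//	}
//	return nil, nil // overlay-only mode (nil base)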
The value bytes must @@ -74,8 +81,11 @@ func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, er if len(val) != 0 { return val, nil } - // TODO also insert value into overlay - return t.base.GetStorage(addr, key) + if t.base != nil { + // TODO also insert value into overlay + return t.base.GetStorage(addr, key) + } + return nil, nil } // PrefetchStorage attempts to resolve specific storage slots from the database @@ -102,7 +112,10 @@ func (t *TransitionTrie) GetAccount(address common.Address) (*types.StateAccount if data != nil { return data, nil } - return t.base.GetAccount(address) + if t.base != nil { + return t.base.GetAccount(address) + } + return nil, nil } // PrefetchAccount attempts to resolve specific accounts from the database @@ -174,7 +187,7 @@ func (t *TransitionTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSe // NodeIterator returns an iterator that returns nodes of the trie. Iteration // starts at the key after the given start key. -func (t *TransitionTrie) NodeIterator(startKey []byte) (NodeIterator, error) { +func (t *TransitionTrie) NodeIterator(startKey []byte) (trie.NodeIterator, error) { panic("not implemented") // TODO: Implement } @@ -197,14 +210,10 @@ func (t *TransitionTrie) IsVerkle() bool { // UpdateStem updates a group of values, given the stem they are using. If // a value already exists, it is overwritten. +// TODO: This is Verkle-specific and requires access to private fields. +// Not currently used in the codebase. func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error { - trie := t.overlay - switch root := trie.root.(type) { - case *verkle.InternalNode: - return root.InsertValuesAtStem(key, values, t.overlay.nodeResolver) - default: - panic("invalid root type") - } + panic("UpdateStem is not implemented for TransitionTrie") } // Copy creates a deep copy of the transition trie. diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index dea210c0465a..2e42477b8d9e 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -45,6 +45,10 @@ var ( verkleNodeWidth = uint256.NewInt(256) codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset) mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2)) + CodeOffset = uint256.NewInt(128) + VerkleNodeWidth = uint256.NewInt(256) + HeaderStorageOffset = uint256.NewInt(64) + VerkleNodeWidthLog2 = 8 index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64] @@ -200,6 +204,22 @@ func CodeChunkKey(address []byte, chunk *uint256.Int) []byte { return GetTreeKey(address, treeIndex, subIndex) } +func GetTreeKeyCodeChunkIndices(chunk *uint256.Int) (*uint256.Int, byte) { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth) + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = byte(subIndexMod[0]) + } + return treeIndex, subIndex +} + +func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { + treeIndex, subIndex := GetTreeKeyCodeChunkIndices(chunk) + return GetTreeKey(address, treeIndex, subIndex) +} + func StorageIndex(storageKey []byte) (*uint256.Int, byte) { // If the storage slot is in the header, we need to add the header offset. 
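// Editorial sketch, not part of this patch: the code-chunk indexing performed by
// GetTreeKeyCodeChunkIndices above (and mirrored by UpdateContractCode in bintrie),
// assuming CodeOffset = 128 and VerkleNodeWidth = 256 as declared in this file:
// treeIndex = (128 + chunk) / 256 and subIndex = (128 + chunk) % 256.
//
//	idx, sub := GetTreeKeyCodeChunkIndices(uint256.NewInt(0))  // treeIndex 0, subIndex 128
//	idx, sub = GetTreeKeyCodeChunkIndices(uint256.NewInt(127)) // treeIndex 0, subIndex 255: last chunk in the account header group
//	idx, sub = GetTreeKeyCodeChunkIndices(uint256.NewInt(128)) // treeIndex 1, subIndex 0: first chunk of the next value group
//	_, _ = idx, sub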
var key uint256.Int @@ -297,3 +317,97 @@ func evaluateAddressPoint(address []byte) *verkle.Point { ret.Add(ret, index0Point) return ret } + +func EvaluateAddressPoint(address []byte) *verkle.Point { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) + } + var poly [3]fr.Element + + poly[0].SetZero() + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. + verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + cfg := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point + ret.Add(ret, index0Point) + + return ret +} + +func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte { + treeIndex, subIndex := GetTreeKeyStorageSlotTreeIndexes(storageKey) + return GetTreeKeyWithEvaluatedAddess(evaluated, treeIndex, subIndex) +} + +func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) { + var pos uint256.Int + pos.SetBytes(storageKey) + + // If the storage slot is in the header, we need to add the header offset. + if pos.Cmp(codeStorageDelta) < 0 { + // This addition is always safe; it can't ever overflow since pos