diff --git a/.gitea/workflows/release.yml b/.gitea/workflows/release.yml
index f2dcc3ae96..41defedd00 100644
--- a/.gitea/workflows/release.yml
+++ b/.gitea/workflows/release.yml
@@ -122,6 +122,27 @@ jobs:
LINUX_SIGNING_KEY: ${{ secrets.LINUX_SIGNING_KEY }}
AZURE_BLOBSTORE_TOKEN: ${{ secrets.AZURE_BLOBSTORE_TOKEN }}
+ keeper:
+ name: Keeper Build
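+    # Builds the keeper binaries for all targets defined in build/ci.go (keeperTargets).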
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: 1.24
+ cache: false
+
+ - name: Install cross toolchain
+ run: |
+ apt-get update
+ apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
+
+ - name: Build (amd64)
+ run: |
+ go run build/ci.go keeper -dlgo
+
windows:
name: Windows Build
runs-on: "win-11"
diff --git a/.gitignore b/.gitignore
index 269455db7a..293359a669 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,4 +55,5 @@ cmd/ethkey/ethkey
cmd/evm/evm
cmd/geth/geth
cmd/rlpdump/rlpdump
-cmd/workload/workload
\ No newline at end of file
+cmd/workload/workload
+cmd/keeper/keeper
diff --git a/accounts/abi/bind/v2/util_test.go b/accounts/abi/bind/v2/util_test.go
index a9f5b4035c..5beb0a4fae 100644
--- a/accounts/abi/bind/v2/util_test.go
+++ b/accounts/abi/bind/v2/util_test.go
@@ -144,10 +144,9 @@ func TestWaitDeployedCornerCases(t *testing.T) {
done := make(chan struct{})
go func() {
defer close(done)
- want := errors.New("context canceled")
_, err := bind.WaitDeployed(ctx, backend.Client(), tx.Hash())
- if err == nil || errors.Is(want, err) {
- t.Errorf("error mismatch: want %v, got %v", want, err)
+ if !errors.Is(err, context.Canceled) {
+ t.Errorf("error mismatch: want %v, got %v", context.Canceled, err)
}
}()
diff --git a/appveyor.yml b/appveyor.yml
index 8dce7f30a2..aeafcfc838 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -36,4 +36,4 @@ for:
- go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
test_script:
- - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short -skip-spectests
+ - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short
diff --git a/beacon/params/config.go b/beacon/params/config.go
index 492ee53308..b01b739e07 100644
--- a/beacon/params/config.go
+++ b/beacon/params/config.go
@@ -108,6 +108,8 @@ func (c *ChainConfig) LoadForks(file []byte) error {
switch version := value.(type) {
case int:
versions[name] = new(big.Int).SetUint64(uint64(version)).FillBytes(make([]byte, 4))
+ case int64:
+ versions[name] = new(big.Int).SetUint64(uint64(version)).FillBytes(make([]byte, 4))
case uint64:
versions[name] = new(big.Int).SetUint64(version).FillBytes(make([]byte, 4))
case string:
@@ -125,6 +127,8 @@ func (c *ChainConfig) LoadForks(file []byte) error {
switch epoch := value.(type) {
case int:
epochs[name] = uint64(epoch)
+ case int64:
+ epochs[name] = uint64(epoch)
case uint64:
epochs[name] = epoch
case string:
diff --git a/build/ci.go b/build/ci.go
index 42fd56f5ba..3d2c857423 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -31,6 +31,9 @@ Available commands are:
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests
+ keeper [ -dlgo ] -- builds the keeper binaries for all supported targets
+ keeper-archive [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives the keeper binaries
+
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
@@ -86,6 +89,30 @@ var (
executablePath("clef"),
}
+ // Keeper build targets with their configurations
+ keeperTargets = []struct {
+ Name string
+ GOOS string
+ GOARCH string
+ CC string
+ Tags string
+ Env map[string]string
+ }{
+ {
+ Name: "ziren",
+ GOOS: "linux",
+ GOARCH: "mipsle",
+ // Enable once cgo works for this target:
+ // CC: "mipsel-linux-gnu-gcc",
+ Tags: "ziren",
+ Env: map[string]string{"GOMIPS": "softfloat", "CGO_ENABLED": "0"},
+ },
+ {
+ Name: "example",
+ Tags: "example",
+ },
+ }
+
// A debian package is created for all executables listed here.
debExecutables = []debExecutable{
{
@@ -178,6 +205,10 @@ func main() {
doPurge(os.Args[2:])
case "sanitycheck":
doSanityCheck()
+ case "keeper":
+ doInstallKeeper(os.Args[2:])
+ case "keeper-archive":
+ doKeeperArchive(os.Args[2:])
default:
log.Fatal("unknown command ", os.Args[1])
}
@@ -212,9 +243,6 @@ func doInstall(cmdline []string) {
// Configure the build.
gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)
- // We use -trimpath to avoid leaking local paths into the built executables.
- gobuild.Args = append(gobuild.Args, "-trimpath")
-
// Show packages during build.
gobuild.Args = append(gobuild.Args, "-v")
@@ -234,6 +262,43 @@ func doInstall(cmdline []string) {
}
}
+// doInstallKeeper builds keeper binaries for all supported targets.
+func doInstallKeeper(cmdline []string) {
+ var dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
+
+ flag.CommandLine.Parse(cmdline)
+ env := build.Env()
+
+ // Configure the toolchain.
+ tc := build.GoToolchain{}
+ if *dlgo {
+ csdb := download.MustLoadChecksums("build/checksums.txt")
+ tc.Root = build.DownloadGo(csdb)
+ }
+
+ for _, target := range keeperTargets {
+ log.Printf("Building keeper-%s", target.Name)
+
+ // Configure the build.
+ tc.GOARCH = target.GOARCH
+ tc.GOOS = target.GOOS
+ tc.CC = target.CC
+ gobuild := tc.Go("build", buildFlags(env, true, []string{target.Tags})...)
+ gobuild.Dir = "./cmd/keeper"
+ gobuild.Args = append(gobuild.Args, "-v")
+
+ for key, value := range target.Env {
+ gobuild.Env = append(gobuild.Env, key+"="+value)
+ }
+ outputName := fmt.Sprintf("keeper-%s", target.Name)
+
+ args := slices.Clone(gobuild.Args)
+ args = append(args, "-o", executablePath(outputName))
+ args = append(args, ".")
+ build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env})
+ }
+}
+
// buildFlags returns the go tool flags for building.
func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) {
var ld []string
@@ -275,6 +340,8 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
if len(buildTags) > 0 {
flags = append(flags, "-tags", strings.Join(buildTags, ","))
}
+ // We use -trimpath to avoid leaking local paths into the built executables.
+ flags = append(flags, "-trimpath")
return flags
}
@@ -284,16 +351,15 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
func doTest(cmdline []string) {
var (
- dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
- arch = flag.String("arch", "", "Run tests for given architecture")
- cc = flag.String("cc", "", "Sets C compiler binary")
- coverage = flag.Bool("coverage", false, "Whether to record code coverage")
- verbose = flag.Bool("v", false, "Whether to log verbosely")
- race = flag.Bool("race", false, "Execute the race detector")
- short = flag.Bool("short", false, "Pass the 'short'-flag to go test")
- cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
- skipspectests = flag.Bool("skip-spectests", false, "Skip downloading execution-spec-tests fixtures")
- threads = flag.Int("p", 1, "Number of CPU threads to use for testing")
+ dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
+ arch = flag.String("arch", "", "Run tests for given architecture")
+ cc = flag.String("cc", "", "Sets C compiler binary")
+ coverage = flag.Bool("coverage", false, "Whether to record code coverage")
+ verbose = flag.Bool("v", false, "Whether to log verbosely")
+ race = flag.Bool("race", false, "Execute the race detector")
+ short = flag.Bool("short", false, "Pass the 'short'-flag to go test")
+ cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
+ threads = flag.Int("p", 1, "Number of CPU threads to use for testing")
)
flag.CommandLine.Parse(cmdline)
@@ -301,7 +367,7 @@ func doTest(cmdline []string) {
csdb := download.MustLoadChecksums("build/checksums.txt")
// Get test fixtures.
- if !*skipspectests {
+ if !*short {
downloadSpecTestFixtures(csdb, *cachedir)
}
@@ -633,6 +699,32 @@ func doArchive(cmdline []string) {
}
}
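+
+// doKeeperArchive bundles the keeper binaries (plus COPYING) into a single
+// tar.gz archive and optionally signs and uploads it.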
+func doKeeperArchive(cmdline []string) {
+ var (
+ signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`)
+ signify = flag.String("signify", "", `Environment variable holding the signify key (e.g. LINUX_SIGNIFY_KEY)`)
+ upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
+ )
+ flag.CommandLine.Parse(cmdline)
+
+ var (
+ env = build.Env()
+ vsn = version.Archive(env.Commit)
+ keeper = "keeper-" + vsn + ".tar.gz"
+ )
+ maybeSkipArchive(env)
+ files := []string{"COPYING"}
+ for _, target := range keeperTargets {
+ files = append(files, executablePath(fmt.Sprintf("keeper-%s", target.Name)))
+ }
+ if err := build.WriteArchive(keeper, files); err != nil {
+ log.Fatal(err)
+ }
+ if err := archiveUpload(keeper, *upload, *signer, *signify); err != nil {
+ log.Fatal(err)
+ }
+}
+
func archiveBasename(arch string, archiveVersion string) string {
platform := runtime.GOOS + "-" + arch
if arch == "arm" {
diff --git a/cmd/devp2p/README.md b/cmd/devp2p/README.md
index ad2985b4b0..b20d921dc4 100644
--- a/cmd/devp2p/README.md
+++ b/cmd/devp2p/README.md
@@ -121,7 +121,7 @@ with our test chain. The chain files are located in `./cmd/devp2p/internal/ethte
--nat=none \
--networkid 3503995874084926 \
--verbosity 5 \
- --authrpc.jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365
+ --authrpc.jwtsecret jwt.secret
Note that the tests also require access to the engine API.
The test suite can now be executed using the devp2p tool.
@@ -130,7 +130,7 @@ The test suite can now be executed using the devp2p tool.
--chain internal/ethtest/testdata \
--node enode://.... \
--engineapi http://127.0.0.1:8551 \
- --jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365
+ --jwtsecret $(cat jwt.secret)
Repeat the above process (re-initialising the node) in order to run the Eth Protocol test suite again.
diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go
index 118731fd6c..1dc8f82460 100644
--- a/cmd/devp2p/rlpxcmd.go
+++ b/cmd/devp2p/rlpxcmd.go
@@ -143,9 +143,6 @@ type testParams struct {
func cliTestParams(ctx *cli.Context) *testParams {
nodeStr := ctx.String(testNodeFlag.Name)
- if nodeStr == "" {
- exit(fmt.Errorf("missing -%s", testNodeFlag.Name))
- }
node, err := parseNode(nodeStr)
if err != nil {
exit(err)
@@ -156,14 +153,5 @@ func cliTestParams(ctx *cli.Context) *testParams {
jwt: ctx.String(testNodeJWTFlag.Name),
chainDir: ctx.String(testChainDirFlag.Name),
}
- if p.engineAPI == "" {
- exit(fmt.Errorf("missing -%s", testNodeEngineFlag.Name))
- }
- if p.jwt == "" {
- exit(fmt.Errorf("missing -%s", testNodeJWTFlag.Name))
- }
- if p.chainDir == "" {
- exit(fmt.Errorf("missing -%s", testChainDirFlag.Name))
- }
return &p
}
diff --git a/cmd/devp2p/runtest.go b/cmd/devp2p/runtest.go
index 7e3723c641..c40a4b8a01 100644
--- a/cmd/devp2p/runtest.go
+++ b/cmd/devp2p/runtest.go
@@ -39,26 +39,29 @@ var (
}
// for eth/snap tests
- testChainDirFlag = &cli.StringFlag{
+ testChainDirFlag = &cli.PathFlag{
Name: "chain",
Usage: "Test chain directory (required)",
Category: flags.TestingCategory,
+ Required: true,
}
testNodeFlag = &cli.StringFlag{
Name: "node",
Usage: "Peer-to-Peer endpoint (ENR) of the test node (required)",
Category: flags.TestingCategory,
+ Required: true,
}
testNodeJWTFlag = &cli.StringFlag{
Name: "jwtsecret",
Usage: "JWT secret for the engine API of the test node (required)",
Category: flags.TestingCategory,
- Value: "0x7365637265747365637265747365637265747365637265747365637265747365",
+ Required: true,
}
testNodeEngineFlag = &cli.StringFlag{
Name: "engineapi",
Usage: "Engine API endpoint of the test node (required)",
Category: flags.TestingCategory,
+ Required: true,
}
// These two are specific to the discovery tests.
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index a3af05e85f..1ea2fcb4cc 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -67,6 +67,7 @@ var (
utils.OverrideBPO1,
utils.OverrideBPO2,
utils.OverrideVerkle,
+ utils.OverrideGenesisFlag,
utils.OverrideOptimismCanyon,
utils.OverrideOptimismEcotone,
utils.OverrideOptimismFjord,
@@ -215,6 +216,8 @@ var (
utils.AllowUnprotectedTxs,
utils.BatchRequestLimit,
utils.BatchResponseMaxSize,
+ utils.RPCTxSyncDefaultTimeoutFlag,
+ utils.RPCTxSyncMaxTimeoutFlag,
}
metricsFlags = []cli.Flag{
diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go
index 6490f832af..67dc7257c0 100644
--- a/cmd/geth/verkle.go
+++ b/cmd/geth/verkle.go
@@ -201,7 +201,7 @@ func expandVerkle(ctx *cli.Context) error {
}
for i, key := range keylist {
- log.Info("Reading key", "index", i, "key", keylist[0])
+ log.Info("Reading key", "index", i, "key", key)
root.Get(key, chaindb.Get)
}
diff --git a/cmd/keeper/getpayload_ziren.go b/cmd/keeper/getpayload_ziren.go
index 11c5845bcc..bc373db94f 100644
--- a/cmd/keeper/getpayload_ziren.go
+++ b/cmd/keeper/getpayload_ziren.go
@@ -19,7 +19,7 @@
package main
import (
- zkruntime "github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime"
+ zkruntime "github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime"
)
// getInput reads the input payload from the zkVM runtime environment.
diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod
index 6b334f6379..0b1cdf9e6e 100644
--- a/cmd/keeper/go.mod
+++ b/cmd/keeper/go.mod
@@ -3,8 +3,8 @@ module github.com/ethereum/go-ethereum/cmd/keeper
go 1.24.0
require (
+ github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6
github.com/ethereum/go-ethereum v0.0.0-00010101000000-000000000000
- github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime v0.0.0-20250915074013-fbc07aa2c6f5
)
require (
@@ -47,7 +47,4 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
)
-replace (
- github.com/ethereum/go-ethereum => ../../
- github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime => github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5
-)
+replace github.com/ethereum/go-ethereum => ../../
diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum
index e56b3e032a..e0d07f8d7a 100644
--- a/cmd/keeper/go.sum
+++ b/cmd/keeper/go.sum
@@ -1,7 +1,9 @@
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
-github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE=
-github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
+github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU=
+github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0=
@@ -123,8 +125,6 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
-github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 h1:MxKlbmI7Dta6O6Nsc9OAer/rOltjoL11CVLMqCiYnxU=
-github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5/go.mod h1:zk/SUgiiVz2U1ufZ+yM2MHPbD93W25KH5zK3qAxXbT4=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index db7bd691d8..3e337a3d00 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -25,6 +25,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"math/big"
"os"
"os/signal"
@@ -311,7 +312,7 @@ func ImportHistory(chain *core.BlockChain, dir string, network string) error {
return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
}
encReceipts := types.EncodeBlockReceiptLists([]types.Receipts{receipts})
- if _, err := chain.InsertReceiptChain([]*types.Block{block}, encReceipts, 2^64-1); err != nil {
+ if _, err := chain.InsertReceiptChain([]*types.Block{block}, encReceipts, math.MaxUint64); err != nil {
return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
}
imported += 1
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index ca095389a3..05ac8b4b40 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -20,7 +20,6 @@ package utils
import (
"context"
"crypto/ecdsa"
- "encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -273,6 +272,11 @@ var (
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
+ OverrideGenesisFlag = &cli.StringFlag{
+ Name: "override.genesis",
+ Usage: "Load genesis block and configuration from file at this path",
+ Category: flags.EthCategory,
+ }
OverrideOptimismCanyon = &cli.Uint64Flag{
Name: "override.canyon",
Usage: "Manually specify the Optimism Canyon fork timestamp, overriding the bundled setting",
@@ -683,6 +687,18 @@ var (
Value: ethconfig.Defaults.LogQueryLimit,
Category: flags.APICategory,
}
+ RPCTxSyncDefaultTimeoutFlag = &cli.DurationFlag{
+ Name: "rpc.txsync.defaulttimeout",
+ Usage: "Default timeout for eth_sendRawTransactionSync (e.g. 2s, 500ms)",
+ Value: ethconfig.Defaults.TxSyncDefaultTimeout,
+ Category: flags.APICategory,
+ }
+ RPCTxSyncMaxTimeoutFlag = &cli.DurationFlag{
+ Name: "rpc.txsync.maxtimeout",
+ Usage: "Maximum allowed timeout for eth_sendRawTransactionSync (e.g. 5m)",
+ Value: ethconfig.Defaults.TxSyncMaxTimeout,
+ Category: flags.APICategory,
+ }
// Authenticated RPC HTTP settings
AuthListenFlag = &cli.StringFlag{
Name: "authrpc.addr",
@@ -1496,15 +1512,10 @@ func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) {
return
}
addr := ctx.String(MinerPendingFeeRecipientFlag.Name)
- if strings.HasPrefix(addr, "0x") || strings.HasPrefix(addr, "0X") {
- addr = addr[2:]
- }
- b, err := hex.DecodeString(addr)
- if err != nil || len(b) != common.AddressLength {
+ if !common.IsHexAddress(addr) {
Fatalf("-%s: invalid pending block producer address %q", MinerPendingFeeRecipientFlag.Name, addr)
- return
}
- cfg.Miner.PendingFeeRecipient = common.BytesToAddress(b)
+ cfg.Miner.PendingFeeRecipient = common.HexToAddress(addr)
}
func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
@@ -1787,7 +1798,7 @@ func setRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
// SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags, don't allow network id override on preset networks
- flags.CheckExclusive(ctx, MainnetFlag, DeveloperFlag, SepoliaFlag, HoleskyFlag, HoodiFlag, OPNetworkFlag)
+ flags.CheckExclusive(ctx, MainnetFlag, DeveloperFlag, SepoliaFlag, HoleskyFlag, HoodiFlag, OPNetworkFlag, OverrideGenesisFlag)
flags.CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
// Set configurations from CLI flags
@@ -1911,6 +1922,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(RPCGlobalLogQueryLimit.Name) {
cfg.LogQueryLimit = ctx.Int(RPCGlobalLogQueryLimit.Name)
}
+ if ctx.IsSet(RPCTxSyncDefaultTimeoutFlag.Name) {
+ cfg.TxSyncDefaultTimeout = ctx.Duration(RPCTxSyncDefaultTimeoutFlag.Name)
+ }
+ if ctx.IsSet(RPCTxSyncMaxTimeoutFlag.Name) {
+ cfg.TxSyncMaxTimeout = ctx.Duration(RPCTxSyncMaxTimeoutFlag.Name)
+ }
if !ctx.Bool(SnapshotFlag.Name) || cfg.SnapshotCache == 0 {
// If snap-sync is requested, this flag is also required
if cfg.SyncMode == ethconfig.SnapSync {
@@ -2093,6 +2110,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if !ctx.IsSet(MinerGasPriceFlag.Name) {
cfg.Miner.GasPrice = big.NewInt(1)
}
+ case ctx.String(OverrideGenesisFlag.Name) != "":
+ f, err := os.Open(ctx.String(OverrideGenesisFlag.Name))
+ if err != nil {
+ Fatalf("Failed to read genesis file: %v", err)
+ }
+ defer f.Close()
+
+ genesis := new(core.Genesis)
+ if err := json.NewDecoder(f).Decode(genesis); err != nil {
+ Fatalf("Invalid genesis file: %v", err)
+ }
+ cfg.Genesis = genesis
case ctx.IsSet(OPNetworkFlag.Name):
genesis := MakeGenesis(ctx)
if !ctx.IsSet(NetworkIdFlag.Name) {
diff --git a/common/format.go b/common/format.go
index 7af41f52d5..31e08831f5 100644
--- a/common/format.go
+++ b/common/format.go
@@ -69,7 +69,7 @@ func (t PrettyAge) String() string {
result, prec := "", 0
for _, unit := range ageUnits {
- if diff > unit.Size {
+ if diff >= unit.Size {
result = fmt.Sprintf("%s%d%s", result, diff/unit.Size, unit.Symbol)
diff %= unit.Size
diff --git a/common/path.go b/common/path.go
index 49c6a5efc2..841946348e 100644
--- a/common/path.go
+++ b/common/path.go
@@ -17,6 +17,8 @@
package common
import (
+ "errors"
+ "io/fs"
"os"
"path/filepath"
)
@@ -24,10 +26,7 @@ import (
// FileExist checks if a file exists at filePath.
func FileExist(filePath string) bool {
_, err := os.Stat(filePath)
- if err != nil && os.IsNotExist(err) {
- return false
- }
- return true
+ return !errors.Is(err, fs.ErrNotExist)
}
// AbsolutePath returns datadir + filename, or filename if it is absolute.
@@ -37,3 +36,14 @@ func AbsolutePath(datadir string, filename string) string {
}
return filepath.Join(datadir, filename)
}
+
+// IsNonEmptyDir checks if a directory exists and is non-empty.
+func IsNonEmptyDir(dir string) bool {
+ f, err := os.Open(dir)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+ names, _ := f.Readdirnames(1)
+ return len(names) > 0
+}
diff --git a/core/genesis.go b/core/genesis.go
index 8c55ca36a3..aadd3e0248 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -837,23 +837,24 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: big.NewInt(0),
Alloc: map[common.Address]types.Account{
- common.BytesToAddress([]byte{0x01}): {Balance: big.NewInt(1)}, // ECRecover
- common.BytesToAddress([]byte{0x02}): {Balance: big.NewInt(1)}, // SHA256
- common.BytesToAddress([]byte{0x03}): {Balance: big.NewInt(1)}, // RIPEMD
- common.BytesToAddress([]byte{0x04}): {Balance: big.NewInt(1)}, // Identity
- common.BytesToAddress([]byte{0x05}): {Balance: big.NewInt(1)}, // ModExp
- common.BytesToAddress([]byte{0x06}): {Balance: big.NewInt(1)}, // ECAdd
- common.BytesToAddress([]byte{0x07}): {Balance: big.NewInt(1)}, // ECScalarMul
- common.BytesToAddress([]byte{0x08}): {Balance: big.NewInt(1)}, // ECPairing
- common.BytesToAddress([]byte{0x09}): {Balance: big.NewInt(1)}, // BLAKE2b
- common.BytesToAddress([]byte{0x0a}): {Balance: big.NewInt(1)}, // KZGPointEval
- common.BytesToAddress([]byte{0x0b}): {Balance: big.NewInt(1)}, // BLSG1Add
- common.BytesToAddress([]byte{0x0c}): {Balance: big.NewInt(1)}, // BLSG1MultiExp
- common.BytesToAddress([]byte{0x0d}): {Balance: big.NewInt(1)}, // BLSG2Add
- common.BytesToAddress([]byte{0x0e}): {Balance: big.NewInt(1)}, // BLSG2MultiExp
- common.BytesToAddress([]byte{0x0f}): {Balance: big.NewInt(1)}, // BLSG1Pairing
- common.BytesToAddress([]byte{0x10}): {Balance: big.NewInt(1)}, // BLSG1MapG1
- common.BytesToAddress([]byte{0x11}): {Balance: big.NewInt(1)}, // BLSG2MapG2
+ common.BytesToAddress([]byte{0x01}): {Balance: big.NewInt(1)}, // ECRecover
+ common.BytesToAddress([]byte{0x02}): {Balance: big.NewInt(1)}, // SHA256
+ common.BytesToAddress([]byte{0x03}): {Balance: big.NewInt(1)}, // RIPEMD
+ common.BytesToAddress([]byte{0x04}): {Balance: big.NewInt(1)}, // Identity
+ common.BytesToAddress([]byte{0x05}): {Balance: big.NewInt(1)}, // ModExp
+ common.BytesToAddress([]byte{0x06}): {Balance: big.NewInt(1)}, // ECAdd
+ common.BytesToAddress([]byte{0x07}): {Balance: big.NewInt(1)}, // ECScalarMul
+ common.BytesToAddress([]byte{0x08}): {Balance: big.NewInt(1)}, // ECPairing
+ common.BytesToAddress([]byte{0x09}): {Balance: big.NewInt(1)}, // BLAKE2b
+ common.BytesToAddress([]byte{0x0a}): {Balance: big.NewInt(1)}, // KZGPointEval
+ common.BytesToAddress([]byte{0x0b}): {Balance: big.NewInt(1)}, // BLSG1Add
+ common.BytesToAddress([]byte{0x0c}): {Balance: big.NewInt(1)}, // BLSG1MultiExp
+ common.BytesToAddress([]byte{0x0d}): {Balance: big.NewInt(1)}, // BLSG2Add
+ common.BytesToAddress([]byte{0x0e}): {Balance: big.NewInt(1)}, // BLSG2MultiExp
+ common.BytesToAddress([]byte{0x0f}): {Balance: big.NewInt(1)}, // BLSG1Pairing
+ common.BytesToAddress([]byte{0x10}): {Balance: big.NewInt(1)}, // BLSG1MapG1
+ common.BytesToAddress([]byte{0x11}): {Balance: big.NewInt(1)}, // BLSG2MapG2
+ common.BytesToAddress([]byte{0x01, 0x00}): {Balance: big.NewInt(1)}, // P256Verify
// Pre-deploy system contracts
params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0},
params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0},
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 714c1f77d6..b97c7a07a1 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -313,13 +313,13 @@ func ReadTrienodeHistoryHeader(db ethdb.AncientReaderOp, id uint64) ([]byte, err
}
// ReadTrienodeHistoryKeySection retrieves the key section of trienode history.
-func ReadTrienodeHistoryKeySection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) {
- return db.Ancient(trienodeHistoryKeySectionTable, id-1)
+func ReadTrienodeHistoryKeySection(db ethdb.AncientReaderOp, id uint64, offset uint64, length uint64) ([]byte, error) {
+ return db.AncientBytes(trienodeHistoryKeySectionTable, id-1, offset, length)
}
// ReadTrienodeHistoryValueSection retrieves the value section of trienode history.
-func ReadTrienodeHistoryValueSection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) {
- return db.Ancient(trienodeHistoryValueSectionTable, id-1)
+func ReadTrienodeHistoryValueSection(db ethdb.AncientReaderOp, id uint64, offset uint64, length uint64) ([]byte, error) {
+ return db.AncientBytes(trienodeHistoryValueSectionTable, id-1, offset, length)
}
// ReadTrienodeHistoryList retrieves a list of trienode history corresponding
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
index f4909d86e7..b940d91040 100644
--- a/core/rawdb/ancient_utils.go
+++ b/core/rawdb/ancient_utils.go
@@ -105,6 +105,23 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
}
infos = append(infos, info)
+ case MerkleTrienodeFreezerName, VerkleTrienodeFreezerName:
+ datadir, err := db.AncientDatadir()
+ if err != nil {
+ return nil, err
+ }
+ f, err := NewTrienodeFreezer(datadir, freezer == VerkleTrienodeFreezerName, true)
+ if err != nil {
+ continue // the trienode freezer might not exist yet
+ }
+ defer f.Close()
+
+ info, err := inspect(freezer, trienodeFreezerTableConfigs, f)
+ if err != nil {
+ return nil, err
+ }
+ infos = append(infos, info)
+
default:
return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 97a17bbd29..8745200abf 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -177,7 +177,7 @@ func resolveChainFreezerDir(ancient string) string {
// - chain freezer exists in legacy location (root ancient folder)
freezer := filepath.Join(ancient, ChainFreezerName)
if !common.FileExist(freezer) {
- if !common.FileExist(ancient) {
+ if !common.FileExist(ancient) || !common.IsNonEmptyDir(ancient) {
// The entire ancient store is not initialized, still use the sub
// folder for initialization.
} else {
diff --git a/core/state/access_list.go b/core/state/access_list.go
index e3f1738864..0b830e7222 100644
--- a/core/state/access_list.go
+++ b/core/state/access_list.go
@@ -61,9 +61,10 @@ func newAccessList() *accessList {
// Copy creates an independent copy of an accessList.
func (al *accessList) Copy() *accessList {
- cp := newAccessList()
- cp.addresses = maps.Clone(al.addresses)
- cp.slots = make([]map[common.Hash]struct{}, len(al.slots))
+ cp := &accessList{
+ addresses: maps.Clone(al.addresses),
+ slots: make([]map[common.Hash]struct{}, len(al.slots)),
+ }
for i, slotMap := range al.slots {
cp.slots[i] = maps.Clone(slotMap)
}
diff --git a/core/state/database.go b/core/state/database.go
index 5fff5c4c89..5bbf99379d 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -318,6 +318,8 @@ func mustCopyTrie(t Trie) Trie {
return t.Copy()
case *trie.VerkleTrie:
return t.Copy()
+ case *trie.TransitionTrie:
+ return t.Copy()
default:
panic(fmt.Errorf("unknown trie type %T", t))
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 2938750503..fdeb4254c1 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
)
@@ -494,8 +495,20 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
selfDestructed: s.selfDestructed,
newContract: s.newContract,
}
- if s.trie != nil {
+
+ switch s.trie.(type) {
+ case *trie.VerkleTrie:
+ // Verkle uses only one tree, and the copy has already been
+ // made in mustCopyTrie.
+ obj.trie = db.trie
+ case *trie.TransitionTrie:
+ // Same thing for the transition tree, since the MPT is
+ // read-only.
+ obj.trie = db.trie
+ case *trie.StateTrie:
obj.trie = mustCopyTrie(s.trie)
+ case nil:
+ // do nothing
}
return obj
}
diff --git a/core/state/state_sizer_test.go b/core/state/state_sizer_test.go
index cab0c38163..65f652e424 100644
--- a/core/state/state_sizer_test.go
+++ b/core/state/state_sizer_test.go
@@ -94,6 +94,14 @@ func TestSizeTracker(t *testing.T) {
}
baselineRoot := currentRoot
+ // Close and reopen the trie database so all async flushes triggered by the
+ // baseline commits are written before we measure the baseline snapshot.
+ if err := tdb.Close(); err != nil {
+ t.Fatalf("Failed to close triedb before baseline measurement: %v", err)
+ }
+ tdb = triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults})
+ sdb = NewDatabase(tdb, nil)
+
// Wait for snapshot completion
for !tdb.SnapshotCompleted() {
time.Sleep(100 * time.Millisecond)
@@ -215,13 +223,12 @@ func TestSizeTracker(t *testing.T) {
if actualStats.ContractCodeBytes != expectedStats.ContractCodeBytes {
t.Errorf("Contract code bytes mismatch: expected %d, got %d", expectedStats.ContractCodeBytes, actualStats.ContractCodeBytes)
}
- // TODO: failed on github actions, need to investigate
- // if actualStats.AccountTrienodes != expectedStats.AccountTrienodes {
- // t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes)
- // }
- // if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes {
- // t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes)
- // }
+ if actualStats.AccountTrienodes != expectedStats.AccountTrienodes {
+ t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes)
+ }
+ if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes {
+ t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes)
+ }
if actualStats.StorageTrienodes != expectedStats.StorageTrienodes {
t.Errorf("Storage trie nodes mismatch: expected %d, got %d", expectedStats.StorageTrienodes, actualStats.StorageTrienodes)
}
diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go
index d2595bcefe..9db201fc2b 100644
--- a/core/state/statedb_hooked.go
+++ b/core/state/statedb_hooked.go
@@ -191,17 +191,18 @@ func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64, reason tr
func (s *hookedStateDB) SetCode(address common.Address, code []byte, reason tracing.CodeChangeReason) []byte {
prev := s.inner.SetCode(address, code, reason)
+
if s.hooks.OnCodeChangeV2 != nil || s.hooks.OnCodeChange != nil {
- prevHash := types.EmptyCodeHash
- if len(prev) != 0 {
- prevHash = crypto.Keccak256Hash(prev)
- }
+ prevHash := crypto.Keccak256Hash(prev)
codeHash := crypto.Keccak256Hash(code)
- if s.hooks.OnCodeChangeV2 != nil {
- s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason)
- } else if s.hooks.OnCodeChange != nil {
- s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code)
+ // Invoke the hooks only if the contract code has actually changed
+ if prevHash != codeHash {
+ if s.hooks.OnCodeChangeV2 != nil {
+ s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason)
+ } else if s.hooks.OnCodeChange != nil {
+ s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code)
+ }
}
}
return prev
diff --git a/core/txpool/locals/journal.go b/core/txpool/locals/journal.go
index 46fd6de346..cd2be8a794 100644
--- a/core/txpool/locals/journal.go
+++ b/core/txpool/locals/journal.go
@@ -117,6 +117,25 @@ func (journal *journal) load(add func([]*types.Transaction) []error) error {
return failure
}
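+
+// setupWriter closes any previously opened journal writer and re-opens the
+// journal file in append mode, so subsequent inserts land at the end of the file.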
+func (journal *journal) setupWriter() error {
+ if journal.writer != nil {
+ if err := journal.writer.Close(); err != nil {
+ return err
+ }
+ journal.writer = nil
+ }
+
+ // Re-open the journal file for appending
+ // Use O_APPEND to ensure we always write to the end of the file
+ sink, err := os.OpenFile(journal.path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
+ if err != nil {
+ return err
+ }
+ journal.writer = sink
+
+ return nil
+}
+
// insert adds the specified transaction to the local disk journal.
func (journal *journal) insert(tx *types.Transaction) error {
if journal.writer == nil {
@@ -177,7 +196,6 @@ func (journal *journal) rotate(all map[common.Address]types.Transactions) error
// close flushes the transaction journal contents to disk and closes the file.
func (journal *journal) close() error {
var err error
-
if journal.writer != nil {
err = journal.writer.Close()
journal.writer = nil
diff --git a/core/txpool/locals/tx_tracker.go b/core/txpool/locals/tx_tracker.go
index e08384ce71..bb178f175e 100644
--- a/core/txpool/locals/tx_tracker.go
+++ b/core/txpool/locals/tx_tracker.go
@@ -114,13 +114,14 @@ func (tracker *TxTracker) TrackAll(txs []*types.Transaction) {
}
// recheck checks and returns any transactions that needs to be resubmitted.
-func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transaction, rejournal map[common.Address]types.Transactions) {
+func (tracker *TxTracker) recheck(journalCheck bool) []*types.Transaction {
tracker.mu.Lock()
defer tracker.mu.Unlock()
var (
numStales = 0
numOk = 0
+ resubmits []*types.Transaction
)
for sender, txs := range tracker.byAddr {
// Wipe the stales
@@ -141,7 +142,7 @@ func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transac
}
if journalCheck { // rejournal
- rejournal = make(map[common.Address]types.Transactions)
+ rejournal := make(map[common.Address]types.Transactions)
for _, tx := range tracker.all {
addr, _ := types.Sender(tracker.signer, tx)
rejournal[addr] = append(rejournal[addr], tx)
@@ -153,10 +154,18 @@ func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transac
return int(a.Nonce() - b.Nonce())
})
}
+ // Rejournal the tracker while holding the lock. No new transactions will
+ // be added to the old journal during this period, preventing any potential
+ // transaction loss.
+ if tracker.journal != nil {
+ if err := tracker.journal.rotate(rejournal); err != nil {
+ log.Warn("Transaction journal rotation failed", "err", err)
+ }
+ }
}
localGauge.Update(int64(len(tracker.all)))
log.Debug("Tx tracker status", "need-resubmit", len(resubmits), "stale", numStales, "ok", numOk)
- return resubmits, rejournal
+ return resubmits
}
// Start implements node.Lifecycle interface
@@ -185,6 +194,12 @@ func (tracker *TxTracker) loop() {
tracker.TrackAll(transactions)
return nil
})
+
+ // Set up the writer for the upcoming transactions
+ if err := tracker.journal.setupWriter(); err != nil {
+ log.Error("Failed to setup the journal writer", "err", err)
+ return
+ }
defer tracker.journal.close()
}
var (
@@ -196,20 +211,15 @@ func (tracker *TxTracker) loop() {
case <-tracker.shutdownCh:
return
case <-timer.C:
- checkJournal := tracker.journal != nil && time.Since(lastJournal) > tracker.rejournal
- resubmits, rejournal := tracker.recheck(checkJournal)
+ var rejournal bool
+ if tracker.journal != nil && time.Since(lastJournal) > tracker.rejournal {
+ rejournal, lastJournal = true, time.Now()
+ log.Debug("Rejournal the transaction tracker")
+ }
+ resubmits := tracker.recheck(rejournal)
if len(resubmits) > 0 {
tracker.pool.Add(resubmits, false)
}
- if checkJournal {
- // Lock to prevent journal.rotate <-> journal.insert (via TrackAll) conflicts
- tracker.mu.Lock()
- lastJournal = time.Now()
- if err := tracker.journal.rotate(rejournal); err != nil {
- log.Warn("Transaction journal rotation failed", "err", err)
- }
- tracker.mu.Unlock()
- }
timer.Reset(recheckInterval)
}
}
diff --git a/core/txpool/locals/tx_tracker_test.go b/core/txpool/locals/tx_tracker_test.go
index b67b1d6f07..ed77309662 100644
--- a/core/txpool/locals/tx_tracker_test.go
+++ b/core/txpool/locals/tx_tracker_test.go
@@ -17,7 +17,11 @@
package locals
import (
+ "fmt"
+ "maps"
"math/big"
+ "math/rand"
+ "path/filepath"
"testing"
"time"
@@ -146,20 +150,59 @@ func TestResubmit(t *testing.T) {
txsA := txs[:len(txs)/2]
txsB := txs[len(txs)/2:]
env.pool.Add(txsA, true)
+
pending, queued := env.pool.ContentFrom(address)
if len(pending) != len(txsA) || len(queued) != 0 {
t.Fatalf("Unexpected txpool content: %d, %d", len(pending), len(queued))
}
env.tracker.TrackAll(txs)
- resubmit, all := env.tracker.recheck(true)
+ resubmit := env.tracker.recheck(true)
if len(resubmit) != len(txsB) {
t.Fatalf("Unexpected transactions to resubmit, got: %d, want: %d", len(resubmit), len(txsB))
}
- if len(all) == 0 || len(all[address]) == 0 {
- t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", 0, len(txs))
+ env.tracker.mu.Lock()
+ allCopy := maps.Clone(env.tracker.all)
+ env.tracker.mu.Unlock()
+
+ if len(allCopy) != len(txs) {
+ t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(allCopy), len(txs))
}
- if len(all[address]) != len(txs) {
- t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(all[address]), len(txs))
+}
+
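+// TestJournal checks that a manual rejournal rotates the on-disk journal and
+// that a fresh tracker can reload every tracked transaction from it.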
+func TestJournal(t *testing.T) {
+ journalPath := filepath.Join(t.TempDir(), fmt.Sprintf("%d", rand.Int63()))
+ env := newTestEnv(t, 10, 0, journalPath)
+ defer env.close()
+
+ env.tracker.Start()
+ defer env.tracker.Stop()
+
+ txs := env.makeTxs(10)
+ txsA := txs[:len(txs)/2]
+ txsB := txs[len(txs)/2:]
+ env.pool.Add(txsA, true)
+
+ pending, queued := env.pool.ContentFrom(address)
+ if len(pending) != len(txsA) || len(queued) != 0 {
+ t.Fatalf("Unexpected txpool content: %d, %d", len(pending), len(queued))
+ }
+ env.tracker.TrackAll(txsA)
+ env.tracker.TrackAll(txsB)
+ env.tracker.recheck(true) // manually rejournal the tracker
+
+ // Make sure all the transactions are properly journalled
+ trackerB := New(journalPath, time.Minute, gspec.Config, env.pool)
+ trackerB.journal.load(func(transactions []*types.Transaction) []error {
+ trackerB.TrackAll(transactions)
+ return nil
+ })
+
+ trackerB.mu.Lock()
+ allCopy := maps.Clone(trackerB.all)
+ trackerB.mu.Unlock()
+
+ if len(allCopy) != len(txs) {
+ t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(allCopy), len(txs))
}
}
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 3e67a4f45e..d242280696 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -35,7 +35,6 @@ import (
var (
ErrInvalidSig = errors.New("invalid transaction v, r, s values")
ErrUnexpectedProtection = errors.New("transaction type does not supported EIP-155 protected signatures")
- ErrInvalidTxType = errors.New("transaction type not valid in this context")
ErrTxTypeNotSupported = errors.New("transaction type not supported")
ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
ErrUint256Overflow = errors.New("bigint overflow, too large for uint256")
@@ -764,7 +763,7 @@ func TxDifference(a, b Transactions) Transactions {
func HashDifference(a, b []common.Hash) []common.Hash {
keep := make([]common.Hash, 0, len(a))
- remove := make(map[common.Hash]struct{})
+ remove := make(map[common.Hash]struct{}, len(b))
for _, hash := range b {
remove[hash] = struct{}{}
}
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index d0c21d3469..1409a7067a 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -20,7 +20,6 @@ import (
"crypto/ecdsa"
"errors"
"fmt"
- "maps"
"math/big"
"github.com/ethereum/go-ethereum/common"
@@ -187,18 +186,35 @@ type Signer interface {
// modernSigner is the signer implementation that handles non-legacy transaction types.
// For legacy transactions, it defers to one of the legacy signers (frontier, homestead, eip155).
type modernSigner struct {
- txtypes map[byte]struct{}
+ txtypes txtypeSet
chainID *big.Int
legacy Signer
}
+// txtypeSet is a bitmap for transaction types.
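+// Bit txType%64 of word txType/64 is set when the corresponding type is supported.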
+type txtypeSet [2]uint64
+
+func (v *txtypeSet) set(txType byte) {
+ v[txType/64] |= 1 << (txType % 64)
+}
+
+func (v *txtypeSet) unset(txType byte) {
+ v[txType/64] &^= 1 << (txType % 64)
+}
+
+func (v *txtypeSet) has(txType byte) bool {
+ if txType >= byte(len(v)*64) {
+ return false
+ }
+ return v[txType/64]&(1<<(txType%64)) != 0
+}
+
func newModernSigner(chainID *big.Int, fork forks.Fork) Signer {
if chainID == nil || chainID.Sign() <= 0 {
panic(fmt.Sprintf("invalid chainID %v", chainID))
}
s := &modernSigner{
chainID: chainID,
- txtypes: make(map[byte]struct{}, 4),
}
// configure legacy signer
switch {
@@ -209,19 +225,19 @@ func newModernSigner(chainID *big.Int, fork forks.Fork) Signer {
default:
s.legacy = FrontierSigner{}
}
- s.txtypes[LegacyTxType] = struct{}{}
+ s.txtypes.set(LegacyTxType)
// configure tx types
if fork >= forks.Berlin {
- s.txtypes[AccessListTxType] = struct{}{}
+ s.txtypes.set(AccessListTxType)
}
if fork >= forks.London {
- s.txtypes[DynamicFeeTxType] = struct{}{}
+ s.txtypes.set(DynamicFeeTxType)
}
if fork >= forks.Cancun {
- s.txtypes[BlobTxType] = struct{}{}
+ s.txtypes.set(BlobTxType)
}
if fork >= forks.Prague {
- s.txtypes[SetCodeTxType] = struct{}{}
+ s.txtypes.set(SetCodeTxType)
}
return s
}
@@ -232,7 +248,7 @@ func (s *modernSigner) ChainID() *big.Int {
func (s *modernSigner) Equal(s2 Signer) bool {
other, ok := s2.(*modernSigner)
- return ok && s.chainID.Cmp(other.chainID) == 0 && maps.Equal(s.txtypes, other.txtypes) && s.legacy.Equal(other.legacy)
+ return ok && s.chainID.Cmp(other.chainID) == 0 && s.txtypes == other.txtypes && s.legacy.Equal(other.legacy)
}
func (s *modernSigner) Hash(tx *Transaction) common.Hash {
@@ -240,8 +256,7 @@ func (s *modernSigner) Hash(tx *Transaction) common.Hash {
}
func (s *modernSigner) supportsType(txtype byte) bool {
- _, ok := s.txtypes[txtype]
- return ok
+ return s.txtypes.has(txtype)
}
func (s *modernSigner) Sender(tx *Transaction) (common.Address, error) {
@@ -301,8 +316,7 @@ func (s *modernSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *bi
// OP-Stack addition
func NewIsthmusSigner(chainId *big.Int) Signer {
s := newModernSigner(chainId, forks.Prague).(*modernSigner)
- // OP-Stack: remove blob tx support
- delete(s.txtypes, BlobTxType)
+ s.txtypes.unset(BlobTxType)
return s
}
diff --git a/core/types/transaction_signing_test.go b/core/types/transaction_signing_test.go
index b66577f7ed..02a65fda13 100644
--- a/core/types/transaction_signing_test.go
+++ b/core/types/transaction_signing_test.go
@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/params/forks"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -188,3 +189,14 @@ func createTestLegacyTxInner() *LegacyTx {
Data: nil,
}
}
+
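+// Benchmark_modernSigner_Equal measures signer comparison with the bitmap-based txtypeSet.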
+func Benchmark_modernSigner_Equal(b *testing.B) {
+ signer1 := newModernSigner(big.NewInt(1), forks.Amsterdam)
+ signer2 := newModernSigner(big.NewInt(1), forks.Amsterdam)
+
+ for b.Loop() {
+ if !signer1.Equal(signer2) {
+ b.Fatal("expected signers to be equal")
+ }
+ }
+}
diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go
index 254cf753d7..cd7cb8e971 100644
--- a/core/verkle_witness_test.go
+++ b/core/verkle_witness_test.go
@@ -455,7 +455,7 @@ func verkleTestGenesis(config *params.ChainConfig) *Genesis {
func TestProcessVerkleContractWithEmptyCode(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
- config.ChainID.SetUint64(69421)
+ config.ChainID = new(big.Int).SetUint64(69421)
gspec := verkleTestGenesis(&config)
genesisH, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) {
@@ -511,7 +511,7 @@ func TestProcessVerkleContractWithEmptyCode(t *testing.T) {
func TestProcessVerkleExtCodeHashOpcode(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
- config.ChainID.SetUint64(69421)
+ config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@@ -615,7 +615,7 @@ func TestProcessVerkleExtCodeHashOpcode(t *testing.T) {
func TestProcessVerkleBalanceOpcode(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
- config.ChainID.SetUint64(69421)
+ config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@@ -672,7 +672,7 @@ func TestProcessVerkleBalanceOpcode(t *testing.T) {
func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
- config.ChainID.SetUint64(69421)
+ config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@@ -792,7 +792,7 @@ func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) {
func TestProcessVerkleSelfDestructInSameTx(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
- config.ChainID.SetUint64(69421)
+ config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@@ -888,7 +888,7 @@ func TestProcessVerkleSelfDestructInSameTx(t *testing.T) {
func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
- config.ChainID.SetUint64(69421)
+ config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@@ -978,7 +978,7 @@ func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T)
func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
- config.ChainID.SetUint64(69421)
+ config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@@ -1042,7 +1042,7 @@ func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) {
func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiaryAndPrefundedAccount(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
- config.ChainID.SetUint64(69421)
+ config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
diff --git a/core/vm/evm.go b/core/vm/evm.go
index d6e8542aac..76f9ba94d8 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -621,7 +621,9 @@ func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]b
}
}
- evm.StateDB.SetCode(address, ret, tracing.CodeChangeContractCreation)
+ if len(ret) > 0 {
+ evm.StateDB.SetCode(address, ret, tracing.CodeChangeContractCreation)
+ }
return ret, nil
}
diff --git a/crypto/crypto.go b/crypto/crypto.go
index 09596c05ce..db6b6ee071 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -28,12 +28,10 @@ import (
"io"
"math/big"
"os"
- "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/rlp"
- "golang.org/x/crypto/sha3"
)
// SignatureLength indicates the byte length required to carry a signature with recovery id.
@@ -69,17 +67,6 @@ type KeccakState interface {
Read([]byte) (int, error)
}
-// NewKeccakState creates a new KeccakState
-func NewKeccakState() KeccakState {
- return sha3.NewLegacyKeccak256().(KeccakState)
-}
-
-var hasherPool = sync.Pool{
- New: func() any {
- return sha3.NewLegacyKeccak256().(KeccakState)
- },
-}
-
// HashData hashes the provided data using the KeccakState and returns a 32 byte hash
func HashData(kh KeccakState, data []byte) (h common.Hash) {
kh.Reset()
@@ -88,41 +75,6 @@ func HashData(kh KeccakState, data []byte) (h common.Hash) {
return h
}
-// Keccak256 calculates and returns the Keccak256 hash of the input data.
-func Keccak256(data ...[]byte) []byte {
- b := make([]byte, 32)
- d := hasherPool.Get().(KeccakState)
- d.Reset()
- for _, b := range data {
- d.Write(b)
- }
- d.Read(b)
- hasherPool.Put(d)
- return b
-}
-
-// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
-// converting it to an internal Hash data structure.
-func Keccak256Hash(data ...[]byte) (h common.Hash) {
- d := hasherPool.Get().(KeccakState)
- d.Reset()
- for _, b := range data {
- d.Write(b)
- }
- d.Read(h[:])
- hasherPool.Put(d)
- return h
-}
-
-// Keccak512 calculates and returns the Keccak512 hash of the input data.
-func Keccak512(data ...[]byte) []byte {
- d := sha3.NewLegacyKeccak512()
- for _, b := range data {
- d.Write(b)
- }
- return d.Sum(nil)
-}
-
// CreateAddress creates an ethereum address given the bytes and the nonce
func CreateAddress(b common.Address, nonce uint64) common.Address {
data, _ := rlp.EncodeToBytes([]interface{}{b, nonce})
diff --git a/crypto/keccak.go b/crypto/keccak.go
new file mode 100644
index 0000000000..0ad79a63c1
--- /dev/null
+++ b/crypto/keccak.go
@@ -0,0 +1,63 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build !ziren
+
+package crypto
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "golang.org/x/crypto/sha3"
+)
+
+// NewKeccakState creates a new KeccakState
+func NewKeccakState() KeccakState {
+ return sha3.NewLegacyKeccak256().(KeccakState)
+}
+
+var hasherPool = sync.Pool{
+ New: func() any {
+ return sha3.NewLegacyKeccak256().(KeccakState)
+ },
+}
+
+// Keccak256 calculates and returns the Keccak256 hash of the input data.
+func Keccak256(data ...[]byte) []byte {
+ b := make([]byte, 32)
+ d := hasherPool.Get().(KeccakState)
+ d.Reset()
+ for _, b := range data {
+ d.Write(b)
+ }
+ d.Read(b)
+ hasherPool.Put(d)
+ return b
+}
+
+// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
+// converting it to an internal Hash data structure.
+func Keccak256Hash(data ...[]byte) (h common.Hash) {
+ d := hasherPool.Get().(KeccakState)
+ d.Reset()
+ for _, b := range data {
+ d.Write(b)
+ }
+ d.Read(h[:])
+ hasherPool.Put(d)
+ return h
+}
diff --git a/crypto/keccak_ziren.go b/crypto/keccak_ziren.go
new file mode 100644
index 0000000000..8e967c6dbf
--- /dev/null
+++ b/crypto/keccak_ziren.go
@@ -0,0 +1,122 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build ziren
+
+package crypto
+
+import (
+ "github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// zirenKeccakState implements the KeccakState interface using the Ziren zkvm_runtime.
+// It accumulates data written to it and uses the zkvm's Keccak256 system call for hashing.
+type zirenKeccakState struct {
+ buf []byte // accumulated data
+ result []byte // cached result
+ dirty bool // whether new data has been written since last hash
+}
+
+func newZirenKeccakState() KeccakState {
+ return &zirenKeccakState{
+ buf: make([]byte, 0, 512), // pre-allocate reasonable capacity
+ }
+}
+
+func (s *zirenKeccakState) Write(p []byte) (n int, err error) {
+ s.buf = append(s.buf, p...)
+ s.dirty = true
+ return len(p), nil
+}
+
+func (s *zirenKeccakState) Sum(b []byte) []byte {
+ s.computeHashIfNeeded()
+ return append(b, s.result...)
+}
+
+func (s *zirenKeccakState) Reset() {
+ s.buf = s.buf[:0]
+ s.result = nil
+ s.dirty = false
+}
+
+func (s *zirenKeccakState) Size() int {
+ return 32
+}
+
+func (s *zirenKeccakState) BlockSize() int {
+ return 136 // Keccak256 rate
+}
+
+func (s *zirenKeccakState) Read(p []byte) (n int, err error) {
+ s.computeHashIfNeeded()
+
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ // After computeHashIfNeeded(), s.result is always a 32-byte slice
+ n = copy(p, s.result)
+ return n, nil
+}
+
+func (s *zirenKeccakState) computeHashIfNeeded() {
+ if s.dirty || s.result == nil {
+ // Use the zkvm_runtime Keccak256 which uses SyscallKeccakSponge
+ hashArray := zkvm_runtime.Keccak256(s.buf)
+ s.result = hashArray[:]
+ s.dirty = false
+ }
+}
+
+// NewKeccakState creates a new KeccakState
+// This uses a Ziren-optimized implementation that leverages the zkvm_runtime.Keccak256 system call.
+func NewKeccakState() KeccakState {
+ return newZirenKeccakState()
+}
+
+// Keccak256 calculates and returns the Keccak256 hash using the Ziren zkvm_runtime implementation.
+func Keccak256(data ...[]byte) []byte {
+ // For multiple data chunks, concatenate them
+ if len(data) == 0 {
+ result := zkvm_runtime.Keccak256(nil)
+ return result[:]
+ }
+ if len(data) == 1 {
+ result := zkvm_runtime.Keccak256(data[0])
+ return result[:]
+ }
+
+ // Concatenate multiple data chunks
+ var totalLen int
+ for _, d := range data {
+ totalLen += len(d)
+ }
+
+ combined := make([]byte, 0, totalLen)
+ for _, d := range data {
+ combined = append(combined, d...)
+ }
+
+ result := zkvm_runtime.Keccak256(combined)
+ return result[:]
+}
+
+// Keccak256Hash calculates and returns the Keccak256 hash as a Hash using the Ziren zkvm_runtime implementation.
+func Keccak256Hash(data ...[]byte) common.Hash {
+ return common.Hash(Keccak256(data...))
+}
diff --git a/eth/api_backend.go b/eth/api_backend.go
index d13ebfe1e7..1bfeee46fb 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -520,6 +520,14 @@ func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Blo
return b.eth.stateAtTransaction(ctx, block, txIndex, reexec)
}
+func (b *EthAPIBackend) RPCTxSyncDefaultTimeout() time.Duration {
+ return b.eth.config.TxSyncDefaultTimeout
+}
+
+func (b *EthAPIBackend) RPCTxSyncMaxTimeout() time.Duration {
+ return b.eth.config.TxSyncMaxTimeout
+}
+
func (b *EthAPIBackend) HistoricalRPCService() *rpc.Client {
return b.eth.historicalRPCService
}
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index 62d4c588b7..cb9a036c11 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -351,6 +351,7 @@ func XTestDelivery(t *testing.T) {
}
}
}()
+ wg.Add(1)
go func() {
defer wg.Done()
// reserve receiptfetch
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index c8e7848c8a..2b16837c2e 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -50,27 +50,29 @@ var FullNodeGPO = gasprice.Config{
// Defaults contains default settings for use on the Ethereum main net.
var Defaults = Config{
- HistoryMode: history.KeepAll,
- SyncMode: SnapSync,
- NetworkId: 0, // enable auto configuration of networkID == chainID
- TxLookupLimit: 2350000,
- TransactionHistory: 2350000,
- LogHistory: 2350000,
- StateHistory: params.FullImmutabilityThreshold,
- DatabaseCache: 512,
- TrieCleanCache: 154,
- TrieDirtyCache: 256,
- TrieTimeout: 60 * time.Minute,
- SnapshotCache: 102,
- FilterLogCacheSize: 32,
- LogQueryLimit: 1000,
- Miner: miner.DefaultConfig,
- TxPool: legacypool.DefaultConfig,
- BlobPool: blobpool.DefaultConfig,
- RPCGasCap: 50000000,
- RPCEVMTimeout: 5 * time.Second,
- GPO: FullNodeGPO,
- RPCTxFeeCap: 1, // 1 ether
+ HistoryMode: history.KeepAll,
+ SyncMode: SnapSync,
+ NetworkId: 0, // enable auto configuration of networkID == chainID
+ TxLookupLimit: 2350000,
+ TransactionHistory: 2350000,
+ LogHistory: 2350000,
+ StateHistory: params.FullImmutabilityThreshold,
+ DatabaseCache: 512,
+ TrieCleanCache: 154,
+ TrieDirtyCache: 256,
+ TrieTimeout: 60 * time.Minute,
+ SnapshotCache: 102,
+ FilterLogCacheSize: 32,
+ LogQueryLimit: 1000,
+ Miner: miner.DefaultConfig,
+ TxPool: legacypool.DefaultConfig,
+ BlobPool: blobpool.DefaultConfig,
+ RPCGasCap: 50000000,
+ RPCEVMTimeout: 5 * time.Second,
+ GPO: FullNodeGPO,
+ RPCTxFeeCap: 1, // 1 ether
+ TxSyncDefaultTimeout: 20 * time.Second,
+ TxSyncMaxTimeout: 1 * time.Minute,
}
//go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go
@@ -185,6 +187,10 @@ type Config struct {
// OverrideVerkle (TODO: remove after the fork)
OverrideVerkle *uint64 `toml:",omitempty"`
+ // EIP-7966: eth_sendRawTransactionSync timeouts
+ TxSyncDefaultTimeout time.Duration `toml:",omitempty"`
+ TxSyncMaxTimeout time.Duration `toml:",omitempty"`
+
OverrideOptimismCanyon *uint64 `toml:",omitempty"`
OverrideOptimismEcotone *uint64 `toml:",omitempty"`
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 30ec163a88..e9f1679255 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -58,19 +58,21 @@ func (c Config) MarshalTOML() (interface{}, error) {
RPCGasCap uint64
RPCEVMTimeout time.Duration
RPCTxFeeCap float64
- OverrideOsaka *uint64 `toml:",omitempty"`
- OverrideBPO1 *uint64 `toml:",omitempty"`
- OverrideBPO2 *uint64 `toml:",omitempty"`
- OverrideVerkle *uint64 `toml:",omitempty"`
- OverrideOptimismCanyon *uint64 `toml:",omitempty"`
- OverrideOptimismEcotone *uint64 `toml:",omitempty"`
- OverrideOptimismFjord *uint64 `toml:",omitempty"`
- OverrideOptimismGranite *uint64 `toml:",omitempty"`
- OverrideOptimismHolocene *uint64 `toml:",omitempty"`
- OverrideOptimismIsthmus *uint64 `toml:",omitempty"`
- OverrideOptimismJovian *uint64 `toml:",omitempty"`
- OverrideOptimismInterop *uint64 `toml:",omitempty"`
- ApplySuperchainUpgrades bool `toml:",omitempty"`
+ OverrideOsaka *uint64 `toml:",omitempty"`
+ OverrideBPO1 *uint64 `toml:",omitempty"`
+ OverrideBPO2 *uint64 `toml:",omitempty"`
+ OverrideVerkle *uint64 `toml:",omitempty"`
+ TxSyncDefaultTimeout time.Duration `toml:",omitempty"`
+ TxSyncMaxTimeout time.Duration `toml:",omitempty"`
+ OverrideOptimismCanyon *uint64 `toml:",omitempty"`
+ OverrideOptimismEcotone *uint64 `toml:",omitempty"`
+ OverrideOptimismFjord *uint64 `toml:",omitempty"`
+ OverrideOptimismGranite *uint64 `toml:",omitempty"`
+ OverrideOptimismHolocene *uint64 `toml:",omitempty"`
+ OverrideOptimismIsthmus *uint64 `toml:",omitempty"`
+ OverrideOptimismJovian *uint64 `toml:",omitempty"`
+ OverrideOptimismInterop *uint64 `toml:",omitempty"`
+ ApplySuperchainUpgrades bool `toml:",omitempty"`
RollupSequencerHTTP string
RollupSequencerTxConditionalEnabled bool
RollupSequencerTxConditionalCostRateLimit int
@@ -130,6 +132,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.OverrideBPO1 = c.OverrideBPO1
enc.OverrideBPO2 = c.OverrideBPO2
enc.OverrideVerkle = c.OverrideVerkle
+ enc.TxSyncDefaultTimeout = c.TxSyncDefaultTimeout
+ enc.TxSyncMaxTimeout = c.TxSyncMaxTimeout
enc.OverrideOptimismCanyon = c.OverrideOptimismCanyon
enc.OverrideOptimismEcotone = c.OverrideOptimismEcotone
enc.OverrideOptimismFjord = c.OverrideOptimismFjord
@@ -198,19 +202,21 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
RPCGasCap *uint64
RPCEVMTimeout *time.Duration
RPCTxFeeCap *float64
- OverrideOsaka *uint64 `toml:",omitempty"`
- OverrideBPO1 *uint64 `toml:",omitempty"`
- OverrideBPO2 *uint64 `toml:",omitempty"`
- OverrideVerkle *uint64 `toml:",omitempty"`
- OverrideOptimismCanyon *uint64 `toml:",omitempty"`
- OverrideOptimismEcotone *uint64 `toml:",omitempty"`
- OverrideOptimismFjord *uint64 `toml:",omitempty"`
- OverrideOptimismGranite *uint64 `toml:",omitempty"`
- OverrideOptimismHolocene *uint64 `toml:",omitempty"`
- OverrideOptimismIsthmus *uint64 `toml:",omitempty"`
- OverrideOptimismJovian *uint64 `toml:",omitempty"`
- OverrideOptimismInterop *uint64 `toml:",omitempty"`
- ApplySuperchainUpgrades *bool `toml:",omitempty"`
+ OverrideOsaka *uint64 `toml:",omitempty"`
+ OverrideBPO1 *uint64 `toml:",omitempty"`
+ OverrideBPO2 *uint64 `toml:",omitempty"`
+ OverrideVerkle *uint64 `toml:",omitempty"`
+ TxSyncDefaultTimeout *time.Duration `toml:",omitempty"`
+ TxSyncMaxTimeout *time.Duration `toml:",omitempty"`
+ OverrideOptimismCanyon *uint64 `toml:",omitempty"`
+ OverrideOptimismEcotone *uint64 `toml:",omitempty"`
+ OverrideOptimismFjord *uint64 `toml:",omitempty"`
+ OverrideOptimismGranite *uint64 `toml:",omitempty"`
+ OverrideOptimismHolocene *uint64 `toml:",omitempty"`
+ OverrideOptimismIsthmus *uint64 `toml:",omitempty"`
+ OverrideOptimismJovian *uint64 `toml:",omitempty"`
+ OverrideOptimismInterop *uint64 `toml:",omitempty"`
+ ApplySuperchainUpgrades *bool `toml:",omitempty"`
RollupSequencerHTTP *string
RollupSequencerTxConditionalEnabled *bool
RollupSequencerTxConditionalCostRateLimit *int
@@ -363,6 +369,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.OverrideVerkle != nil {
c.OverrideVerkle = dec.OverrideVerkle
}
+ if dec.TxSyncDefaultTimeout != nil {
+ c.TxSyncDefaultTimeout = *dec.TxSyncDefaultTimeout
+ }
+ if dec.TxSyncMaxTimeout != nil {
+ c.TxSyncMaxTimeout = *dec.TxSyncMaxTimeout
+ }
if dec.OverrideOptimismCanyon != nil {
c.OverrideOptimismCanyon = dec.OverrideOptimismCanyon
}
diff --git a/eth/fetcher/metrics.go b/eth/fetcher/metrics.go
new file mode 100644
index 0000000000..fd1678dd30
--- /dev/null
+++ b/eth/fetcher/metrics.go
@@ -0,0 +1,59 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go
index 0f05a1c995..bb41f62932 100644
--- a/eth/fetcher/tx_fetcher_test.go
+++ b/eth/fetcher/tx_fetcher_test.go
@@ -1858,6 +1858,56 @@ func TestBlobTransactionAnnounce(t *testing.T) {
})
}
+func TestTransactionFetcherDropAlternates(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ nil,
+ )
+ },
+ steps: []interface{}{
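+ // Announce the same tx from peers A and B: A is scheduled to fetch it, B is kept as an alternate.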
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
+
+ isScheduled{
+ tracking: map[string][]announce{
+ "A": {
+ {testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
+ },
+ "B": {
+ {testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
+ },
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ doDrop("B"),
+
+ isScheduled{
+ tracking: map[string][]announce{
+ "A": {
+ {testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
+ },
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ doDrop("A"),
+ isScheduled{
+ tracking: nil, fetching: nil,
+ },
+ },
+ })
+}
+
func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
t.Parallel()
testTransactionFetcher(t, tt)
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 02399bc801..422e5cd67b 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -563,7 +563,7 @@ type ReceiptWithTx struct {
// In addition to returning receipts, it also returns the corresponding transactions.
// This is because receipts only contain low-level data, while user-facing data
// may require additional information from the Transaction.
-func filterReceipts(txHashes []common.Hash, ev core.ChainEvent) []*ReceiptWithTx {
+func filterReceipts(txHashes map[common.Hash]bool, ev core.ChainEvent) []*ReceiptWithTx {
var ret []*ReceiptWithTx
receipts := ev.Receipts
@@ -583,27 +583,9 @@ func filterReceipts(txHashes []common.Hash, ev core.ChainEvent) []*ReceiptWithTx
Transaction: txs[i],
}
}
- } else if len(txHashes) == 1 {
- // Filter by single transaction hash.
- // This is a common case, so we distinguish it from filtering by multiple tx hashes and made a small optimization.
- for i, receipt := range receipts {
- if receipt.TxHash == txHashes[0] {
- ret = append(ret, &ReceiptWithTx{
- Receipt: receipt,
- Transaction: txs[i],
- })
- break
- }
- }
} else {
- // Filter by multiple transaction hashes.
- txHashMap := make(map[common.Hash]bool, len(txHashes))
- for _, hash := range txHashes {
- txHashMap[hash] = true
- }
-
for i, receipt := range receipts {
- if txHashMap[receipt.TxHash] {
+ if txHashes[receipt.TxHash] {
ret = append(ret, &ReceiptWithTx{
Receipt: receipt,
Transaction: txs[i],
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index 02783fa5ec..f10e6a277b 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -185,9 +185,9 @@ type subscription struct {
txs chan []*types.Transaction
headers chan *types.Header
receipts chan []*ReceiptWithTx
- txHashes []common.Hash // contains transaction hashes for transactionReceipts subscription filtering
- installed chan struct{} // closed when the filter is installed
- err chan error // closed when the filter is uninstalled
+ txHashes map[common.Hash]bool // contains transaction hashes for transactionReceipts subscription filtering
+ installed chan struct{} // closed when the filter is installed
+ err chan error // closed when the filter is uninstalled
}
// EventSystem creates subscriptions, processes events and broadcasts them to the
@@ -403,6 +403,10 @@ func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subsc
// transactions when they are included in blocks. If txHashes is provided, only receipts
// for those specific transaction hashes will be delivered.
func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, receipts chan []*ReceiptWithTx) *Subscription {
+ hashSet := make(map[common.Hash]bool)
+ for _, h := range txHashes {
+ hashSet[h] = true
+ }
sub := &subscription{
id: rpc.NewID(),
typ: TransactionReceiptsSubscription,
@@ -411,7 +415,7 @@ func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, rece
txs: make(chan []*types.Transaction),
headers: make(chan *types.Header),
receipts: receipts,
- txHashes: txHashes,
+ txHashes: hashSet,
installed: make(chan struct{}),
err: make(chan error),
}
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 5de8cb859f..f42a0d9878 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -1007,7 +1007,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
blockContext := core.NewEVMBlockContext(h, api.chainContext(ctx), nil, api.backend.ChainConfig(), statedb)
// Apply the customization rules if required.
if config != nil {
- if config.BlockOverrides != nil && config.BlockOverrides.Number.ToInt().Uint64() == h.Number.Uint64()+1 {
+ if config.BlockOverrides != nil && config.BlockOverrides.Number != nil && config.BlockOverrides.Number.ToInt().Uint64() == h.Number.Uint64()+1 {
// Overriding the block number to n+1 is a common way for wallets to
// simulate transactions, however without the following fix, a contract
// can assert it is being simulated by checking if blockhash(n) == 0x0 and
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index e6315925c6..903954070c 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -557,6 +557,20 @@ func TestTraceCall(t *testing.T) {
{"pc":0,"op":"NUMBER","gas":24946984,"gasCost":2,"depth":1,"stack":[]},
{"pc":1,"op":"STOP","gas":24946982,"gasCost":0,"depth":1,"stack":["0x1337"]}]}`,
},
+ // Tests issue #33014 where accessing nil block number override panics.
+ {
+ blockNumber: rpc.BlockNumber(0),
+ call: ethapi.TransactionArgs{
+ From: &accounts[0].addr,
+ To: &accounts[1].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ },
+ config: &TraceCallConfig{
+ BlockOverrides: &override.BlockOverrides{},
+ },
+ expectErr: nil,
+ expect: `{"gas":21000,"failed":false,"returnValue":"0x","structLogs":[]}`,
+ },
}
for i, testspec := range testSuite {
result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config)
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide_cancun.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide_cancun.json
new file mode 100644
index 0000000000..cdabe66913
--- /dev/null
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide_cancun.json
@@ -0,0 +1,101 @@
+{
+ "context": {
+ "difficulty": "0",
+ "gasLimit": "8000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "number": "1",
+ "timestamp": "1000",
+ "baseFeePerGas": "7"
+ },
+ "genesis": {
+ "alloc": {
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x10000000000000000",
+ "nonce": "0",
+ "code": "0x",
+ "storage": {}
+ },
+ "0x1111111111111111111111111111111111111111": {
+ "balance": "0x0",
+ "nonce": "0",
+ "code": "0x",
+ "storage": {}
+ },
+ "0x2222222222222222222222222222222222222222": {
+ "balance": "0xde0b6b3a7640000",
+ "nonce": "1",
+ "code": "0x6099600155731111111111111111111111111111111111111111ff",
+ "storage": {
+ "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000000000000000000000000000000000000000abcd",
+ "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000001234"
+ }
+ }
+ },
+ "config": {
+ "chainId": 1,
+ "homesteadBlock": 0,
+ "eip150Block": 0,
+ "eip155Block": 0,
+ "eip158Block": 0,
+ "byzantiumBlock": 0,
+ "constantinopleBlock": 0,
+ "petersburgBlock": 0,
+ "istanbulBlock": 0,
+ "berlinBlock": 0,
+ "londonBlock": 0,
+ "mergeNetsplitBlock": 0,
+ "shanghaiTime": 0,
+ "cancunTime": 0,
+ "terminalTotalDifficulty": 0,
+ "terminalTotalDifficultyPassed": true
+ },
+ "difficulty": "0",
+ "extraData": "0x",
+ "gasLimit": "8000000",
+ "hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0"
+ },
+ "input": "0xf860800a830186a094222222222222222222222222222222222222222280801ba0c4829400221936e8016721406f84b4710ead5608f15c785a3cedc20a7aebaab2a033e8e6e12cc432098b5ce8a409691f977867249073a3fc7804e8676c4d159475",
+ "tracerConfig": {
+ "diffMode": true
+ },
+ "result": {
+ "pre": {
+ "0x2222222222222222222222222222222222222222": {
+ "balance": "0xde0b6b3a7640000",
+ "nonce": 1,
+ "code": "0x6099600155731111111111111111111111111111111111111111ff",
+ "codeHash": "0x701bdb1d43777a9304905a100f758955d130e09c8e86d97e3f6becccdc001048",
+ "storage": {
+ "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000000000000000000000000000000000000000abcd"
+ }
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x10000000000000000"
+ }
+ },
+ "post": {
+ "0x0000000000000000000000000000000000000000": {
+ "balance": "0x2aed3"
+ },
+ "0x1111111111111111111111111111111111111111": {
+ "balance": "0xde0b6b3a7640000"
+ },
+ "0x2222222222222222222222222222222222222222": {
+ "balance": "0x0",
+ "storage": {
+ "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000099"
+ }
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0xfffffffffff70e96",
+ "nonce": 1
+ }
+ }
+ }
+}
diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go
index 5d836159d1..250ad39642 100644
--- a/eth/tracers/native/prestate.go
+++ b/eth/tracers/native/prestate.go
@@ -131,7 +131,15 @@ func (t *prestateTracer) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scop
addr := common.Address(stackData[stackLen-1].Bytes20())
t.lookupAccount(addr)
if op == vm.SELFDESTRUCT {
- t.deleted[caller] = true
+ if t.chainConfig.IsCancun(t.env.BlockNumber, t.env.Time) {
+ // EIP-6780: only delete if created in same transaction
+ if t.created[caller] {
+ t.deleted[caller] = true
+ }
+ } else {
+ // Pre-EIP-6780: always delete
+ t.deleted[caller] = true
+ }
}
case stackLen >= 5 && (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE):
addr := common.Address(stackData[stackLen-2].Bytes20())
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index 8b26f5b3ca..5008378da6 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"math/big"
+ "time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
@@ -705,6 +706,39 @@ func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) er
return ec.c.CallContext(ctx, nil, "eth_sendRawTransaction", hexutil.Encode(data))
}
+// SendTransactionSync submits a signed transaction and waits for its receipt
+// (or until the server-side wait window elapses). If timeout is nil or rounds
+// down to zero milliseconds, the server applies its default.
+func (ec *Client) SendTransactionSync(
+ ctx context.Context,
+ tx *types.Transaction,
+ timeout *time.Duration,
+) (*types.Receipt, error) {
+ raw, err := tx.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ return ec.SendRawTransactionSync(ctx, raw, timeout)
+}
+
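+// SendRawTransactionSync injects a raw, RLP-encoded signed transaction via
+// eth_sendRawTransactionSync and blocks until the server returns the receipt
+// or its wait window elapses. A nil or zero timeout uses the server default.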
+func (ec *Client) SendRawTransactionSync(
+ ctx context.Context,
+ rawTx []byte,
+ timeout *time.Duration,
+) (*types.Receipt, error) {
+ var ms *hexutil.Uint64
+ if timeout != nil {
+ if d := hexutil.Uint64(timeout.Milliseconds()); d > 0 {
+ ms = &d
+ }
+ }
+ var receipt types.Receipt
+ if err := ec.c.CallContext(ctx, &receipt, "eth_sendRawTransactionSync", hexutil.Bytes(rawTx), ms); err != nil {
+ return nil, err
+ }
+ return &receipt, nil
+}
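+
+// Example (sketch):
+//
+//	timeout := 30 * time.Second
+//	receipt, err := client.SendTransactionSync(ctx, signedTx, &timeout)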
+
// RevertErrorData returns the 'revert reason' data of a contract call.
//
// This can be used with CallContract and EstimateGas, and only when the server is Geth.
@@ -828,3 +862,89 @@ func (p *rpcProgress) toSyncProgress() *ethereum.SyncProgress {
StateIndexRemaining: uint64(p.StateIndexRemaining),
}
}
+
+// SimulateOptions represents the options for eth_simulateV1.
+type SimulateOptions struct {
+ BlockStateCalls []SimulateBlock `json:"blockStateCalls"`
+ TraceTransfers bool `json:"traceTransfers"`
+ Validation bool `json:"validation"`
+ ReturnFullTransactions bool `json:"returnFullTransactions"`
+}
+
+// SimulateBlock represents a batch of calls to be simulated.
+type SimulateBlock struct {
+ BlockOverrides *ethereum.BlockOverrides `json:"blockOverrides,omitempty"`
+ StateOverrides map[common.Address]ethereum.OverrideAccount `json:"stateOverrides,omitempty"`
+ Calls []ethereum.CallMsg `json:"calls"`
+}
+
+// MarshalJSON implements json.Marshaler for SimulateBlock.
+func (s SimulateBlock) MarshalJSON() ([]byte, error) {
+ type Alias struct {
+ BlockOverrides *ethereum.BlockOverrides `json:"blockOverrides,omitempty"`
+ StateOverrides map[common.Address]ethereum.OverrideAccount `json:"stateOverrides,omitempty"`
+ Calls []interface{} `json:"calls"`
+ }
+ calls := make([]interface{}, len(s.Calls))
+ for i, call := range s.Calls {
+ calls[i] = toCallArg(call)
+ }
+ return json.Marshal(Alias{
+ BlockOverrides: s.BlockOverrides,
+ StateOverrides: s.StateOverrides,
+ Calls: calls,
+ })
+}
+
+//go:generate go run github.com/fjl/gencodec -type SimulateCallResult -field-override simulateCallResultMarshaling -out gen_simulate_call_result.go
+
+// SimulateCallResult is the result of a simulated call.
+type SimulateCallResult struct {
+ ReturnValue []byte `json:"returnData"`
+ Logs []*types.Log `json:"logs"`
+ GasUsed uint64 `json:"gasUsed"`
+ Status uint64 `json:"status"`
+ Error *CallError `json:"error,omitempty"`
+}
+
+type simulateCallResultMarshaling struct {
+ ReturnValue hexutil.Bytes
+ GasUsed hexutil.Uint64
+ Status hexutil.Uint64
+}
+
+// CallError represents an error from a simulated call.
+type CallError struct {
+ Code int `json:"code"`
+ Message string `json:"message"`
+ Data string `json:"data,omitempty"`
+}
+
+//go:generate go run github.com/fjl/gencodec -type SimulateBlockResult -field-override simulateBlockResultMarshaling -out gen_simulate_block_result.go
+
+// SimulateBlockResult represents the result of a simulated block.
+type SimulateBlockResult struct {
+ Number *big.Int `json:"number"`
+ Hash common.Hash `json:"hash"`
+ Timestamp uint64 `json:"timestamp"`
+ GasLimit uint64 `json:"gasLimit"`
+ GasUsed uint64 `json:"gasUsed"`
+ FeeRecipient common.Address `json:"miner"`
+ BaseFeePerGas *big.Int `json:"baseFeePerGas,omitempty"`
+ Calls []SimulateCallResult `json:"calls"`
+}
+
+type simulateBlockResultMarshaling struct {
+ Number *hexutil.Big
+ Timestamp hexutil.Uint64
+ GasLimit hexutil.Uint64
+ GasUsed hexutil.Uint64
+ BaseFeePerGas *hexutil.Big
+}
+
+// SimulateV1 executes the given batches of calls as consecutive simulated blocks
+// on top of the base state identified by blockNrOrHash.
+func (ec *Client) SimulateV1(ctx context.Context, opts SimulateOptions, blockNrOrHash *rpc.BlockNumberOrHash) ([]SimulateBlockResult, error) {
+ var result []SimulateBlockResult
+ err := ec.c.CallContext(ctx, &result, "eth_simulateV1", opts, blockNrOrHash)
+ return result, err
+}
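+
+// Example (sketch): run opts against the latest block.
+//
+//	latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
+//	results, err := client.SimulateV1(ctx, opts, &latest)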
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 62ec8b39b6..f98829967e 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -929,3 +929,250 @@ func ExampleRevertErrorData() {
// revert: 08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000a75736572206572726f72
// message: user error
}
+
+func TestSimulateV1(t *testing.T) {
+ backend, _, err := newTestBackend(t, nil, false)
+ if err != nil {
+ t.Fatalf("Failed to create test backend: %v", err)
+ }
+ defer backend.Close()
+
+ client := ethclient.NewClient(backend.Attach())
+ defer client.Close()
+
+ ctx := context.Background()
+
+ // Get current base fee
+ header, err := client.HeaderByNumber(ctx, nil)
+ if err != nil {
+ t.Fatalf("Failed to get header: %v", err)
+ }
+
+ // Simple test: transfer ETH from one account to another
+ from := testAddr
+ to := common.HexToAddress("0x0000000000000000000000000000000000000001")
+ value := big.NewInt(100)
+ gas := uint64(100000)
+ maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2))
+
+ opts := ethclient.SimulateOptions{
+ BlockStateCalls: []ethclient.SimulateBlock{
+ {
+ Calls: []ethereum.CallMsg{
+ {
+ From: from,
+ To: &to,
+ Value: value,
+ Gas: gas,
+ GasFeeCap: maxFeePerGas,
+ },
+ },
+ },
+ },
+ Validation: true,
+ }
+
+ results, err := client.SimulateV1(ctx, opts, nil)
+ if err != nil {
+ t.Fatalf("SimulateV1 failed: %v", err)
+ }
+
+ if len(results) != 1 {
+ t.Fatalf("expected 1 block result, got %d", len(results))
+ }
+
+ if len(results[0].Calls) != 1 {
+ t.Fatalf("expected 1 call result, got %d", len(results[0].Calls))
+ }
+
+ // Check that the transaction succeeded
+ if results[0].Calls[0].Status != 1 {
+ t.Errorf("expected status 1 (success), got %d", results[0].Calls[0].Status)
+ }
+
+ if results[0].Calls[0].Error != nil {
+ t.Errorf("expected no error, got %v", results[0].Calls[0].Error)
+ }
+}
+
+func TestSimulateV1WithBlockOverrides(t *testing.T) {
+ backend, _, err := newTestBackend(t, nil, false)
+ if err != nil {
+ t.Fatalf("Failed to create test backend: %v", err)
+ }
+ defer backend.Close()
+
+ client := ethclient.NewClient(backend.Attach())
+ defer client.Close()
+
+ ctx := context.Background()
+
+ // Get current base fee
+ header, err := client.HeaderByNumber(ctx, nil)
+ if err != nil {
+ t.Fatalf("Failed to get header: %v", err)
+ }
+
+ from := testAddr
+ to := common.HexToAddress("0x0000000000000000000000000000000000000001")
+ value := big.NewInt(100)
+ gas := uint64(100000)
+ maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2))
+
+ // Override timestamp only
+ timestamp := uint64(1234567890)
+
+ opts := ethclient.SimulateOptions{
+ BlockStateCalls: []ethclient.SimulateBlock{
+ {
+ BlockOverrides: &ethereum.BlockOverrides{
+ Time: timestamp,
+ },
+ Calls: []ethereum.CallMsg{
+ {
+ From: from,
+ To: &to,
+ Value: value,
+ Gas: gas,
+ GasFeeCap: maxFeePerGas,
+ },
+ },
+ },
+ },
+ Validation: true,
+ }
+
+ results, err := client.SimulateV1(ctx, opts, nil)
+ if err != nil {
+ t.Fatalf("SimulateV1 with block overrides failed: %v", err)
+ }
+
+ if len(results) != 1 {
+ t.Fatalf("expected 1 block result, got %d", len(results))
+ }
+
+ // Verify the timestamp was overridden
+ if results[0].Timestamp != timestamp {
+ t.Errorf("expected timestamp %d, got %d", timestamp, results[0].Timestamp)
+ }
+}
+
+func TestSimulateV1WithStateOverrides(t *testing.T) {
+ backend, _, err := newTestBackend(t, nil, false)
+ if err != nil {
+ t.Fatalf("Failed to create test backend: %v", err)
+ }
+ defer backend.Close()
+
+ client := ethclient.NewClient(backend.Attach())
+ defer client.Close()
+
+ ctx := context.Background()
+
+ // Get current base fee
+ header, err := client.HeaderByNumber(ctx, nil)
+ if err != nil {
+ t.Fatalf("Failed to get header: %v", err)
+ }
+
+ from := testAddr
+ to := common.HexToAddress("0x0000000000000000000000000000000000000001")
+ value := big.NewInt(1000000000000000000) // 1 ETH
+ gas := uint64(100000)
+ maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2))
+
+ // Override the balance of the 'from' address
+ balanceStr := "1000000000000000000000"
+ balance := new(big.Int)
+ balance.SetString(balanceStr, 10)
+
+ stateOverrides := map[common.Address]ethereum.OverrideAccount{
+ from: {
+ Balance: balance,
+ },
+ }
+
+ opts := ethclient.SimulateOptions{
+ BlockStateCalls: []ethclient.SimulateBlock{
+ {
+ StateOverrides: stateOverrides,
+ Calls: []ethereum.CallMsg{
+ {
+ From: from,
+ To: &to,
+ Value: value,
+ Gas: gas,
+ GasFeeCap: maxFeePerGas,
+ },
+ },
+ },
+ },
+ Validation: true,
+ }
+
+ results, err := client.SimulateV1(ctx, opts, nil)
+ if err != nil {
+ t.Fatalf("SimulateV1 with state overrides failed: %v", err)
+ }
+
+ if len(results) != 1 {
+ t.Fatalf("expected 1 block result, got %d", len(results))
+ }
+
+ if results[0].Calls[0].Status != 1 {
+ t.Errorf("expected status 1 (success), got %d", results[0].Calls[0].Status)
+ }
+}
+
+func TestSimulateV1WithBlockNumberOrHash(t *testing.T) {
+ backend, _, err := newTestBackend(t, nil, false)
+ if err != nil {
+ t.Fatalf("Failed to create test backend: %v", err)
+ }
+ defer backend.Close()
+
+ client := ethclient.NewClient(backend.Attach())
+ defer client.Close()
+
+ ctx := context.Background()
+
+ // Get current base fee
+ header, err := client.HeaderByNumber(ctx, nil)
+ if err != nil {
+ t.Fatalf("Failed to get header: %v", err)
+ }
+
+ from := testAddr
+ to := common.HexToAddress("0x0000000000000000000000000000000000000001")
+ value := big.NewInt(100)
+ gas := uint64(100000)
+ maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2))
+
+ opts := ethclient.SimulateOptions{
+ BlockStateCalls: []ethclient.SimulateBlock{
+ {
+ Calls: []ethereum.CallMsg{
+ {
+ From: from,
+ To: &to,
+ Value: value,
+ Gas: gas,
+ GasFeeCap: maxFeePerGas,
+ },
+ },
+ },
+ },
+ Validation: true,
+ }
+
+ // Simulate on the latest block
+ latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
+ results, err := client.SimulateV1(ctx, opts, &latest)
+ if err != nil {
+ t.Fatalf("SimulateV1 with latest block failed: %v", err)
+ }
+
+ if len(results) != 1 {
+ t.Fatalf("expected 1 block result, got %d", len(results))
+ }
+}
diff --git a/ethclient/gen_simulate_block_result.go b/ethclient/gen_simulate_block_result.go
new file mode 100644
index 0000000000..b8cd6ebf2f
--- /dev/null
+++ b/ethclient/gen_simulate_block_result.go
@@ -0,0 +1,80 @@
+// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
+
+package ethclient
+
+import (
+ "encoding/json"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+var _ = (*simulateBlockResultMarshaling)(nil)
+
+// MarshalJSON marshals as JSON.
+func (s SimulateBlockResult) MarshalJSON() ([]byte, error) {
+ type SimulateBlockResult struct {
+ Number *hexutil.Big `json:"number"`
+ Hash common.Hash `json:"hash"`
+ Timestamp hexutil.Uint64 `json:"timestamp"`
+ GasLimit hexutil.Uint64 `json:"gasLimit"`
+ GasUsed hexutil.Uint64 `json:"gasUsed"`
+ FeeRecipient common.Address `json:"miner"`
+ BaseFeePerGas *hexutil.Big `json:"baseFeePerGas,omitempty"`
+ Calls []SimulateCallResult `json:"calls"`
+ }
+ var enc SimulateBlockResult
+ enc.Number = (*hexutil.Big)(s.Number)
+ enc.Hash = s.Hash
+ enc.Timestamp = hexutil.Uint64(s.Timestamp)
+ enc.GasLimit = hexutil.Uint64(s.GasLimit)
+ enc.GasUsed = hexutil.Uint64(s.GasUsed)
+ enc.FeeRecipient = s.FeeRecipient
+ enc.BaseFeePerGas = (*hexutil.Big)(s.BaseFeePerGas)
+ enc.Calls = s.Calls
+ return json.Marshal(&enc)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (s *SimulateBlockResult) UnmarshalJSON(input []byte) error {
+ type SimulateBlockResult struct {
+ Number *hexutil.Big `json:"number"`
+ Hash *common.Hash `json:"hash"`
+ Timestamp *hexutil.Uint64 `json:"timestamp"`
+ GasLimit *hexutil.Uint64 `json:"gasLimit"`
+ GasUsed *hexutil.Uint64 `json:"gasUsed"`
+ FeeRecipient *common.Address `json:"miner"`
+ BaseFeePerGas *hexutil.Big `json:"baseFeePerGas,omitempty"`
+ Calls []SimulateCallResult `json:"calls"`
+ }
+ var dec SimulateBlockResult
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ if dec.Number != nil {
+ s.Number = (*big.Int)(dec.Number)
+ }
+ if dec.Hash != nil {
+ s.Hash = *dec.Hash
+ }
+ if dec.Timestamp != nil {
+ s.Timestamp = uint64(*dec.Timestamp)
+ }
+ if dec.GasLimit != nil {
+ s.GasLimit = uint64(*dec.GasLimit)
+ }
+ if dec.GasUsed != nil {
+ s.GasUsed = uint64(*dec.GasUsed)
+ }
+ if dec.FeeRecipient != nil {
+ s.FeeRecipient = *dec.FeeRecipient
+ }
+ if dec.BaseFeePerGas != nil {
+ s.BaseFeePerGas = (*big.Int)(dec.BaseFeePerGas)
+ }
+ if dec.Calls != nil {
+ s.Calls = dec.Calls
+ }
+ return nil
+}
diff --git a/ethclient/gen_simulate_call_result.go b/ethclient/gen_simulate_call_result.go
new file mode 100644
index 0000000000..55e14cd697
--- /dev/null
+++ b/ethclient/gen_simulate_call_result.go
@@ -0,0 +1,61 @@
+// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
+
+package ethclient
+
+import (
+ "encoding/json"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+var _ = (*simulateCallResultMarshaling)(nil)
+
+// MarshalJSON marshals as JSON.
+func (s SimulateCallResult) MarshalJSON() ([]byte, error) {
+ type SimulateCallResult struct {
+ ReturnValue hexutil.Bytes `json:"returnData"`
+ Logs []*types.Log `json:"logs"`
+ GasUsed hexutil.Uint64 `json:"gasUsed"`
+ Status hexutil.Uint64 `json:"status"`
+ Error *CallError `json:"error,omitempty"`
+ }
+ var enc SimulateCallResult
+ enc.ReturnValue = s.ReturnValue
+ enc.Logs = s.Logs
+ enc.GasUsed = hexutil.Uint64(s.GasUsed)
+ enc.Status = hexutil.Uint64(s.Status)
+ enc.Error = s.Error
+ return json.Marshal(&enc)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (s *SimulateCallResult) UnmarshalJSON(input []byte) error {
+ type SimulateCallResult struct {
+ ReturnValue *hexutil.Bytes `json:"returnData"`
+ Logs []*types.Log `json:"logs"`
+ GasUsed *hexutil.Uint64 `json:"gasUsed"`
+ Status *hexutil.Uint64 `json:"status"`
+ Error *CallError `json:"error,omitempty"`
+ }
+ var dec SimulateCallResult
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ if dec.ReturnValue != nil {
+ s.ReturnValue = *dec.ReturnValue
+ }
+ if dec.Logs != nil {
+ s.Logs = dec.Logs
+ }
+ if dec.GasUsed != nil {
+ s.GasUsed = uint64(*dec.GasUsed)
+ }
+ if dec.Status != nil {
+ s.Status = uint64(*dec.Status)
+ }
+ if dec.Error != nil {
+ s.Error = dec.Error
+ }
+ return nil
+}
diff --git a/ethclient/gethclient/gethclient.go b/ethclient/gethclient/gethclient.go
index 54997cbf51..6a0f5eb312 100644
--- a/ethclient/gethclient/gethclient.go
+++ b/ethclient/gethclient/gethclient.go
@@ -19,7 +19,6 @@ package gethclient
import (
"context"
- "encoding/json"
"fmt"
"math/big"
"runtime"
@@ -280,97 +279,8 @@ func toCallArg(msg ethereum.CallMsg) interface{} {
return arg
}
-// OverrideAccount specifies the state of an account to be overridden.
-type OverrideAccount struct {
- // Nonce sets nonce of the account. Note: the nonce override will only
- // be applied when it is set to a non-zero value.
- Nonce uint64
+// OverrideAccount is an alias for ethereum.OverrideAccount.
+type OverrideAccount = ethereum.OverrideAccount
- // Code sets the contract code. The override will be applied
- // when the code is non-nil, i.e. setting empty code is possible
- // using an empty slice.
- Code []byte
-
- // Balance sets the account balance.
- Balance *big.Int
-
- // State sets the complete storage. The override will be applied
- // when the given map is non-nil. Using an empty map wipes the
- // entire contract storage during the call.
- State map[common.Hash]common.Hash
-
- // StateDiff allows overriding individual storage slots.
- StateDiff map[common.Hash]common.Hash
-}
-
-func (a OverrideAccount) MarshalJSON() ([]byte, error) {
- type acc struct {
- Nonce hexutil.Uint64 `json:"nonce,omitempty"`
- Code string `json:"code,omitempty"`
- Balance *hexutil.Big `json:"balance,omitempty"`
- State interface{} `json:"state,omitempty"`
- StateDiff map[common.Hash]common.Hash `json:"stateDiff,omitempty"`
- }
-
- output := acc{
- Nonce: hexutil.Uint64(a.Nonce),
- Balance: (*hexutil.Big)(a.Balance),
- StateDiff: a.StateDiff,
- }
- if a.Code != nil {
- output.Code = hexutil.Encode(a.Code)
- }
- if a.State != nil {
- output.State = a.State
- }
- return json.Marshal(output)
-}
-
-// BlockOverrides specifies the set of header fields to override.
-type BlockOverrides struct {
- // Number overrides the block number.
- Number *big.Int
- // Difficulty overrides the block difficulty.
- Difficulty *big.Int
- // Time overrides the block timestamp. Time is applied only when
- // it is non-zero.
- Time uint64
- // GasLimit overrides the block gas limit. GasLimit is applied only when
- // it is non-zero.
- GasLimit uint64
- // Coinbase overrides the block coinbase. Coinbase is applied only when
- // it is different from the zero address.
- Coinbase common.Address
- // Random overrides the block extra data which feeds into the RANDOM opcode.
- // Random is applied only when it is a non-zero hash.
- Random common.Hash
- // BaseFee overrides the block base fee.
- BaseFee *big.Int
-}
-
-func (o BlockOverrides) MarshalJSON() ([]byte, error) {
- type override struct {
- Number *hexutil.Big `json:"number,omitempty"`
- Difficulty *hexutil.Big `json:"difficulty,omitempty"`
- Time hexutil.Uint64 `json:"time,omitempty"`
- GasLimit hexutil.Uint64 `json:"gasLimit,omitempty"`
- Coinbase *common.Address `json:"feeRecipient,omitempty"`
- Random *common.Hash `json:"prevRandao,omitempty"`
- BaseFee *hexutil.Big `json:"baseFeePerGas,omitempty"`
- }
-
- output := override{
- Number: (*hexutil.Big)(o.Number),
- Difficulty: (*hexutil.Big)(o.Difficulty),
- Time: hexutil.Uint64(o.Time),
- GasLimit: hexutil.Uint64(o.GasLimit),
- BaseFee: (*hexutil.Big)(o.BaseFee),
- }
- if o.Coinbase != (common.Address{}) {
- output.Coinbase = &o.Coinbase
- }
- if o.Random != (common.Hash{}) {
- output.Random = &o.Random
- }
- return json.Marshal(output)
-}
+// BlockOverrides is an alias for ethereum.BlockOverrides.
+type BlockOverrides = ethereum.BlockOverrides
diff --git a/go.mod b/go.mod
index 794edcdaa6..e0ef38503a 100644
--- a/go.mod
+++ b/go.mod
@@ -82,7 +82,8 @@ require (
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
- github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect
+ github.com/DataDog/zstd v1.4.5 // indirect
+ github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect
diff --git a/go.sum b/go.sum
index 6268271dbf..92cb7fb8cc 100644
--- a/go.sum
+++ b/go.sum
@@ -12,10 +12,12 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkM
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
-github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE=
-github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
+github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU=
+github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0=
diff --git a/interfaces.go b/interfaces.go
index 2828af1cc9..21d42c6d34 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -19,10 +19,12 @@ package ethereum
import (
"context"
+ "encoding/json"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
@@ -293,3 +295,98 @@ type BlockNumberReader interface {
type ChainIDReader interface {
ChainID(ctx context.Context) (*big.Int, error)
}
+
+// OverrideAccount specifies the state of an account to be overridden.
+type OverrideAccount struct {
+ // Nonce sets nonce of the account. Note: the nonce override will only
+ // be applied when it is set to a non-zero value.
+ Nonce uint64
+
+ // Code sets the contract code. The override will be applied
+ // when the code is non-nil, i.e. setting empty code is possible
+ // using an empty slice.
+ Code []byte
+
+ // Balance sets the account balance.
+ Balance *big.Int
+
+ // State sets the complete storage. The override will be applied
+ // when the given map is non-nil. Using an empty map wipes the
+ // entire contract storage during the call.
+ State map[common.Hash]common.Hash
+
+ // StateDiff allows overriding individual storage slots.
+ StateDiff map[common.Hash]common.Hash
+}
+
+func (a OverrideAccount) MarshalJSON() ([]byte, error) {
+ type acc struct {
+ Nonce hexutil.Uint64 `json:"nonce,omitempty"`
+ Code string `json:"code,omitempty"`
+ Balance *hexutil.Big `json:"balance,omitempty"`
+ State interface{} `json:"state,omitempty"`
+ StateDiff map[common.Hash]common.Hash `json:"stateDiff,omitempty"`
+ }
+
+ output := acc{
+ Nonce: hexutil.Uint64(a.Nonce),
+ Balance: (*hexutil.Big)(a.Balance),
+ StateDiff: a.StateDiff,
+ }
+ if a.Code != nil {
+ output.Code = hexutil.Encode(a.Code)
+ }
+ if a.State != nil {
+ output.State = a.State
+ }
+ return json.Marshal(output)
+}
+
+// BlockOverrides specifies the set of header fields to override.
+type BlockOverrides struct {
+ // Number overrides the block number.
+ Number *big.Int
+ // Difficulty overrides the block difficulty.
+ Difficulty *big.Int
+ // Time overrides the block timestamp. Time is applied only when
+ // it is non-zero.
+ Time uint64
+ // GasLimit overrides the block gas limit. GasLimit is applied only when
+ // it is non-zero.
+ GasLimit uint64
+ // Coinbase overrides the block coinbase. Coinbase is applied only when
+ // it is different from the zero address.
+ Coinbase common.Address
+ // Random overrides the block extra data which feeds into the RANDOM opcode.
+ // Random is applied only when it is a non-zero hash.
+ Random common.Hash
+ // BaseFee overrides the block base fee.
+ BaseFee *big.Int
+}
+
+func (o BlockOverrides) MarshalJSON() ([]byte, error) {
+ type override struct {
+ Number *hexutil.Big `json:"number,omitempty"`
+ Difficulty *hexutil.Big `json:"difficulty,omitempty"`
+ Time hexutil.Uint64 `json:"time,omitempty"`
+ GasLimit hexutil.Uint64 `json:"gasLimit,omitempty"`
+ Coinbase *common.Address `json:"feeRecipient,omitempty"`
+ Random *common.Hash `json:"prevRandao,omitempty"`
+ BaseFee *hexutil.Big `json:"baseFeePerGas,omitempty"`
+ }
+
+ output := override{
+ Number: (*hexutil.Big)(o.Number),
+ Difficulty: (*hexutil.Big)(o.Difficulty),
+ Time: hexutil.Uint64(o.Time),
+ GasLimit: hexutil.Uint64(o.GasLimit),
+ BaseFee: (*hexutil.Big)(o.BaseFee),
+ }
+ if o.Coinbase != (common.Address{}) {
+ output.Coinbase = &o.Coinbase
+ }
+ if o.Random != (common.Hash{}) {
+ output.Random = &o.Random
+ }
+ return json.Marshal(output)
+}
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 507f64b962..e4f8825f96 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -56,6 +56,7 @@ import (
const estimateGasErrorRatio = 0.015
var errBlobTxNotSupported = errors.New("signing blob transactions not supported")
+var errSubClosed = errors.New("chain subscription closed")
// EthereumAPI provides an API to access Ethereum related information.
type EthereumAPI struct {
@@ -1920,6 +1921,104 @@ func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil
return SubmitTransaction(ctx, api.b, tx)
}
+// SendRawTransactionSync adds the signed transaction to the transaction pool and
+// waits until it has been included in a block, returning the receipt; if the wait
+// window (capped at the configured maximum) elapses first, a timeout error is returned.
+func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hexutil.Bytes, timeoutMs *hexutil.Uint64) (map[string]interface{}, error) {
+ tx := new(types.Transaction)
+ if err := tx.UnmarshalBinary(input); err != nil {
+ return nil, err
+ }
+
+ // Convert legacy blob transaction proofs.
+ // TODO: remove in go-ethereum v1.17.x
+ if sc := tx.BlobTxSidecar(); sc != nil {
+ exp := api.currentBlobSidecarVersion()
+ if sc.Version == types.BlobSidecarVersion0 && exp == types.BlobSidecarVersion1 {
+ if err := sc.ToV1(); err != nil {
+ return nil, fmt.Errorf("blob sidecar conversion failed: %v", err)
+ }
+ tx = tx.WithBlobTxSidecar(sc)
+ }
+ }
+
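+ // Subscribe to chain events before submitting, so an inclusion that happens
+ // immediately after submission cannot be missed.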
+ ch := make(chan core.ChainEvent, 128)
+ sub := api.b.SubscribeChainEvent(ch)
+ defer sub.Unsubscribe()
+
+ hash, err := SubmitTransaction(ctx, api.b, tx)
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ maxTimeout = api.b.RPCTxSyncMaxTimeout()
+ defaultTimeout = api.b.RPCTxSyncDefaultTimeout()
+ timeout = defaultTimeout
+ )
+ if timeoutMs != nil && *timeoutMs > 0 {
+ req := time.Duration(*timeoutMs) * time.Millisecond
+ if req > maxTimeout {
+ timeout = maxTimeout
+ } else {
+ timeout = req
+ }
+ }
+ receiptCtx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ // Fast path.
+ if r, err := api.GetTransactionReceipt(receiptCtx, hash); err == nil && r != nil {
+ return r, nil
+ }
+
+ // Monitor the receipts
+ for {
+ select {
+ case <-receiptCtx.Done():
+ // If the server-side wait window has elapsed, return the structured timeout error.
+ if errors.Is(receiptCtx.Err(), context.DeadlineExceeded) {
+ return nil, &txSyncTimeoutError{
+ msg: fmt.Sprintf("The transaction was added to the transaction pool but wasn't processed in %v", timeout),
+ hash: hash,
+ }
+ }
+ return nil, receiptCtx.Err()
+
+ case err, ok := <-sub.Err():
+ if !ok {
+ return nil, errSubClosed
+ }
+ return nil, err
+
+ case ev, ok := <-ch:
+ if !ok {
+ return nil, errSubClosed
+ }
+ rs, txs := ev.Receipts, ev.Transactions
+ if len(rs) == 0 || len(rs) != len(txs) {
+ continue
+ }
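+ // Scan the block's receipts for the submitted transaction hash.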
+ for i := range rs {
+ if rs[i].TxHash == hash {
+ if rs[i].BlockNumber != nil && rs[i].BlockHash != (common.Hash{}) {
+ signer := types.LatestSigner(api.b.ChainConfig())
+ return MarshalReceipt(
+ rs[i],
+ rs[i].BlockHash,
+ rs[i].BlockNumber.Uint64(),
+ signer,
+ txs[i],
+ int(rs[i].TransactionIndex),
+ api.b.ChainConfig(),
+ ), nil
+ }
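+ // Receipt found but missing block metadata; fall back to the indexed lookup.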
+ return api.GetTransactionReceipt(receiptCtx, hash)
+ }
+ }
+ }
+ }
+}
+
// Sign calculates an ECDSA signature for:
// keccak256("\x19Ethereum Signed Message:\n" + len(message) + message).
//
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 0e89eb0d65..153efde5ba 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -598,6 +598,19 @@ type testBackend struct {
pending *types.Block
pendingReceipts types.Receipts
+
+ chainFeed *event.Feed
+ autoMine bool
+
+ sentTx *types.Transaction
+ sentTxHash common.Hash
+
+ syncDefaultTimeout time.Duration
+ syncMaxTimeout time.Duration
+}
+
+func fakeBlockHash(txh common.Hash) common.Hash {
+ return crypto.Keccak256Hash([]byte("testblock"), txh.Bytes())
}
func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.Engine, generator func(i int, b *core.BlockGen)) *testBackend {
@@ -624,6 +637,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E
acc: acc,
pending: blocks[n],
pendingReceipts: receipts[n],
+ chainFeed: new(event.Feed),
}
return backend
}
@@ -745,19 +759,64 @@ func (b testBackend) GetEVM(ctx context.Context, state *state.StateDB, header *t
return vm.NewEVM(context, state, b.chain.Config(), *vmConfig)
}
func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
- panic("implement me")
+ return b.chainFeed.Subscribe(ch)
}
func (b testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
panic("implement me")
}
-func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
- panic("implement me")
+func (b *testBackend) SendTx(ctx context.Context, tx *types.Transaction) error {
+ b.sentTx = tx
+ b.sentTxHash = tx.Hash()
+
+ if b.autoMine {
+ // Synthesize a "mined" receipt at head+1
+ num := b.chain.CurrentHeader().Number.Uint64() + 1
+ receipt := &types.Receipt{
+ TxHash: tx.Hash(),
+ Status: types.ReceiptStatusSuccessful,
+ BlockHash: fakeBlockHash(tx.Hash()),
+ BlockNumber: new(big.Int).SetUint64(num),
+ TransactionIndex: 0,
+ CumulativeGasUsed: 21000,
+ GasUsed: 21000,
+ }
+ // Broadcast a ChainEvent that includes the receipts and txs
+ b.chainFeed.Send(core.ChainEvent{
+ Header: &types.Header{
+ Number: new(big.Int).SetUint64(num),
+ },
+ Receipts: types.Receipts{receipt},
+ Transactions: types.Transactions{tx},
+ })
+ }
+ return nil
}
-func (b testBackend) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) {
+func (b *testBackend) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) {
+ // Treat the auto-mined tx as canonically placed at head+1.
+ if b.autoMine && txHash == b.sentTxHash {
+ num := b.chain.CurrentHeader().Number.Uint64() + 1
+ return true, b.sentTx, fakeBlockHash(txHash), num, 0
+ }
tx, blockHash, blockNumber, index := rawdb.ReadCanonicalTransaction(b.db, txHash)
return tx != nil, tx, blockHash, blockNumber, index
}
-func (b testBackend) GetCanonicalReceipt(tx *types.Transaction, blockHash common.Hash, blockNumber, blockIndex uint64) (*types.Receipt, error) {
+func (b *testBackend) GetCanonicalReceipt(tx *types.Transaction, blockHash common.Hash, blockNumber, blockIndex uint64) (*types.Receipt, error) {
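+ // Serve a synthetic receipt for the auto-mined transaction; otherwise defer to the chain.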
+ if b.autoMine && tx != nil && tx.Hash() == b.sentTxHash &&
+ blockHash == fakeBlockHash(tx.Hash()) &&
+ blockIndex == 0 &&
+ blockNumber == b.chain.CurrentHeader().Number.Uint64()+1 {
+ return &types.Receipt{
+ Type: tx.Type(),
+ Status: types.ReceiptStatusSuccessful,
+ CumulativeGasUsed: 21000,
+ GasUsed: 21000,
+ EffectiveGasPrice: big.NewInt(1),
+ BlockHash: blockHash,
+ BlockNumber: new(big.Int).SetUint64(blockNumber),
+ TransactionIndex: 0,
+ TxHash: tx.Hash(),
+ }, nil
+ }
return b.chain.GetCanonicalReceipt(tx, blockHash, blockNumber, blockIndex)
}
func (b testBackend) TxIndexDone() bool {
@@ -4062,3 +4121,109 @@ func (b configTimeBackend) HeaderByNumber(_ context.Context, n rpc.BlockNumber)
func (b configTimeBackend) CurrentHeader() *types.Header {
return &types.Header{Time: b.time}
}
+
+func (b *testBackend) RPCTxSyncDefaultTimeout() time.Duration {
+ if b.syncDefaultTimeout != 0 {
+ return b.syncDefaultTimeout
+ }
+ return 2 * time.Second
+}
+func (b *testBackend) RPCTxSyncMaxTimeout() time.Duration {
+ if b.syncMaxTimeout != 0 {
+ return b.syncMaxTimeout
+ }
+ return 5 * time.Minute
+}
+func (b *backendMock) RPCTxSyncDefaultTimeout() time.Duration { return 2 * time.Second }
+func (b *backendMock) RPCTxSyncMaxTimeout() time.Duration { return 5 * time.Minute }
+
+func makeSignedRaw(t *testing.T, api *TransactionAPI, from, to common.Address, value *big.Int) (hexutil.Bytes, *types.Transaction) {
+ t.Helper()
+
+ fillRes, err := api.FillTransaction(context.Background(), TransactionArgs{
+ From: &from,
+ To: &to,
+ Value: (*hexutil.Big)(value),
+ })
+ if err != nil {
+ t.Fatalf("FillTransaction failed: %v", err)
+ }
+ signRes, err := api.SignTransaction(context.Background(), argsFromTransaction(fillRes.Tx, from))
+ if err != nil {
+ t.Fatalf("SignTransaction failed: %v", err)
+ }
+ return signRes.Raw, signRes.Tx
+}
+
+// makeSelfSignedRaw is a convenience for a 0-ETH self-transfer.
+func makeSelfSignedRaw(t *testing.T, api *TransactionAPI, addr common.Address) (hexutil.Bytes, *types.Transaction) {
+ return makeSignedRaw(t, api, addr, addr, big.NewInt(0))
+}
+
+func TestSendRawTransactionSync_Success(t *testing.T) {
+ t.Parallel()
+ genesis := &core.Genesis{
+ Config: params.TestChainConfig,
+ Alloc: types.GenesisAlloc{},
+ }
+ b := newTestBackend(t, 0, genesis, ethash.NewFaker(), nil)
+ b.autoMine = true // immediately “mines” the tx in-memory
+
+ api := NewTransactionAPI(b, new(AddrLocker))
+
+ raw, _ := makeSelfSignedRaw(t, api, b.acc.Address)
+
+ receipt, err := api.SendRawTransactionSync(context.Background(), raw, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if receipt == nil {
+ t.Fatalf("expected non-nil receipt")
+ }
+ if _, ok := receipt["blockNumber"]; !ok {
+ t.Fatalf("expected blockNumber in receipt, got %#v", receipt)
+ }
+}
+
+func TestSendRawTransactionSync_Timeout(t *testing.T) {
+ t.Parallel()
+
+ genesis := &core.Genesis{
+ Config: params.TestChainConfig,
+ Alloc: types.GenesisAlloc{},
+ }
+ b := newTestBackend(t, 0, genesis, ethash.NewFaker(), nil)
+ b.autoMine = false // don't mine, should time out
+
+ api := NewTransactionAPI(b, new(AddrLocker))
+
+ raw, _ := makeSelfSignedRaw(t, api, b.acc.Address)
+
+ timeout := hexutil.Uint64(200) // 200ms
+ receipt, err := api.SendRawTransactionSync(context.Background(), raw, &timeout)
+
+ if receipt != nil {
+ t.Fatalf("expected nil receipt, got %#v", receipt)
+ }
+ if err == nil {
+ t.Fatalf("expected timeout error, got nil")
+ }
+ // assert error shape & data (hash)
+ var de interface {
+ ErrorCode() int
+ ErrorData() interface{}
+ }
+ if !errors.As(err, &de) {
+ t.Fatalf("expected data error with code/data, got %T %v", err, err)
+ }
+ if de.ErrorCode() != errCodeTxSyncTimeout {
+ t.Fatalf("expected code %d, got %d", errCodeTxSyncTimeout, de.ErrorCode())
+ }
+ tx := new(types.Transaction)
+ if e := tx.UnmarshalBinary(raw); e != nil {
+ t.Fatal(e)
+ }
+ if got, want := de.ErrorData(), tx.Hash().Hex(); got != want {
+ t.Fatalf("expected ErrorData=%s, got %v", want, got)
+ }
+}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index e094bb53be..3c8708dacb 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -53,6 +53,8 @@ type Backend interface {
RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection
RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs
UnprotectedAllowed() bool // allows only for EIP155 transactions.
+ RPCTxSyncDefaultTimeout() time.Duration
+ RPCTxSyncMaxTimeout() time.Duration
// Blockchain API
SetHead(number uint64)
diff --git a/internal/ethapi/errors.go b/internal/ethapi/errors.go
index 154938fa0e..30711a0167 100644
--- a/internal/ethapi/errors.go
+++ b/internal/ethapi/errors.go
@@ -21,6 +21,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/vm"
@@ -33,6 +34,11 @@ type revertError struct {
reason string // revert reason hex encoded
}
+type txSyncTimeoutError struct {
+ msg string
+ hash common.Hash
+}
+
// ErrorCode returns the JSON error code for a revert.
// See: https://ethereum.org/en/developers/docs/apis/json-rpc/#error-codes
func (e *revertError) ErrorCode() int {
@@ -108,6 +114,7 @@ const (
errCodeInvalidParams = -32602
errCodeReverted = -32000
errCodeVMError = -32015
+ errCodeTxSyncTimeout = 4
)
func txValidationError(err error) *invalidTxError {
@@ -168,3 +175,7 @@ type blockGasLimitReachedError struct{ message string }
func (e *blockGasLimitReachedError) Error() string { return e.message }
func (e *blockGasLimitReachedError) ErrorCode() int { return errCodeBlockGasLimitReached }
+
+func (e *txSyncTimeoutError) Error() string { return e.msg }
+func (e *txSyncTimeoutError) ErrorCode() int { return errCodeTxSyncTimeout }
+func (e *txSyncTimeoutError) ErrorData() interface{} { return e.hash.Hex() }
diff --git a/internal/jsre/jsre.go b/internal/jsre/jsre.go
index 0dfeae8e1b..4512115f16 100644
--- a/internal/jsre/jsre.go
+++ b/internal/jsre/jsre.go
@@ -201,7 +201,7 @@ loop:
if !isFunc {
panic(re.vm.ToValue("js error: timer/timeout callback is not a function"))
}
- call(goja.Null(), timer.call.Arguments...)
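+ // Arguments[0] and Arguments[1] are the callback and the delay;
+ // only the remaining arguments are forwarded to the callback.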
+ call(goja.Null(), timer.call.Arguments[2:]...)
_, inreg := registry[timer] // when clearInterval is called from within the callback don't reset it
if timer.interval && inreg {
diff --git a/node/defaults.go b/node/defaults.go
index 307d9e186a..6c643e2b54 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -22,6 +22,7 @@ import (
"path/filepath"
"runtime"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/rpc"
@@ -90,7 +91,7 @@ func DefaultDataDir() string {
// is non-empty, use it, otherwise DTRT and check %LOCALAPPDATA%.
fallback := filepath.Join(home, "AppData", "Roaming", "Ethereum")
appdata := windowsAppData()
- if appdata == "" || isNonEmptyDir(fallback) {
+ if appdata == "" || common.IsNonEmptyDir(fallback) {
return fallback
}
return filepath.Join(appdata, "Ethereum")
@@ -113,16 +114,6 @@ func windowsAppData() string {
return v
}
-func isNonEmptyDir(dir string) bool {
- f, err := os.Open(dir)
- if err != nil {
- return false
- }
- names, _ := f.Readdir(1)
- f.Close()
- return len(names) > 0
-}
-
func homeDir() string {
if home := os.Getenv("HOME"); home != "" {
return home
diff --git a/p2p/server.go b/p2p/server.go
index a74df7f4fb..c84b7dce7f 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -490,6 +490,11 @@ func (srv *Server) setupDiscovery() error {
}
srv.discv5, err = discover.ListenV5(sconn, srv.localnode, cfg)
if err != nil {
+ // Clean up v4 if v5 setup fails.
+ if srv.discv4 != nil {
+ srv.discv4.Close()
+ srv.discv4 = nil
+ }
return err
}
srv.discmix.AddSource(srv.discv5.RandomNodes())
@@ -814,7 +819,9 @@ func (srv *Server) listenLoop() {
time.Sleep(time.Millisecond * 200)
continue
} else if err != nil {
- srv.log.Debug("Read error", "err", err)
+ if !errors.Is(err, net.ErrClosed) {
+ srv.log.Debug("Read error", "err", err)
+ }
slots <- struct{}{}
return
}
diff --git a/p2p/server_test.go b/p2p/server_test.go
index d42926cf4c..7bc7379099 100644
--- a/p2p/server_test.go
+++ b/p2p/server_test.go
@@ -579,6 +579,33 @@ func TestServerInboundThrottle(t *testing.T) {
}
}
+func TestServerDiscoveryV5FailureRollsBackV4(t *testing.T) {
+ badBootstrap := enode.NewV4(&newkey().PublicKey, net.ParseIP("127.0.0.1"), 30303, 0) // v4-style record, rejected by discovery v5 as a bootstrap node
+ srv := &Server{
+ Config: Config{
+ PrivateKey: newkey(),
+ ListenAddr: "",
+ DiscAddr: "127.0.0.1:0",
+ MaxPeers: 5,
+ DiscoveryV4: true,
+ DiscoveryV5: true,
+ BootstrapNodesV5: []*enode.Node{badBootstrap},
+ Logger: testlog.Logger(t, log.LvlTrace),
+ },
+ }
+ err := srv.Start()
+ if err == nil {
+ t.Fatal("expected discovery v5 startup failure")
+ }
+ if !strings.Contains(err.Error(), "bad bootstrap node") {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if srv.DiscoveryV4() != nil {
+ t.Fatal("discovery v4 not cleaned after failure")
+ }
+ srv.Stop()
+}
+
func listenFakeAddr(network, laddr string, remoteAddr net.Addr) (net.Listener, error) {
l, err := net.Listen(network, laddr)
if err == nil {
diff --git a/params/config.go b/params/config.go
index f1b177e0d6..647236c0f7 100644
--- a/params/config.go
+++ b/params/config.go
@@ -238,9 +238,11 @@ var (
CancunTime: newUint64(0),
TerminalTotalDifficulty: big.NewInt(0),
PragueTime: newUint64(0),
+ OsakaTime: newUint64(0),
BlobScheduleConfig: &BlobScheduleConfig{
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
+ Osaka: DefaultOsakaBlobConfig,
},
}
diff --git a/rpc/client.go b/rpc/client.go
index 04d1e0170f..e1a65970e5 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -32,7 +32,6 @@ import (
)
var (
- ErrBadResult = errors.New("bad result in JSON-RPC response")
ErrClientQuit = errors.New("client is closed")
ErrNoResult = errors.New("JSON-RPC response has no result")
ErrMissingBatchResponse = errors.New("response batch did not contain a response to this call")
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 1cfb52ecdd..8f6ec21227 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -973,7 +973,7 @@ func (l *flakeyListener) Accept() (net.Conn, error) {
c, err := l.Listener.Accept()
if err == nil {
- timeout := time.Duration(rand.Int63n(int64(l.maxKillTimeout)))
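+ // Enforce a minimum lifetime of 10ms so a connection is not killed
+ // immediately after being accepted.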
+ timeout := max(time.Millisecond*10, time.Duration(rand.Int63n(int64(l.maxKillTimeout))))
time.AfterFunc(timeout, func() {
log.Debug(fmt.Sprintf("killing conn %v after %v", c.LocalAddr(), timeout))
c.Close()
diff --git a/trie/transition.go b/trie/transition.go
index da49c6cdc2..c6eecd3937 100644
--- a/trie/transition.go
+++ b/trie/transition.go
@@ -211,7 +211,8 @@ func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error {
func (t *TransitionTrie) Copy() *TransitionTrie {
return &TransitionTrie{
overlay: t.overlay.Copy(),
- base: t.base.Copy(),
+ // base is immutable, so there is no need to copy it
+ base: t.base,
storage: t.storage,
}
}
diff --git a/triedb/pathdb/history_index_block.go b/triedb/pathdb/history_index_block.go
index 7648b99226..5abdee682a 100644
--- a/triedb/pathdb/history_index_block.go
+++ b/triedb/pathdb/history_index_block.go
@@ -25,10 +25,10 @@ import (
)
const (
- indexBlockDescSize = 14 // The size of index block descriptor
- indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block
- indexBlockRestartLen = 256 // The restart interval length of index block
- historyIndexBatch = 1_000_000 // The number of state history indexes for constructing or deleting as batch
+ indexBlockDescSize = 14 // The size of index block descriptor
+ indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block
+ indexBlockRestartLen = 256 // The restart interval length of index block
+ historyIndexBatch = 512 * 1024 // The number of state history indexes for constructing or deleting as batch
)
// indexBlockDesc represents a descriptor for an index block, which contains a
diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go
index 368ff78d41..893ccd6523 100644
--- a/triedb/pathdb/history_indexer.go
+++ b/triedb/pathdb/history_indexer.go
@@ -40,6 +40,11 @@ const (
stateHistoryIndexVersion = stateHistoryIndexV0 // the current state index version
trienodeHistoryIndexV0 = uint8(0) // initial version of trienode index structure
trienodeHistoryIndexVersion = trienodeHistoryIndexV0 // the current trienode index version
+
+ // estimations for calculating the batch size for atomic database commit
+ estimatedStateHistoryIndexSize = 3 // The average size of each state history index entry is approximately 2-3 bytes
+ estimatedTrienodeHistoryIndexSize = 3 // The average size of each trienode history index entry is approximately 2-3 bytes
+ estimatedIndexBatchSizeFactor = 32 // The factor accounts for the write amplification of each entry
)
// indexVersion returns the latest index version for the given history type.
@@ -150,6 +155,22 @@ func (b *batchIndexer) process(h history, id uint64) error {
return b.finish(false)
}
+// makeBatch constructs a database batch based on the number of pending entries.
+// The batch size is a rough estimate intended to minimize repeated resizing,
+// since the exact size cannot be predicted precisely in advance.
+func (b *batchIndexer) makeBatch() ethdb.Batch {
+ var size int
+ switch b.typ {
+ case typeStateHistory:
+ size = estimatedStateHistoryIndexSize
+ case typeTrienodeHistory:
+ size = estimatedTrienodeHistoryIndexSize
+ default:
+ panic(fmt.Sprintf("unknown history type %d", b.typ))
+ }
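+ // Preallocate roughly: per-entry size estimate * amplification factor * pending entries.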
+ return b.db.NewBatchWithSize(size * estimatedIndexBatchSizeFactor * b.pending)
+}
+
// finish writes the accumulated state indexes into the disk if either the
// memory limitation is reached or it's requested forcibly.
func (b *batchIndexer) finish(force bool) error {
@@ -160,7 +181,7 @@ func (b *batchIndexer) finish(force bool) error {
return nil
}
var (
- batch = b.db.NewBatch()
+ batch = b.makeBatch()
batchMu sync.RWMutex
start = time.Now()
eg errgroup.Group
diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go
index f5eb590a9a..3f45b41117 100644
--- a/triedb/pathdb/history_trienode.go
+++ b/triedb/pathdb/history_trienode.go
@@ -22,7 +22,6 @@ import (
"fmt"
"iter"
"maps"
- "math"
"slices"
"sort"
"time"
@@ -202,17 +201,6 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) {
binary.Write(&headerSection, binary.BigEndian, h.meta.block) // 8 byte
for _, owner := range h.owners {
- // Fill the header section with offsets at key and value section
- headerSection.Write(owner.Bytes()) // 32 bytes
- binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes
-
- // The offset to the value section is theoretically unnecessary, since the
- // individual value offset is already tracked in the key section. However,
- // we still keep it here for two reasons:
- // - It's cheap to store (only 4 bytes for each trie).
- // - It can be useful for decoding the trie data when key is not required (e.g., in hash mode).
- binary.Write(&headerSection, binary.BigEndian, uint32(valueSection.Len())) // 4 bytes
-
// Fill the key section with node index
var (
prevKey []byte
@@ -266,6 +254,21 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) {
if _, err := keySection.Write(trailer); err != nil {
return nil, nil, nil, err
}
+
+ // Fill the header section with the offsets of the key and value sections.
+ // Note that the key/value offsets are intentionally tracked *after* encoding
+ // them into their respective sections, ensuring each offset refers to the end
+ // position. For n trie chunks, n offset pairs are sufficient to uniquely locate
+ // the corresponding data.
+ headerSection.Write(owner.Bytes()) // 32 bytes
+ binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes
+
+ // The offset to the value section is theoretically unnecessary, since the
+ // individual value offset is already tracked in the key section. However,
+ // we still keep it here for two reasons:
+ // - It's cheap to store (only 4 bytes for each trie).
+ // - It can be useful for decoding the trie data when key is not required (e.g., in hash mode).
+ binary.Write(&headerSection, binary.BigEndian, uint32(valueSection.Len())) // 4 bytes
}
return headerSection.Bytes(), keySection.Bytes(), valueSection.Bytes(), nil
}
@@ -370,11 +373,15 @@ func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]st
for keyOff < keyLimit {
// Validate the key and value offsets within the single trie data chunk
if items%trienodeDataBlockRestartLen == 0 {
- if keyOff != int(keyOffsets[items/trienodeDataBlockRestartLen]) {
- return nil, fmt.Errorf("key offset is not matched, recorded: %d, want: %d", keyOffsets[items/trienodeDataBlockRestartLen], keyOff)
+ restartIndex := items / trienodeDataBlockRestartLen
+ if restartIndex >= len(keyOffsets) {
+ return nil, fmt.Errorf("restart index out of range: %d, available restarts: %d", restartIndex, len(keyOffsets))
+ }
+ if keyOff != int(keyOffsets[restartIndex]) {
+ return nil, fmt.Errorf("key offset is not matched, recorded: %d, want: %d", keyOffsets[restartIndex], keyOff)
}
- if valOff != int(valOffsets[items/trienodeDataBlockRestartLen]) {
- return nil, fmt.Errorf("value offset is not matched, recorded: %d, want: %d", valOffsets[items/trienodeDataBlockRestartLen], valOff)
+ if valOff != int(valOffsets[restartIndex]) {
+ return nil, fmt.Errorf("value offset is not matched, recorded: %d, want: %d", valOffsets[restartIndex], valOff)
}
}
// Resolve the entry from key section
@@ -471,22 +478,22 @@ func (h *trienodeHistory) decode(header []byte, keySection []byte, valueSection
for i := range len(owners) {
// Resolve the boundary of key section
- keyStart := keyOffsets[i]
- keyLimit := len(keySection)
- if i != len(owners)-1 {
- keyLimit = int(keyOffsets[i+1])
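+ // Offsets record the end position of each trie chunk; a chunk starts
+ // where the previous one ends (zero for the first chunk).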
+ var keyStart, keyLimit uint32
+ if i != 0 {
+ keyStart = keyOffsets[i-1]
}
- if int(keyStart) > len(keySection) || keyLimit > len(keySection) {
+ keyLimit = keyOffsets[i]
+ if int(keyStart) > len(keySection) || int(keyLimit) > len(keySection) {
return fmt.Errorf("invalid key offsets: keyStart: %d, keyLimit: %d, size: %d", keyStart, keyLimit, len(keySection))
}
// Resolve the boundary of value section
- valStart := valueOffsets[i]
- valLimit := len(valueSection)
- if i != len(owners)-1 {
- valLimit = int(valueOffsets[i+1])
+ var valStart, valLimit uint32
+ if i != 0 {
+ valStart = valueOffsets[i-1]
}
- if int(valStart) > len(valueSection) || valLimit > len(valueSection) {
+ valLimit = valueOffsets[i]
+ if int(valStart) > len(valueSection) || int(valLimit) > len(valueSection) {
return fmt.Errorf("invalid value offsets: valueStart: %d, valueLimit: %d, size: %d", valStart, valLimit, len(valueSection))
}
@@ -506,33 +513,27 @@ type iRange struct {
limit uint32
}
+func (ir iRange) len() uint32 {
+ return ir.limit - ir.start
+}
+
// singleTrienodeHistoryReader provides read access to a single trie within the
// trienode history. It stores an offset to the trie's position in the history,
// along with a set of per-node offsets that can be resolved on demand.
type singleTrienodeHistoryReader struct {
id uint64
reader ethdb.AncientReader
- valueRange iRange // value range within the total value section
+ valueRange iRange // value range within the global value section
valueInternalOffsets map[string]iRange // value offset within the single trie data
}
func newSingleTrienodeHistoryReader(id uint64, reader ethdb.AncientReader, keyRange iRange, valueRange iRange) (*singleTrienodeHistoryReader, error) {
- // TODO(rjl493456442) partial freezer read should be supported
- keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id)
+ keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id, uint64(keyRange.start), uint64(keyRange.len()))
if err != nil {
return nil, err
}
- keyStart := int(keyRange.start)
- keyLimit := int(keyRange.limit)
- if keyRange.limit == math.MaxUint32 {
- keyLimit = len(keyData)
- }
- if len(keyData) < keyStart || len(keyData) < keyLimit {
- return nil, fmt.Errorf("key section too short, start: %d, limit: %d, size: %d", keyStart, keyLimit, len(keyData))
- }
-
valueOffsets := make(map[string]iRange)
- _, err = decodeSingle(keyData[keyStart:keyLimit], func(key []byte, start int, limit int) error {
+ _, err = decodeSingle(keyData, func(key []byte, start int, limit int) error {
valueOffsets[string(key)] = iRange{
start: uint32(start),
limit: uint32(limit),
@@ -556,20 +557,7 @@ func (sr *singleTrienodeHistoryReader) read(path string) ([]byte, error) {
if !exists {
return nil, fmt.Errorf("trienode %v not found", []byte(path))
}
- // TODO(rjl493456442) partial freezer read should be supported
- valueData, err := rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id)
- if err != nil {
- return nil, err
- }
- if len(valueData) < int(sr.valueRange.start) {
- return nil, fmt.Errorf("value section too short, start: %d, size: %d", sr.valueRange.start, len(valueData))
- }
- entryStart := sr.valueRange.start + offset.start
- entryLimit := sr.valueRange.start + offset.limit
- if len(valueData) < int(entryStart) || len(valueData) < int(entryLimit) {
- return nil, fmt.Errorf("value section too short, start: %d, limit: %d, size: %d", entryStart, entryLimit, len(valueData))
- }
- return valueData[int(entryStart):int(entryLimit)], nil
+ return rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id, uint64(sr.valueRange.start+offset.start), uint64(offset.len()))
}
// trienodeHistoryReader provides read access to node data in the trie node history.
@@ -610,27 +598,23 @@ func (r *trienodeHistoryReader) decodeHeader() error {
}
for i, owner := range owners {
// Decode the key range for this trie chunk
- var keyLimit uint32
- if i == len(owners)-1 {
- keyLimit = math.MaxUint32
- } else {
- keyLimit = keyOffsets[i+1]
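+ // Offsets here also mark chunk end positions: each range runs from the
+ // previous offset (zero for the first chunk) to the current one.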
+ var keyStart uint32
+ if i != 0 {
+ keyStart = keyOffsets[i-1]
}
r.keyRanges[owner] = iRange{
- start: keyOffsets[i],
- limit: keyLimit,
+ start: keyStart,
+ limit: keyOffsets[i],
}
// Decode the value range for this trie chunk
- var valLimit uint32
- if i == len(owners)-1 {
- valLimit = math.MaxUint32
- } else {
- valLimit = valOffsets[i+1]
+ var valStart uint32
+ if i != 0 {
+ valStart = valOffsets[i-1]
}
r.valRanges[owner] = iRange{
- start: valOffsets[i],
- limit: valLimit,
+ start: valStart,
+ limit: valOffsets[i],
}
}
return nil
diff --git a/version/version.go b/version/version.go
index a0ec349603..a67c9cc55e 100644
--- a/version/version.go
+++ b/version/version.go
@@ -25,7 +25,7 @@ import (
const (
Major = 1 // Major version component of the current release
Minor = 16 // Minor version component of the current release
- Patch = 5 // Patch version component of the current release
+ Patch = 6 // Patch version component of the current release
Meta = "stable" // Version metadata to append to the version string
)