diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..65d2304f55 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + target-branch: "develop" \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 52978c6ede..cee66dd041 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,12 @@ # Changelog +## v1.7.2 +v1.7.2 is for BSC Mainnet [Osaka/Mendel hardfork](https://github.com/bnb-chain/BEPs/blob/master/BEPs/BEP-658.md),the hard fork time is 2026-04-28 02:30:00 AM UTC + +### BUGFIX +- [\#3597](https://github.com/bnb-chain/bsc/pull/3597) miner: support blob sidecar validation for bids +- [\#3590](https://github.com/bnb-chain/bsc/pull/3590) eth: delayed p2p message decoding +- [\#3601](https://github.com/bnb-chain/bsc/pull/3601) core: reject future chasing heads for DA checks + ## v1.7.1 v1.7.1 is for BSC Chapel testnet [Osaka/Mendel hardfork](https://github.com/bnb-chain/BEPs/blob/master/BEPs/BEP-658.md),the hard fork time is 2026-03-24 02:30:00 AM UTC diff --git a/cmd/devp2p/internal/ethtest/protocol.go b/cmd/devp2p/internal/ethtest/protocol.go index e84a1fb123..5c2f7d9e48 100644 --- a/cmd/devp2p/internal/ethtest/protocol.go +++ b/cmd/devp2p/internal/ethtest/protocol.go @@ -86,9 +86,3 @@ func protoOffset(proto Proto) uint64 { panic("unhandled protocol") } } - -// msgTypePtr is the constraint for protocol message types. 
-type msgTypePtr[U any] interface { - *U - Kind() byte -} diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go index f4fce0931f..7c1ca70cc0 100644 --- a/cmd/devp2p/internal/ethtest/snap.go +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" ) @@ -937,10 +938,14 @@ func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error { } // write0 request + paths, err := rlp.EncodeToRawList(tc.paths) + if err != nil { + panic(err) + } req := &snap.GetTrieNodesPacket{ ID: uint64(rand.Int63()), Root: tc.root, - Paths: tc.paths, + Paths: paths, Bytes: tc.nBytes, } msg, err := conn.snapRequest(snap.GetTrieNodesMsg, req) diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 2346e96950..ae2fe5b92b 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/uint256" ) @@ -151,7 +152,11 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { if err != nil { t.Fatalf("failed to get headers for given request: %v", err) } - if !headersMatch(expected, headers.BlockHeadersRequest) { + received, err := headers.List.Items() + if err != nil { + t.Fatalf("invalid headers received: %v", err) + } + if !headersMatch(expected, received) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) } } @@ -237,7 +242,7 @@ concurrently, with different request IDs.`) // Wait for responses. // Note they can arrive in either order. 
- resp, err := collectResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 { + resp, err := collectHeaderResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 { if msg.RequestId != 111 && msg.RequestId != 222 { t.Fatalf("response with unknown request ID: %v", msg.RequestId) } @@ -248,17 +253,11 @@ concurrently, with different request IDs.`) } // Check if headers match. - resp1 := resp[111] - if expected, err := s.chain.GetHeaders(req1); err != nil { - t.Fatalf("failed to get expected headers for request 1: %v", err) - } else if !headersMatch(expected, resp1.BlockHeadersRequest) { - t.Fatalf("header mismatch for request ID %v: \nexpected %v \ngot %v", 111, expected, resp1) - } - resp2 := resp[222] - if expected, err := s.chain.GetHeaders(req2); err != nil { - t.Fatalf("failed to get expected headers for request 2: %v", err) - } else if !headersMatch(expected, resp2.BlockHeadersRequest) { - t.Fatalf("header mismatch for request ID %v: \nexpected %v \ngot %v", 222, expected, resp2) + if err := s.checkHeadersAgainstChain(req1, resp[111]); err != nil { + t.Fatal(err) + } + if err := s.checkHeadersAgainstChain(req2, resp[222]); err != nil { + t.Fatal(err) } } @@ -303,8 +302,8 @@ same request ID. The node should handle the request by responding to both reques // Wait for the responses. They can arrive in either order, and we can't tell them // apart by their request ID, so use the number of headers instead. - resp, err := collectResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 { - id := uint64(len(msg.BlockHeadersRequest)) + resp, err := collectHeaderResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 { + id := uint64(msg.List.Len()) if id != 2 && id != 3 { t.Fatalf("invalid number of headers in response: %d", id) } @@ -315,26 +314,35 @@ same request ID. The node should handle the request by responding to both reques } // Check if headers match. 
- resp1 := resp[2] - if expected, err := s.chain.GetHeaders(request1); err != nil { - t.Fatalf("failed to get expected headers for request 1: %v", err) - } else if !headersMatch(expected, resp1.BlockHeadersRequest) { - t.Fatalf("headers mismatch: \nexpected %v \ngot %v", expected, resp1) - } - resp2 := resp[3] - if expected, err := s.chain.GetHeaders(request2); err != nil { - t.Fatalf("failed to get expected headers for request 2: %v", err) - } else if !headersMatch(expected, resp2.BlockHeadersRequest) { - t.Fatalf("headers mismatch: \nexpected %v \ngot %v", expected, resp2) + if err := s.checkHeadersAgainstChain(request1, resp[2]); err != nil { + t.Fatal(err) + } + if err := s.checkHeadersAgainstChain(request2, resp[3]); err != nil { + t.Fatal(err) + } +} + +func (s *Suite) checkHeadersAgainstChain(req *eth.GetBlockHeadersPacket, resp *eth.BlockHeadersPacket) error { + received2, err := resp.List.Items() + if err != nil { + return fmt.Errorf("invalid headers in response with request ID %v (%d items): %v", resp.RequestId, resp.List.Len(), err) + } + if expected, err := s.chain.GetHeaders(req); err != nil { + return fmt.Errorf("test chain failed to get expected headers for request: %v", err) + } else if !headersMatch(expected, received2) { + return fmt.Errorf("header mismatch for request ID %v (%d items): \nexpected %v \ngot %v", resp.RequestId, resp.List.Len(), expected, resp) } + return nil } // collectResponses waits for n messages of type T on the given connection. // The messsages are collected according to the 'identity' function. 
-func collectResponses[T any, P msgTypePtr[T]](conn *Conn, n int, identity func(P) uint64) (map[uint64]P, error) { - resp := make(map[uint64]P, n) +// +// This function is written in a generic way to handle +func collectHeaderResponses(conn *Conn, n int, identity func(*eth.BlockHeadersPacket) uint64) (map[uint64]*eth.BlockHeadersPacket, error) { + resp := make(map[uint64]*eth.BlockHeadersPacket, n) for range n { - r := new(T) + r := new(eth.BlockHeadersPacket) if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, r); err != nil { return resp, fmt.Errorf("read error: %v", err) } @@ -373,10 +381,8 @@ and expects a response.`) if got, want := headers.RequestId, req.RequestId; got != want { t.Fatalf("unexpected request id") } - if expected, err := s.chain.GetHeaders(req); err != nil { - t.Fatalf("failed to get expected block headers: %v", err) - } else if !headersMatch(expected, headers.BlockHeadersRequest) { - t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) + if err := s.checkHeadersAgainstChain(req, headers); err != nil { + t.Fatal(err) } } @@ -407,9 +413,8 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { if got, want := resp.RequestId, req.RequestId; got != want { t.Fatalf("unexpected request id in respond", got, want) } - bodies := resp.BlockBodiesResponse - if len(bodies) != len(req.GetBlockBodiesRequest) { - t.Fatalf("wrong bodies in response: expected %d bodies, got %d", len(req.GetBlockBodiesRequest), len(bodies)) + if resp.List.Len() != len(req.GetBlockBodiesRequest) { + t.Fatalf("wrong bodies in response: expected %d bodies, got %d", len(req.GetBlockBodiesRequest), resp.List.Len()) } } @@ -433,7 +438,7 @@ func (s *Suite) TestGetReceipts(t *utesting.T) { } } - // Create block bodies request. + // Create receipts request. 
req := ð.GetReceiptsPacket{ RequestId: 66, GetReceiptsRequest: (eth.GetReceiptsRequest)(hashes), @@ -449,8 +454,8 @@ func (s *Suite) TestGetReceipts(t *utesting.T) { if got, want := resp.RequestId, req.RequestId; got != want { t.Fatalf("unexpected request id in respond", got, want) } - if len(resp.List) != len(req.GetReceiptsRequest) { - t.Fatalf("wrong bodies in response: expected %d bodies, got %d", len(req.GetReceiptsRequest), len(resp.List)) + if resp.List.Len() != len(req.GetReceiptsRequest) { + t.Fatalf("wrong receipts in response: expected %d receipts, got %d", len(req.GetReceiptsRequest), resp.List.Len()) } } @@ -804,7 +809,11 @@ on another peer connection using GetPooledTransactions.`) if got, want := msg.RequestId, req.RequestId; got != want { t.Fatalf("unexpected request id in response: got %d, want %d", got, want) } - for _, got := range msg.PooledTransactionsResponse { + responseTxs, err := msg.List.Items() + if err != nil { + t.Fatalf("invalid transactions in response: %v", err) + } + for _, got := range responseTxs { if _, exists := set[got.Hash()]; !exists { t.Fatalf("unexpected tx received: %v", got.Hash()) } @@ -976,7 +985,9 @@ func (s *Suite) TestBlobViolations(t *utesting.T) { if err := conn.ReadMsg(ethProto, eth.GetPooledTransactionsMsg, req); err != nil { t.Fatalf("reading pooled tx request failed: %v", err) } - resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, PooledTransactionsResponse: test.resp} + + encTxs, _ := rlp.EncodeToRawList(test.resp) + resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, List: encTxs} if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil { t.Fatalf("writing pooled tx response failed: %v", err) } @@ -1104,7 +1115,8 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types // the good peer is connected, and has announced the tx. // proceed to send the incorrect one from the bad peer. 
- resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, PooledTransactionsResponse: eth.PooledTransactionsResponse(types.Transactions{badTx})} + encTxs, _ := rlp.EncodeToRawList([]*types.Transaction{badTx}) + resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, List: encTxs} if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil { errc <- fmt.Errorf("writing pooled tx response failed: %v", err) return @@ -1164,7 +1176,8 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types return } - resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, PooledTransactionsResponse: eth.PooledTransactionsResponse(types.Transactions{tx})} + encTxs, _ := rlp.EncodeToRawList([]*types.Transaction{tx}) + resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, List: encTxs} if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil { errc <- fmt.Errorf("writing pooled tx response failed: %v", err) return diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go index cbbbbce8d9..8ce26f3e1a 100644 --- a/cmd/devp2p/internal/ethtest/transaction.go +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/rlp" ) // sendTxs sends the given transactions to the node and @@ -51,7 +52,8 @@ func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error { return fmt.Errorf("peering failed: %v", err) } - if err = sendConn.Write(ethProto, eth.TransactionsMsg, eth.TransactionsPacket(txs)); err != nil { + encTxs, _ := rlp.EncodeToRawList(txs) + if err = sendConn.Write(ethProto, eth.TransactionsMsg, eth.TransactionsPacket{RawList: encTxs}); err != nil { return fmt.Errorf("failed to write message to connection: %v", err) } @@ -68,7 +70,8 @@ func (s *Suite) 
sendTxs(t *utesting.T, txs []*types.Transaction) error { } switch msg := msg.(type) { case *eth.TransactionsPacket: - for _, tx := range *msg { + txs, _ := msg.Items() + for _, tx := range txs { got[tx.Hash()] = true } case *eth.NewPooledTransactionHashesPacket: @@ -80,9 +83,10 @@ func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error { if err != nil { t.Logf("invalid GetBlockHeaders request: %v", err) } + encHeaders, _ := rlp.EncodeToRawList(headers) recvConn.Write(ethProto, eth.BlockHeadersMsg, ð.BlockHeadersPacket{ - RequestId: msg.RequestId, - BlockHeadersRequest: headers, + RequestId: msg.RequestId, + List: encHeaders, }) default: return fmt.Errorf("unexpected eth wire msg: %s", pretty.Sdump(msg)) @@ -167,9 +171,10 @@ func (s *Suite) sendInvalidTxs(t *utesting.T, txs []*types.Transaction) error { if err != nil { t.Logf("invalid GetBlockHeaders request: %v", err) } + encHeaders, _ := rlp.EncodeToRawList(headers) recvConn.Write(ethProto, eth.BlockHeadersMsg, ð.BlockHeadersPacket{ - RequestId: msg.RequestId, - BlockHeadersRequest: headers, + RequestId: msg.RequestId, + List: encHeaders, }) default: return fmt.Errorf("unexpected eth message: %v", pretty.Sdump(msg)) diff --git a/cmd/extradump/main.go b/cmd/extradump/main.go index 49fbc73b8f..fe67bb327a 100644 --- a/cmd/extradump/main.go +++ b/cmd/extradump/main.go @@ -12,10 +12,10 @@ import ( "sort" "strings" + "github.com/bits-and-blooms/bitset" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" - "github.com/willf/bitset" ) // follow define in parlia diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index a78d7ebde9..11be3e061b 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -25,6 +25,7 @@ require ( github.com/cometbft/cometbft v0.37.0 // indirect github.com/consensys/gnark-crypto v0.18.1 // indirect github.com/cosmos/gogoproto v1.4.1 // indirect + github.com/cosmos/iavl v0.12.0 // indirect 
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -84,21 +85,20 @@ require ( github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/tendermint/go-amino v0.14.1 // indirect - github.com/tendermint/iavl v0.12.0 // indirect github.com/tendermint/tendermint v0.31.15 // indirect github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e // indirect github.com/tklauser/go-sysconf v0.3.13 // indirect github.com/tklauser/numcpus v0.7.0 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - golang.org/x/crypto v0.45.0 // indirect + golang.org/x/crypto v0.46.0 // indirect golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/sync v0.18.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.40.0 // indirect - golang.org/x/text v0.31.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect - google.golang.org/grpc v1.69.4 // indirect - google.golang.org/protobuf v1.36.3 // indirect + golang.org/x/text v0.32.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/grpc v1.79.3 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index 9d361bfff9..e7d99e25e4 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -58,6 +58,8 @@ github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fj github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= github.com/cosmos/gogoproto v1.4.1 
h1:WoyH+0/jbCTzpKNvyav5FL1ZTWsp1im1MxEpJEzKUB8= github.com/cosmos/gogoproto v1.4.1/go.mod h1:Ac9lzL4vFpBMcptJROQ6dQ4M3pOEK5Z/l0Q9p+LoCr4= +github.com/cosmos/iavl v0.12.0 h1:lcK28rWK6Q/+iKQlMksi/pKnqQZLIdQocSfazSR4fKE= +github.com/cosmos/iavl v0.12.0/go.mod h1:5paARKGkQVKis4t9KbBnXTKdzl+sdDAhR3rIp+iAZzs= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= @@ -283,8 +285,6 @@ github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzH github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= -github.com/tendermint/iavl v0.12.0 h1:xcaFAr+ycqCj7WN1RzL2EfcBioRDOHcU1oWcg83K028= -github.com/tendermint/iavl v0.12.0/go.mod h1:EoKMMv++tDOL5qKKVnoIqtVPshRrEPeJ0WsgDOLAauM= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e/go.mod h1:Tu4lItkATkonrYuvtVjG0/rhy15qrNGNTjPdaphtZ/8= github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= @@ -309,16 +309,16 @@ go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric 
v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -330,16 +330,16 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= 
-golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -355,13 +355,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.37.0 
h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -374,21 +374,23 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= -google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= -google.golang.org/genproto/googleapis/api 
v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 5c1288fe47..3127eb16fb 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -15,10 +15,10 @@ import ( "sync" "time" + "github.com/bits-and-blooms/bitset" "github.com/ethereum/go-ethereum/common/lru" "github.com/holiman/uint256" "github.com/prysmaticlabs/prysm/v5/crypto/bls" - "github.com/willf/bitset" "golang.org/x/crypto/sha3" "github.com/ethereum/go-ethereum/accounts" diff --git a/consensus/parlia/snapshot.go b/consensus/parlia/snapshot.go index 047ddf4754..75b2e25ab3 100644 --- a/consensus/parlia/snapshot.go +++ b/consensus/parlia/snapshot.go @@ -25,6 +25,7 @@ import ( "math" "sort" + "github.com/bits-and-blooms/bitset" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/consensus" @@ -33,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - "github.com/willf/bitset" ) // Snapshot is the state of the validatorSet at a given point. diff --git a/core/blockchain.go b/core/blockchain.go index 127bf78327..3367283add 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1396,7 +1396,11 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { // UpdateChasingHead update remote best chain head, used by DA check now. 
func (bc *BlockChain) UpdateChasingHead(head *types.Header) { - bc.chasingHead.Store(head) + if head.Time > uint64(time.Now().Unix()) { + log.Warn("Ignoring future chasing head", "number", head.Number, "time", head.Time) + return + } + bc.chasingHead.Store(types.CopyHeader(head)) } // ChasingHead return the best chain head of peers. diff --git a/core/data_availability.go b/core/data_availability.go index 1f60efffa3..8bbd235d8e 100644 --- a/core/data_availability.go +++ b/core/data_availability.go @@ -66,10 +66,15 @@ func IsDataAvailable(chain consensus.ChainHeaderReader, block *types.Block) (err return nil } - // only required to check within MinTimeDurationForBlobRequests seconds's DA + // Only trusted heads may decide whether recent blob data can be discarded. + // ChasingHead is populated from remote peer sync state, so future values must + // never be allowed to suppress DA checks. highest := chain.ChasingHead() current := chain.CurrentHeader() - if highest == nil || highest.Number.Cmp(current.Number) < 0 { + now := uint64(time.Now().Unix()) + if highest == nil || highest.Number == nil || highest.Time > now { + highest = current + } else if current != nil && current.Number != nil && highest.Number.Cmp(current.Number) < 0 { highest = current } if block.Time()+params.MinTimeDurationForBlobRequests < highest.Time { diff --git a/core/data_availability_test.go b/core/data_availability_test.go index c0cd40220f..16411049fb 100644 --- a/core/data_availability_test.go +++ b/core/data_availability_test.go @@ -4,6 +4,7 @@ import ( "crypto/rand" "math/big" "testing" + "time" "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" gokzg4844 "github.com/crate-crypto/go-kzg-4844" @@ -150,6 +151,41 @@ func TestIsDataAvailable(t *testing.T) { } } +func TestIsDataAvailableRejectsFutureChasingHead(t *testing.T) { + blockTime := uint64(time.Now().Unix()) + cfg := params.ParliaTestChainConfig + + badBlob := emptyBlob + badBlob[0] = 0x01 + badSidecar := &types.BlobTxSidecar{ + Blobs: 
[]kzg4844.Blob{badBlob}, + Commitments: []kzg4844.Commitment{emptyBlobCommit}, + Proofs: []kzg4844.Proof{emptyBlobProof}, + } + makeBlock := func() *types.Block { + block := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(1), + Time: blockTime, + }).WithBody(types.Body{Transactions: types.Transactions{ + createMockDATx(cfg, badSidecar), + }}) + return block.WithSidecars(collectBlobsFromTxs(block.Header(), block.Transactions())) + } + + block := makeBlock() + reader := &mockSplitDAHeaderReader{ + config: cfg, + currentNum: 1, + currentTime: blockTime, + chasingNum: 100000, + chasingTime: blockTime + 30*86400, + } + + err := IsDataAvailable(reader, block) + require.Error(t, err) + require.NotEmpty(t, block.Sidecars()) +} + func TestCheckDataAvailableInBatch(t *testing.T) { hr := NewMockDAHeaderReader(params.ParliaTestChainConfig) tests := []struct { @@ -322,6 +358,14 @@ type mockDAHeaderReader struct { chasingHeadTime uint64 } +type mockSplitDAHeaderReader struct { + config *params.ChainConfig + currentNum uint64 + currentTime uint64 + chasingNum uint64 + chasingTime uint64 +} + func NewMockDAHeaderReader(config *params.ChainConfig) *mockDAHeaderReader { return &mockDAHeaderReader{ config: config, @@ -379,6 +423,81 @@ func (r *mockDAHeaderReader) GetVerifiedBlockByHash(hash common.Hash) *types.Hea panic("not supported") } +func (r *mockSplitDAHeaderReader) Config() *params.ChainConfig { + return r.config +} + +func (r *mockSplitDAHeaderReader) CurrentHeader() *types.Header { + return &types.Header{ + Number: new(big.Int).SetUint64(r.currentNum), + Time: r.currentTime, + } +} + +func (r *mockSplitDAHeaderReader) ChasingHead() *types.Header { + if r.chasingNum == 0 && r.chasingTime == 0 { + return nil + } + return &types.Header{ + Number: new(big.Int).SetUint64(r.chasingNum), + Time: r.chasingTime, + } +} + +func (r *mockSplitDAHeaderReader) GenesisHeader() *types.Header { + panic("not supported") +} + +func (r *mockSplitDAHeaderReader) GetHeader(hash 
common.Hash, number uint64) *types.Header { + panic("not supported") +} + +func (r *mockSplitDAHeaderReader) GetHeaderByNumber(number uint64) *types.Header { + panic("not supported") +} + +func (r *mockSplitDAHeaderReader) GetHeaderByHash(hash common.Hash) *types.Header { + panic("not supported") +} + +func (r *mockSplitDAHeaderReader) GetTd(hash common.Hash, number uint64) *big.Int { + panic("not supported") +} + +func (r *mockSplitDAHeaderReader) GetHighestVerifiedHeader() *types.Header { + panic("not supported") +} + +func (r *mockSplitDAHeaderReader) GetVerifiedBlockByHash(hash common.Hash) *types.Header { + panic("not supported") +} + +func TestUpdateChasingHeadRejectsFutureHeader(t *testing.T) { + now := uint64(time.Now().Unix()) + bc := &BlockChain{hc: &HeaderChain{}} + bc.hc.SetCurrentHeader(&types.Header{ + Number: big.NewInt(100), + Time: now, + }) + + valid := &types.Header{ + Number: big.NewInt(101), + Time: now, + } + bc.UpdateChasingHead(valid) + require.NotNil(t, bc.ChasingHead()) + require.Equal(t, valid.Number.Uint64(), bc.ChasingHead().Number.Uint64()) + require.Equal(t, valid.Time, bc.ChasingHead().Time) + + future := &types.Header{ + Number: big.NewInt(102), + Time: now + uint64(time.Hour/time.Second), + } + bc.UpdateChasingHead(future) + require.Equal(t, valid.Number.Uint64(), bc.ChasingHead().Number.Uint64()) + require.Equal(t, valid.Time, bc.ChasingHead().Time) +} + func createMockDATx(config *params.ChainConfig, sidecar *types.BlobTxSidecar) *types.Transaction { if sidecar == nil { tx := &types.DynamicFeeTx{ diff --git a/core/txpool/validation.go b/core/txpool/validation.go index b7a712f0c0..76338e04ec 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -151,7 +151,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types return fmt.Errorf("%w: gas tip cap %v, minimum needed %v", ErrTxGasPriceTooLow, tx.GasTipCap(), opts.MinTip) } if tx.Type() == types.BlobTxType { - return validateBlobTx(tx, 
head, opts) + return ValidateBlobTx(tx, head, opts) } if tx.Type() == types.SetCodeTxType { if len(tx.SetCodeAuthorizations()) == 0 { @@ -161,8 +161,9 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types return nil } -// validateBlobTx implements the blob-transaction specific validations. -func validateBlobTx(tx *types.Transaction, head *types.Header, opts *ValidationOptions) error { +// ValidateBlobTx validates blob-transaction specific fields including sidecar +// commitment hashes and KZG proofs. +func ValidateBlobTx(tx *types.Transaction, head *types.Header, opts *ValidationOptions) error { sidecar := tx.BlobTxSidecar() if sidecar == nil { return errors.New("missing sidecar in blob transaction") diff --git a/core/vm/contracts_lightclient.go b/core/vm/contracts_lightclient.go index 62ddf0c465..d83c537644 100644 --- a/core/vm/contracts_lightclient.go +++ b/core/vm/contracts_lightclient.go @@ -7,7 +7,7 @@ import ( "net/url" "strings" - "github.com/tendermint/iavl" + "github.com/cosmos/iavl" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/secp256k1" cmn "github.com/tendermint/tendermint/libs/common" diff --git a/core/vm/contracts_lightclient_test.go b/core/vm/contracts_lightclient_test.go index e669fbaea7..e3211b07da 100644 --- a/core/vm/contracts_lightclient_test.go +++ b/core/vm/contracts_lightclient_test.go @@ -5,9 +5,9 @@ import ( "encoding/hex" "testing" + "github.com/cosmos/iavl" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/iavl" "github.com/tendermint/tendermint/crypto/merkle" cmn "github.com/tendermint/tendermint/libs/common" diff --git a/core/vm/lightclient/v1/multistoreproof.go b/core/vm/lightclient/v1/multistoreproof.go index 742e8bcdb7..cec65041b7 100644 --- a/core/vm/lightclient/v1/multistoreproof.go +++ b/core/vm/lightclient/v1/multistoreproof.go @@ -4,7 +4,7 @@ import ( "bytes" "fmt" - "github.com/tendermint/iavl" + 
"github.com/cosmos/iavl" "github.com/tendermint/tendermint/crypto/merkle" cmn "github.com/tendermint/tendermint/libs/common" ) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index f6491db91a..7a0dcb4590 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -262,10 +262,12 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes) - bodies := make([]*eth.BlockBody, len(blobs)) + bodies := make([]*types.Body, len(blobs)) + ethbodies := make([]eth.BlockBody, len(blobs)) for i, blob := range blobs { - bodies[i] = new(eth.BlockBody) + bodies[i] = new(types.Body) rlp.DecodeBytes(blob, bodies[i]) + rlp.DecodeBytes(blob, ðbodies[i]) } var ( txsHashes = make([]common.Hash, len(bodies)) @@ -281,9 +283,13 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et Peer: dlp.id, } res := ð.Response{ - Req: req, - Res: (*eth.BlockBodiesResponse)(&bodies), - Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, + Req: req, + Res: (*eth.BlockBodiesResponse)(ðbodies), + Meta: eth.BlockBodyHashes{ + TransactionRoots: txsHashes, + UncleHashes: uncleHashes, + WithdrawalRoots: withdrawalHashes, + }, Time: 1, Done: make(chan error, 1), // Ignore the returned status } @@ -332,14 +338,14 @@ func (dlp *downloadTesterPeer) ID() string { // RequestAccountRange fetches a batch of accounts rooted in a specific account // trie, starting with the origin. 
-func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { +func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes int) error { // Create the request and service it req := &snap.GetAccountRangePacket{ ID: id, Root: root, Origin: origin, Limit: limit, - Bytes: bytes, + Bytes: uint64(bytes), } slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req) @@ -358,7 +364,7 @@ func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limi // RequestStorageRanges fetches a batch of storage slots belonging to one or // more accounts. If slots from only one account is requested, an origin marker // may also be used to retrieve from there. -func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { +func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes int) error { // Create the request and service it req := &snap.GetStorageRangesPacket{ ID: id, @@ -366,7 +372,7 @@ func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, Root: root, Origin: origin, Limit: limit, - Bytes: bytes, + Bytes: uint64(bytes), } storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req) @@ -383,25 +389,28 @@ func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, } // RequestByteCodes fetches a batch of bytecodes by hash. 
-func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { +func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes int) error { req := &snap.GetByteCodesPacket{ ID: id, Hashes: hashes, - Bytes: bytes, + Bytes: uint64(bytes), } codes := snap.ServiceGetByteCodesQuery(dlp.chain, req) go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes) return nil } -// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in -// a specific state trie. -func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error { +// RequestTrieNodes fetches a batch of account or storage trie nodes. +func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, count int, paths []snap.TrieNodePathSet, bytes int) error { + encPaths, err := rlp.EncodeToRawList(paths) + if err != nil { + panic(err) + } req := &snap.GetTrieNodesPacket{ ID: id, Root: root, - Paths: paths, - Bytes: bytes, + Paths: encPaths, + Bytes: uint64(bytes), } nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now()) go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes) diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 49f3644fd9..aa5f8e1801 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -89,15 +89,14 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the body data and delivering it to the downloader's queue. 
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - txs, uncles, withdrawals, sidecars := packet.Res.(*eth.BlockBodiesResponse).Unpack() - hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} - - accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2], sidecars) + resp := packet.Res.(*eth.BlockBodiesResponse) + meta := packet.Meta.(eth.BlockBodyHashes) + accepted, err := q.queue.DeliverBodies(peer.id, meta, *resp) switch { - case err == nil && len(txs) == 0: + case err == nil && len(*resp) == 0: peer.log.Trace("Requested bodies delivered") case err == nil: - peer.log.Trace("Delivered new batch of bodies", "count", len(txs), "accepted", accepted) + peer.log.Trace("Delivered new batch of bodies", "count", len(*resp), "accepted", accepted) default: peer.log.Debug("Failed to deliver retrieved bodies", "err", err) } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 217cffa136..8f2845f240 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -29,11 +29,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" ) @@ -790,70 +789,69 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []comm // DeliverBodies injects a block body retrieval response into the results queue. // The method returns the number of blocks bodies accepted from the delivery and // also wakes any threads waiting for data delivery. 
-func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash, - uncleLists [][]*types.Header, uncleListHashes []common.Hash, - withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash, sidecars []types.BlobSidecars, -) (int, error) { +func (q *queue) DeliverBodies(id string, hashes eth.BlockBodyHashes, bodies []eth.BlockBody) (int, error) { q.lock.Lock() defer q.lock.Unlock() + var txLists [][]*types.Transaction + var uncleLists [][]*types.Header + var withdrawalLists [][]*types.Withdrawal + var sidecarLists []types.BlobSidecars + validate := func(index int, header *types.Header) error { - if txListHashes[index] != header.TxHash { + if hashes.TransactionRoots[index] != header.TxHash { return errInvalidBody } - if uncleListHashes[index] != header.UncleHash { + if hashes.UncleHashes[index] != header.UncleHash { return errInvalidBody } if header.WithdrawalsHash == nil { // nil hash means that withdrawals should not be present in body - if withdrawalLists[index] != nil { + if bodies[index].Withdrawals != nil { return errInvalidBody } } else { // non-nil hash: body must have withdrawals - if withdrawalLists[index] == nil { + if bodies[index].Withdrawals == nil { return errInvalidBody } - if withdrawalListHashes[index] != *header.WithdrawalsHash { + if hashes.WithdrawalRoots[index] != *header.WithdrawalsHash { return errInvalidBody } } - // Blocks must have a number of blobs corresponding to the header gas usage, - // and zero before the Cancun hardfork. 
- var blobs int - for _, tx := range txLists[index] { - // Count the number of blobs to validate against the header's blobGasUsed - blobs += len(tx.BlobHashes()) - - // Validate the data blobs individually too - if tx.Type() == types.BlobTxType { - if len(tx.BlobHashes()) == 0 { - return errInvalidBody - } - for _, hash := range tx.BlobHashes() { - if !kzg4844.IsValidVersionedHash(hash[:]) { - return errInvalidBody - } - } - if tx.BlobTxSidecar() != nil { - return errInvalidBody - } - } + + // decode + txs, err := bodies[index].Transactions.Items() + if err != nil { + return fmt.Errorf("%w: bad transactions: %v", errInvalidBody, err) } - if header.BlobGasUsed != nil { - if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated - return errInvalidBody + txLists = append(txLists, txs) + uncles, err := bodies[index].Uncles.Items() + if err != nil { + return fmt.Errorf("%w: bad uncles: %v", errInvalidBody, err) + } + uncleLists = append(uncleLists, uncles) + if bodies[index].Withdrawals != nil { + withdrawals, err := bodies[index].Withdrawals.Items() + if err != nil { + return fmt.Errorf("%w: bad withdrawals: %v", errInvalidBody, err) } + withdrawalLists = append(withdrawalLists, withdrawals) } else { - if blobs != 0 { - return errInvalidBody - } + withdrawalLists = append(withdrawalLists, nil) } - - // do some sanity check for sidecar - for _, sidecar := range sidecars[index] { - if err := sidecar.SanityCheck(header.Number, header.Hash()); err != nil { - return err + if bodies[index].Sidecars != nil { + sidecars, err := bodies[index].Sidecars.Items() + if err != nil { + return fmt.Errorf("%w: bad sidecars: %v", errInvalidBody, err) + } + for _, sidecar := range sidecars { + if err := sidecar.SanityCheck(header.Number, header.Hash()); err != nil { + return err + } } + sidecarLists = append(sidecarLists, sidecars) + } else { + sidecarLists = append(sidecarLists, nil) } 
return nil } @@ -862,11 +860,12 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH result.Transactions = txLists[index] result.Uncles = uncleLists[index] result.Withdrawals = withdrawalLists[index] - result.Sidecars = sidecars[index] + result.Sidecars = sidecarLists[index] result.SetBodyDone() } + nresults := len(hashes.TransactionRoots) return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, - bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct) + bodyReqTimer, bodyInMeter, bodyDropMeter, nresults, validate, reconstruct) } // DeliverReceipts injects a receipt retrieval response into the results queue. diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go index f66c2c4ab7..c7e8a0d1d6 100644 --- a/eth/downloader/queue_test.go +++ b/eth/downloader/queue_test.go @@ -30,8 +30,10 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) @@ -323,26 +325,31 @@ func XTestDelivery(t *testing.T) { emptyList []*types.Header txset [][]*types.Transaction uncleset [][]*types.Header + bodies []eth.BlockBody ) numToSkip := rand.Intn(len(f.Headers)) for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] { - txset = append(txset, world.getTransactions(hdr.Number.Uint64())) + txs := world.getTransactions(hdr.Number.Uint64()) + txset = append(txset, txs) uncleset = append(uncleset, emptyList) + txsList, _ := rlp.EncodeToRawList(txs) + bodies = append(bodies, eth.BlockBody{Transactions: txsList}) + } + hashes := eth.BlockBodyHashes{ + TransactionRoots: make([]common.Hash, len(txset)), + UncleHashes: make([]common.Hash, len(uncleset)), + WithdrawalRoots: make([]common.Hash, len(txset)), } - 
var ( - txsHashes = make([]common.Hash, len(txset)) - uncleHashes = make([]common.Hash, len(uncleset)) - ) hasher := trie.NewStackTrie(nil) for i, txs := range txset { - txsHashes[i] = types.DeriveSha(types.Transactions(txs), hasher) + hashes.TransactionRoots[i] = types.DeriveSha(types.Transactions(txs), hasher) } for i, uncles := range uncleset { - uncleHashes[i] = types.CalcUncleHash(uncles) + hashes.UncleHashes[i] = types.CalcUncleHash(uncles) } + time.Sleep(100 * time.Millisecond) - _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil, nil) - if err != nil { + if _, err := q.DeliverBodies(peer.id, hashes, bodies); err != nil { fmt.Printf("delivered %d bodies %v\n", len(txset), err) } } else { diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index ee909e9e66..a4726974de 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -91,7 +91,7 @@ type bodyRequesterFn func([]common.Hash, chan *eth.Response) (*eth.Request, erro type headerVerifierFn func(header *types.Header) error // blockBroadcasterFn is a callback type for broadcasting a block to connected peers. -type blockBroadcasterFn func(block *types.Block, propagate bool) +type blockBroadcasterFn func(peer string, block *types.Block, propagate bool) // chainHeightFn is a callback type to retrieve the current chain height. type chainHeightFn func() uint64 @@ -598,7 +598,32 @@ func (f *BlockFetcher) loop() { case res := <-resCh: res.Done <- nil // Ignoring withdrawals here, will set it to empty later if EmptyWithdrawalsHash in header. 
- txs, uncles, _, sidecars := res.Res.(*eth.BlockBodiesResponse).Unpack() + bodies := *res.Res.(*eth.BlockBodiesResponse) + txs := make([][]*types.Transaction, len(bodies)) + uncles := make([][]*types.Header, len(bodies)) + sidecars := make([]types.BlobSidecars, len(bodies)) + for i, body := range bodies { + var err error + if txs[i], err = body.Transactions.Items(); err != nil { + log.Debug("Failed to decode block body transactions", "peer", peer, "err", err) + f.dropPeer(peer) + return + } + if uncles[i], err = body.Uncles.Items(); err != nil { + log.Debug("Failed to decode block body uncles", "peer", peer, "err", err) + f.dropPeer(peer) + return + } + if body.Sidecars != nil { + if sidecars[i], err = body.Sidecars.Items(); err != nil { + log.Debug("Failed to decode block body sidecars", "peer", peer, "err", err) + f.dropPeer(peer) + return + } + } else { + sidecars[i] = nil + } + } f.FilterBodies(peer, txs, uncles, sidecars, time.Now()) case <-timeout.C: @@ -912,7 +937,7 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) { case nil: // All ok, quickly propagate to our peers blockBroadcastOutTimer.UpdateSince(block.ReceivedAt) - go f.broadcastBlock(block, true) + go f.broadcastBlock(peer, block, true) case consensus.ErrFutureBlock: log.Error("Received future block", "peer", peer, "number", block.Number(), "hash", hash, "err", err) @@ -935,7 +960,7 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) { } // If import succeeded, broadcast the block blockAnnounceOutTimer.UpdateSince(block.ReceivedAt) - go f.broadcastBlock(block, false) + go f.broadcastBlock(peer, block, false) // Invoke the testing hook if needed if f.importedHook != nil { diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 0ff06a3231..acb824171c 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -34,10 +34,33 @@ import ( "github.com/ethereum/go-ethereum/eth/protocols/eth" 
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb" ) +func encodeRL[T any](slice []T) rlp.RawList[T] { + rl, err := rlp.EncodeToRawList(slice) + if err != nil { + panic(err) + } + return rl +} + +func encodeBody(b *types.Block) eth.BlockBody { + return eth.BlockBody{ + Transactions: encodeRL([]*types.Transaction(b.Transactions())), + Uncles: encodeRL(b.Uncles()), + } +} + +func encodeBodyFromParts(txs []*types.Transaction, uncles []*types.Header) eth.BlockBody { + return eth.BlockBody{ + Transactions: encodeRL(txs), + Uncles: encodeRL(uncles), + } +} + var ( testdb = rawdb.NewMemoryDatabase() testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -128,7 +151,7 @@ func (f *fetcherTester) verifyHeader(header *types.Header) error { } // broadcastBlock is a nop placeholder for the block broadcasting. -func (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) { +func (f *fetcherTester) broadcastBlock(peer string, block *types.Block, propagate bool) { } // chainHeight retrieves the current height (block number) of the chain. 
@@ -227,19 +250,16 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ } } // Return on a new thread - bodies := make([]*eth.BlockBody, len(transactions)) + bodies := make(eth.BlockBodiesResponse, len(transactions)) for i, txs := range transactions { - bodies[i] = ð.BlockBody{ - Transactions: txs, - Uncles: uncles[i], - } + bodies[i] = encodeBodyFromParts(txs, uncles[i]) } req := ð.Request{ Peer: peer, } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesResponse)(&bodies), + Res: &bodies, Time: drift, Done: make(chan error, 1), // Ignore the returned status } @@ -1001,17 +1021,14 @@ func newMockBodyRequester(delay time.Duration) *mockBodyRequester { func (m *mockBodyRequester) requestBodies(hashes []common.Hash, ch chan *eth.Response) (*eth.Request, error) { go func() { time.Sleep(m.delay) - var bodies []*eth.BlockBody + var bodies eth.BlockBodiesResponse for _, hash := range hashes { if body, ok := m.bodies[hash]; ok { - bodies = append(bodies, ð.BlockBody{ - Transactions: body.Transactions, - Uncles: body.Uncles, - }) + bodies = append(bodies, encodeBodyFromParts(body.Transactions, body.Uncles)) } } ch <- ð.Response{ - Res: (*eth.BlockBodiesResponse)(&bodies), + Res: &bodies, } }() return ð.Request{}, nil @@ -1047,7 +1064,7 @@ func TestBlockFetcherMultiplePeers(t *testing.T) { return nil }, // broadcastBlock - func(block *types.Block, propagate bool) {}, + func(peer string, block *types.Block, propagate bool) {}, // chainHeight - returns the height of the highest block in the chain func() uint64 { var maxHeight uint64 = 0 @@ -1100,18 +1117,15 @@ func TestBlockFetcherMultiplePeers(t *testing.T) { bodyRequester := func(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { go func() { // Return requested body - bodies := make([]*eth.BlockBody, 0) + var bodies eth.BlockBodiesResponse for _, hash := range hashes { if hash == block.Hash() { - bodies = append(bodies, ð.BlockBody{ - Transactions: block.Transactions(), - 
Uncles: block.Uncles(), - }) + bodies = append(bodies, encodeBody(block)) } } res := ð.Response{ Req: ð.Request{}, - Res: (*eth.BlockBodiesResponse)(&bodies), + Res: &bodies, Done: make(chan error, 1), } sink <- res @@ -1195,18 +1209,15 @@ func TestBlockFetcherMultiplePeers(t *testing.T) { bodyRequester := func(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { go func() { // Return requested body - bodies := make([]*eth.BlockBody, 0) + var bodies eth.BlockBodiesResponse for _, hash := range hashes { if hash == newBlock.Hash() { - bodies = append(bodies, ð.BlockBody{ - Transactions: newBlock.Transactions(), - Uncles: newBlock.Uncles(), - }) + bodies = append(bodies, encodeBody(newBlock)) } } res := ð.Response{ Req: ð.Request{}, - Res: (*eth.BlockBodiesResponse)(&bodies), + Res: &bodies, Done: make(chan error, 1), } sink <- res @@ -1268,7 +1279,7 @@ func TestQuickBlockFetching(t *testing.T) { fetcher := NewBlockFetcher( blockRetriever.getBlock, func(header *types.Header) error { return nil }, - func(block *types.Block, propagate bool) {}, + func(peer string, block *types.Block, propagate bool) {}, func() uint64 { return 10 }, // Current height func() uint64 { return 5 }, // Finalized height func(blocks types.Blocks) (int, error) { diff --git a/eth/handler.go b/eth/handler.go index 586a4fab61..83c628b23c 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -317,15 +317,21 @@ func newHandler(config *handlerConfig) (*handler, error) { return h.chain.InsertChain(blocks) } - broadcastBlockWithCheck := func(block *types.Block, propagate bool) { + broadcastBlockWithCheck := func(peer string, block *types.Block, propagate bool) { if propagate { if !(block.Header().WithdrawalsHash == nil && block.Withdrawals() == nil) && !(block.Header().EmptyWithdrawalsHash() && block.Withdrawals() != nil && len(block.Withdrawals()) == 0) { - log.Error("Propagated block has invalid withdrawals") + log.Error("Propagated block has invalid withdrawals", "peer", peer) return 
} if err := core.IsDataAvailable(h.chain, block); err != nil { - log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err) + var peerAddr string + if p := h.peers.peer(peer); p != nil { + if addr := p.RemoteAddr(); addr != nil { + peerAddr = addr.String() + } + } + log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "peer", peer[:16], "peerAddr", peerAddr, "err", err) return } } diff --git a/eth/handler_eth.go b/eth/handler_eth.go index d96b3b849c..eb3f231be2 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -72,19 +72,42 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { return h.txFetcher.Notify(peer.ID(), packet.Types, packet.Sizes, packet.Hashes) case *eth.TransactionsPacket: - for _, tx := range *packet { - if tx.Type() == types.BlobTxType { - return errors.New("disallowed broadcast blob transaction") - } + txs, err := packet.Items() + if err != nil { + return fmt.Errorf("Transactions: %v", err) + } + if err := handleTransactions(peer, txs, true); err != nil { + return fmt.Errorf("Transactions: %v", err) + } + return h.txFetcher.Enqueue(peer.ID(), txs, false) + + case *eth.PooledTransactionsPacket: + txs, err := packet.List.Items() + if err != nil { + return fmt.Errorf("PooledTransactions: %v", err) } - return h.txFetcher.Enqueue(peer.ID(), *packet, false) - - case *eth.PooledTransactionsResponse: - // If we receive any blob transactions missing sidecars, or with - // sidecars that don't correspond to the versioned hashes reported - // in the header, disconnect from the sending peer. 
- for _, tx := range *packet { - if tx.Type() == types.BlobTxType { + if err := handleTransactions(peer, txs, false); err != nil { + return fmt.Errorf("PooledTransactions: %v", err) + } + return h.txFetcher.Enqueue(peer.ID(), txs, true) + + default: + return fmt.Errorf("unexpected eth packet type: %T", packet) + } +} + +// handleTransactions marks all given transactions as known to the peer +// and performs basic validations. +func handleTransactions(peer *eth.Peer, list []*types.Transaction, directBroadcast bool) error { + seen := make(map[common.Hash]struct{}) + for _, tx := range list { + if tx.Type() == types.BlobTxType { + if directBroadcast { + return errors.New("disallowed broadcast blob transaction") + } else { + // If we receive any blob transactions missing sidecars, or with + // sidecars that don't correspond to the versioned hashes reported + // in the header, disconnect from the sending peer. if tx.BlobTxSidecar() == nil { return errors.New("received sidecar-less blob transaction") } @@ -93,11 +116,18 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { } } } - return h.txFetcher.Enqueue(peer.ID(), *packet, true) - default: - return fmt.Errorf("unexpected eth packet type: %T", packet) + // Check for duplicates. + hash := tx.Hash() + if _, exists := seen[hash]; exists { + return fmt.Errorf("multiple copies of the same hash %v", hash) + } + seen[hash] = struct{}{} + + // Mark as known. 
+ peer.MarkTransaction(hash) } + return nil } // handleBlockAnnounces is invoked from a peer's message handler when it transmits a diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 76c1882b28..e3aead1caa 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -63,11 +63,19 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { return nil case *eth.TransactionsPacket: - h.txBroadcasts.Send(([]*types.Transaction)(*packet)) + txs, err := packet.Items() + if err != nil { + return err + } + h.txBroadcasts.Send(txs) return nil - case *eth.PooledTransactionsResponse: - h.txBroadcasts.Send(([]*types.Transaction)(*packet)) + case *eth.PooledTransactionsPacket: + txs, err := packet.List.Items() + if err != nil { + return err + } + h.txBroadcasts.Send(txs) return nil default: diff --git a/eth/protocols/eth/dispatcher.go b/eth/protocols/eth/dispatcher.go index cba40596fc..3f78fb4646 100644 --- a/eth/protocols/eth/dispatcher.go +++ b/eth/protocols/eth/dispatcher.go @@ -22,6 +22,7 @@ import ( "time" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/tracker" ) var ( @@ -47,9 +48,10 @@ type Request struct { sink chan *Response // Channel to deliver the response on cancel chan struct{} // Channel to cancel requests ahead of time - code uint64 // Message code of the request packet - want uint64 // Message code of the response packet - data interface{} // Data content of the request packet + code uint64 // Message code of the request packet + want uint64 // Message code of the response packet + numItems int // Number of requested items + data interface{} // Data content of the request packet Peer string // Demultiplexer if cross-peer requests are batched together Sent time.Time // Timestamp when the request was sent @@ -190,20 +192,31 @@ func (p *Peer) dispatchResponse(res *Response, metadata func() interface{}) erro func (p *Peer) dispatcher() { pending := make(map[uint64]*Request) +loop: for { select 
{ case reqOp := <-p.reqDispatch: req := reqOp.req req.Sent = time.Now() - requestTracker.Track(p.id, p.version, req.code, req.want, req.id) - err := p2p.Send(p.rw, req.code, req.data) - reqOp.fail <- err - - if err == nil { - pending[req.id] = req + treq := tracker.Request{ + ID: req.id, + ReqCode: req.code, + RespCode: req.want, + Size: req.numItems, + } + if err := p.tracker.Track(treq); err != nil { + reqOp.fail <- err + continue loop + } + if err := p2p.Send(p.rw, req.code, req.data); err != nil { + reqOp.fail <- err + continue loop } + pending[req.id] = req + reqOp.fail <- nil + case cancelOp := <-p.reqCancel: // Retrieve the pending request to cancel and short circuit if it // has already been serviced and is not available anymore @@ -220,9 +233,6 @@ func (p *Peer) dispatcher() { res := resOp.res res.Req = pending[res.id] - // Independent if the request exists or not, track this packet - requestTracker.Fulfil(p.id, p.version, res.code, res.id) - switch { case res.Req == nil: // Response arrived with an untracked ID. 
Since even cancelled @@ -249,6 +259,7 @@ func (p *Peer) dispatcher() { } case <-p.term: + p.tracker.Stop() return } } diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index f4490b7cfd..12f4a4b016 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -25,9 +25,11 @@ import ( "math/big" "math/rand" "os" + "reflect" "testing" "time" + "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/beacon" @@ -46,6 +48,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "github.com/holiman/uint256" ) @@ -365,8 +368,8 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { GetBlockHeadersRequest: tt.query, }) if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{ - RequestId: 123, - BlockHeadersRequest: headers, + RequestId: 123, + List: encodeRL(headers), }); err != nil { t.Errorf("test %d: headers mismatch: %v", i, err) } @@ -379,7 +382,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { RequestId: 456, GetBlockHeadersRequest: tt.query, }) - expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers} + expected := &BlockHeadersPacket{RequestId: 456, List: encodeRL(headers)} if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil { t.Errorf("test %d by hash: headers mismatch: %v", i, err) } @@ -442,7 +445,7 @@ func testGetBlockBodies(t *testing.T, protocol uint) { // Collect the hashes to request, and the response to expect var ( hashes []common.Hash - bodies []*BlockBody + bodies []BlockBody seen = make(map[int64]bool) ) for j := 0; j < tt.random; j++ { @@ -454,7 +457,7 @@ func testGetBlockBodies(t *testing.T, protocol uint) { block := backend.chain.GetBlockByNumber(uint64(num)) hashes = append(hashes, block.Hash()) 
if len(bodies) < tt.expected { - bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles(), Withdrawals: block.Withdrawals()}) + bodies = append(bodies, encodeBody(block)) } break } @@ -464,7 +467,7 @@ func testGetBlockBodies(t *testing.T, protocol uint) { hashes = append(hashes, hash) if tt.available[j] && len(bodies) < tt.expected { block := backend.chain.GetBlockByHash(hash) - bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles(), Withdrawals: block.Withdrawals()}) + bodies = append(bodies, encodeBody(block)) } } @@ -474,14 +477,69 @@ func testGetBlockBodies(t *testing.T, protocol uint) { GetBlockBodiesRequest: hashes, }) if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{ - RequestId: 123, - BlockBodiesResponse: bodies, + RequestId: 123, + List: encodeRL(bodies), }); err != nil { t.Fatalf("test %d: bodies mismatch: %v", i, err) } } } +func encodeBody(b *types.Block) BlockBody { + body := BlockBody{ + Transactions: encodeRL([]*types.Transaction(b.Transactions())), + Uncles: encodeRL(b.Uncles()), + } + if b.Withdrawals() != nil { + wd := encodeRL([]*types.Withdrawal(b.Withdrawals())) + body.Withdrawals = &wd + } + return body +} + +func TestHashBody(t *testing.T) { + key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + signer := types.NewCancunSigner(big.NewInt(1)) + + // create block 1 + header := &types.Header{Number: big.NewInt(11)} + txs := []*types.Transaction{ + types.MustSignNewTx(key, signer, &types.DynamicFeeTx{ + ChainID: big.NewInt(1), + Nonce: 1, + Data: []byte("testing"), + }), + types.MustSignNewTx(key, signer, &types.LegacyTx{ + Nonce: 2, + Data: []byte("testing"), + }), + } + uncles := []*types.Header{{Number: big.NewInt(10)}} + body1 := &types.Body{Transactions: txs, Uncles: uncles} + block1 := types.NewBlock(header, body1, nil, trie.NewStackTrie(nil)) + + // create block 2 (has withdrawals) + header2 := 
&types.Header{Number: big.NewInt(12)} + body2 := &types.Body{ + Withdrawals: []*types.Withdrawal{{Index: 10}, {Index: 11}}, + } + block2 := types.NewBlock(header2, body2, nil, trie.NewStackTrie(nil)) + + expectedHashes := BlockBodyHashes{ + TransactionRoots: []common.Hash{block1.TxHash(), block2.TxHash()}, + WithdrawalRoots: []common.Hash{common.Hash{}, *block2.Header().WithdrawalsHash}, + UncleHashes: []common.Hash{block1.UncleHash(), block2.UncleHash()}, + } + + // compute hash like protocol handler does + protocolBodies := []BlockBody{encodeBody(block1), encodeBody(block2)} + hashes := hashBodyParts(protocolBodies) + if !reflect.DeepEqual(hashes, expectedHashes) { + t.Errorf("wrong hashes: %s", spew.Sdump(hashes)) + t.Logf("expected: %s", spew.Sdump(expectedHashes)) + } +} + // Tests that the transaction receipts can be retrieved based on hashes. func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } @@ -533,13 +591,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) { // Collect the hashes to request, and the response to expect var ( hashes []common.Hash - receipts []*ReceiptList68 + receipts rlp.RawList[*ReceiptList68] ) for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ { block := backend.chain.GetBlockByNumber(i) hashes = append(hashes, block.Hash()) trs := backend.chain.GetReceiptsByHash(block.Hash()) - receipts = append(receipts, NewReceiptList68(trs)) + receipts.Append(NewReceiptList68(trs)) } // Send the hash request and verify the response @@ -841,10 +899,18 @@ func testGetPooledTransaction(t *testing.T, blobTx bool) { RequestId: 123, GetPooledTransactionsRequest: []common.Hash{tx.Hash()}, }) - if err := p2p.ExpectMsg(peer.app, PooledTransactionsMsg, PooledTransactionsPacket{ - RequestId: 123, - PooledTransactionsResponse: []*types.Transaction{tx}, + if err := p2p.ExpectMsg(peer.app, PooledTransactionsMsg, &PooledTransactionsPacket{ + RequestId: 123, + List: encodeRL([]*types.Transaction{tx}), }); 
err != nil { t.Errorf("pooled transaction mismatch: %v", err) } } + +func encodeRL[T any](slice []T) rlp.RawList[T] { + rl, err := rlp.EncodeToRawList(slice) + if err != nil { + panic(err) + } + return rl +} diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index 81c3501cdd..ca9b142e7f 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -17,22 +17,21 @@ package eth import ( + "bytes" "encoding/json" "fmt" - "time" + "math" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/tracker" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) -// requestTracker is a singleton tracker for eth/66 and newer request times. -var requestTracker = tracker.New(ProtocolName, 5*time.Minute) - func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { // Decode the complex header query var query GetBlockHeadersPacket @@ -244,7 +243,12 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequ continue } sidecars := chain.GetSidecarsByHash(hash) - bodyWithSidecars := &BlockBody{ + bodyWithSidecars := &struct { + Transactions []*types.Transaction + Uncles []*types.Header + Withdrawals []*types.Withdrawal `rlp:"optional"` + Sidecars types.BlobSidecars `rlp:"optional"` + }{ Transactions: body.Transactions, Uncles: body.Uncles, Withdrawals: body.Withdrawals, @@ -411,9 +415,18 @@ func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(res); err != nil { return err } + tresp := tracker.Response{ID: res.RequestId, MsgCode: BlockHeadersMsg, Size: res.List.Len()} + if err := peer.tracker.Fulfil(tresp); err != nil { + return fmt.Errorf("BlockHeaders: %w", err) + } + headers, err := res.List.Items() + if err != nil { + return 
fmt.Errorf("BlockHeaders: %w", err) + } + metadata := func() interface{} { - hashes := make([]common.Hash, len(res.BlockHeadersRequest)) - for i, header := range res.BlockHeadersRequest { + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { hashes[i] = header.Hash() } return hashes @@ -421,7 +434,7 @@ func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockHeadersMsg, - Res: &res.BlockHeadersRequest, + Res: (*BlockHeadersRequest)(&headers), }, metadata) } @@ -431,53 +444,150 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(res); err != nil { return err } - metadata := func() interface{} { - var ( - txsHashes = make([]common.Hash, len(res.BlockBodiesResponse)) - uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse)) - withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse)) - ) - hasher := trie.NewStackTrie(nil) - for i, body := range res.BlockBodiesResponse { - txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) - uncleHashes[i] = types.CalcUncleHash(body.Uncles) - if body.Withdrawals != nil { - withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher) - } - } - return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes} + + // Check against the request. + length := res.List.Len() + tresp := tracker.Response{ID: res.RequestId, MsgCode: BlockBodiesMsg, Size: length} + if err := peer.tracker.Fulfil(tresp); err != nil { + return fmt.Errorf("BlockBodies: %w", err) } + + // Collect items and dispatch. 
+ items, err := res.List.Items() + if err != nil { + return fmt.Errorf("BlockBodies: %w", err) + } + metadata := func() any { return hashBodyParts(items) } return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockBodiesMsg, - Res: &res.BlockBodiesResponse, + Res: (*BlockBodiesResponse)(&items), }, metadata) } +// BlockBodyHashes contains the lists of block body part roots for a list of block bodies. +type BlockBodyHashes struct { + TransactionRoots []common.Hash + WithdrawalRoots []common.Hash + UncleHashes []common.Hash +} + +func hashBodyParts(items []BlockBody) BlockBodyHashes { + h := BlockBodyHashes{ + TransactionRoots: make([]common.Hash, len(items)), + WithdrawalRoots: make([]common.Hash, len(items)), + UncleHashes: make([]common.Hash, len(items)), + } + hasher := trie.NewStackTrie(nil) + for i, body := range items { + // txs + txsList := newDerivableRawList(&body.Transactions, writeTxForHash) + h.TransactionRoots[i] = types.DeriveSha(txsList, hasher) + // uncles + if body.Uncles.Len() == 0 { + h.UncleHashes[i] = types.EmptyUncleHash + } else { + h.UncleHashes[i] = crypto.Keccak256Hash(body.Uncles.Bytes()) + } + // withdrawals + if body.Withdrawals != nil { + wdlist := newDerivableRawList(body.Withdrawals, nil) + h.WithdrawalRoots[i] = types.DeriveSha(wdlist, hasher) + } + } + return h +} + +// derivableRawList implements types.DerivableList for a serialized RLP list. +type derivableRawList struct { + data []byte + offsets []uint32 + write func([]byte, *bytes.Buffer) +} + +func newDerivableRawList[T any](list *rlp.RawList[T], write func([]byte, *bytes.Buffer)) *derivableRawList { + dl := derivableRawList{data: list.Content(), write: write} + if dl.write == nil { + // default transform is identity + dl.write = func(b []byte, buf *bytes.Buffer) { buf.Write(b) } + } + // Assert to ensure 32-bit offsets are valid. This can never trigger + // unless a block body component or p2p receipt list is larger than 4GB. 
+ if uint(len(dl.data)) > math.MaxUint32 { + panic("list data too big for derivableRawList") + } + it := list.ContentIterator() + dl.offsets = make([]uint32, list.Len()) + for i := 0; it.Next(); i++ { + dl.offsets[i] = uint32(it.Offset()) + } + return &dl +} + +// Len returns the number of items in the list. +func (dl *derivableRawList) Len() int { + return len(dl.offsets) +} + +// EncodeIndex writes the i'th item to the buffer. +func (dl *derivableRawList) EncodeIndex(i int, buf *bytes.Buffer) { + start := dl.offsets[i] + end := uint32(len(dl.data)) + if i != len(dl.offsets)-1 { + end = dl.offsets[i+1] + } + dl.write(dl.data[start:end], buf) +} + +// writeTxForHash changes a transaction in 'network encoding' into the format used for +// the transactions MPT. +func writeTxForHash(tx []byte, buf *bytes.Buffer) { + k, content, _, _ := rlp.Split(tx) + if k == rlp.List { + buf.Write(tx) // legacy tx + } else { + buf.Write(content) // typed tx + } +} + func handleReceipts[L ReceiptsList](backend Backend, msg Decoder, peer *Peer) error { // A batch of receipts arrived to one of our previous requests res := new(ReceiptsPacket[L]) if err := msg.Decode(res); err != nil { return err } + + tresp := tracker.Response{ID: res.RequestId, MsgCode: ReceiptsMsg, Size: res.List.Len()} + if err := peer.tracker.Fulfil(tresp); err != nil { + return fmt.Errorf("Receipts: %w", err) + } + // Assign temporary hashing buffer to each list item, the same buffer is shared // between all receipt list instances. 
+ receiptLists, err := res.List.Items() + if err != nil { + return fmt.Errorf("Receipts: %w", err) + } buffers := new(receiptListBuffers) - for i := range res.List { - res.List[i].setBuffers(buffers) + for i := range receiptLists { + receiptLists[i].setBuffers(buffers) } metadata := func() interface{} { hasher := trie.NewStackTrie(nil) - hashes := make([]common.Hash, len(res.List)) - for i := range res.List { - hashes[i] = types.DeriveSha(res.List[i], hasher) + hashes := make([]common.Hash, len(receiptLists)) + for i := range receiptLists { + hashes[i] = types.DeriveSha(receiptLists[i].Derivable(), hasher) } return hashes } var enc ReceiptsRLPResponse - for i := range res.List { - enc = append(enc, res.List[i].EncodeForStorage()) + for i := range receiptLists { + encReceipts, err := receiptLists[i].EncodeForStorage() + if err != nil { + return fmt.Errorf("Receipts: invalid list %d: %v", i, err) + } + enc = append(enc, encReceipts) } return peer.dispatchResponse(&Response{ id: res.RequestId, @@ -501,7 +611,7 @@ func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) } // Schedule all the unknown hashes for retrieval for _, hash := range ann.Hashes { - peer.markTransaction(hash) + peer.MarkTransaction(hash) } return backend.Handle(peer, ann) } @@ -549,12 +659,8 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&txs); err != nil { return err } - for i, tx := range txs { - // Validate and mark the remote transaction - if tx == nil { - return fmt.Errorf("Transactions: transaction %d is nil", i) - } - peer.markTransaction(tx.Hash()) + if txs.Len() > maxTransactionAnnouncements { + return fmt.Errorf("too many transactions") } return backend.Handle(peer, &txs) } @@ -564,21 +670,22 @@ func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { if !backend.AcceptTxs() { return nil } - // Transactions can be processed, parse all of them and deliver to the pool - var txs 
PooledTransactionsPacket - if err := msg.Decode(&txs); err != nil { + + // Check against request and decode. + var resp PooledTransactionsPacket + if err := msg.Decode(&resp); err != nil { return err } - for i, tx := range txs.PooledTransactionsResponse { - // Validate and mark the remote transaction - if tx == nil { - return fmt.Errorf("PooledTransactions: transaction %d is nil", i) - } - peer.markTransaction(tx.Hash()) + tresp := tracker.Response{ + ID: resp.RequestId, + MsgCode: PooledTransactionsMsg, + Size: resp.List.Len(), + } + if err := peer.tracker.Fulfil(tresp); err != nil { + return fmt.Errorf("PooledTransactions: %w", err) } - requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) - return backend.Handle(peer, &txs.PooledTransactionsResponse) + return backend.Handle(peer, &resp) } func handleBlockRangeUpdate(backend Backend, msg Decoder, peer *Peer) error { diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index d7ef06b265..5dc78f1ec0 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -21,6 +21,7 @@ import ( "math/rand" "sync" "sync/atomic" + "time" mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" @@ -28,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/tracker" "github.com/ethereum/go-ethereum/rlp" ) @@ -61,9 +63,10 @@ const ( // Peer is a collection of relevant information we have about a `eth` peer. 
type Peer struct { + *p2p.Peer // The embedded P2P package peer + id string // Unique ID for the peer, cached - *p2p.Peer // The embedded P2P package peer rw p2p.MsgReadWriter // Input/output streams for snap version uint // Protocol version negotiated lastRange atomic.Pointer[BlockRangeUpdatePacket] @@ -82,6 +85,7 @@ type Peer struct { txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests + tracker *tracker.Tracker reqDispatch chan *request // Dispatch channel to send requests and track then until fulfillment reqCancel chan *cancel // Dispatch channel to cancel pending requests and untrack them resDispatch chan *response // Dispatch channel to fulfil pending requests and untrack them @@ -94,8 +98,10 @@ type Peer struct { // NewPeer creates a wrapper for a network connection and negotiated protocol // version. func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Peer { + cap := p2p.Cap{Name: ProtocolName, Version: version} + id := p.ID().String() peer := &Peer{ - id: p.ID().String(), + id: id, Peer: p, rw: rw, version: version, @@ -105,6 +111,7 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns), txBroadcast: make(chan []common.Hash), txAnnounce: make(chan []common.Hash), + tracker: tracker.New(cap, id, 5*time.Minute), reqDispatch: make(chan *request), reqCancel: make(chan *cancel), resDispatch: make(chan *response), @@ -202,9 +209,9 @@ func (p *Peer) markBlock(hash common.Hash) { p.knownBlocks.Add(hash) } -// markTransaction marks a transaction as known for the peer, ensuring that it +// MarkTransaction marks a transaction as known for the peer, ensuring that it // will never be propagated to this particular peer. 
-func (p *Peer) markTransaction(hash common.Hash) { +func (p *Peer) MarkTransaction(hash common.Hash) { // If we reached the memory allowance, drop a previously known transaction hash p.knownTxs.Add(hash) } @@ -376,10 +383,11 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request id := rand.Uint64() req := &Request{ - id: id, - sink: sink, - code: GetBlockHeadersMsg, - want: BlockHeadersMsg, + id: id, + sink: sink, + code: GetBlockHeadersMsg, + want: BlockHeadersMsg, + numItems: 1, data: &GetBlockHeadersPacket{ RequestId: id, GetBlockHeadersRequest: &GetBlockHeadersRequest{ @@ -403,10 +411,11 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re id := rand.Uint64() req := &Request{ - id: id, - sink: sink, - code: GetBlockHeadersMsg, - want: BlockHeadersMsg, + id: id, + sink: sink, + code: GetBlockHeadersMsg, + want: BlockHeadersMsg, + numItems: amount, data: &GetBlockHeadersPacket{ RequestId: id, GetBlockHeadersRequest: &GetBlockHeadersRequest{ @@ -430,10 +439,11 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever id := rand.Uint64() req := &Request{ - id: id, - sink: sink, - code: GetBlockHeadersMsg, - want: BlockHeadersMsg, + id: id, + sink: sink, + code: GetBlockHeadersMsg, + want: BlockHeadersMsg, + numItems: amount, data: &GetBlockHeadersPacket{ RequestId: id, GetBlockHeadersRequest: &GetBlockHeadersRequest{ @@ -457,10 +467,11 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques id := rand.Uint64() req := &Request{ - id: id, - sink: sink, - code: GetBlockBodiesMsg, - want: BlockBodiesMsg, + id: id, + sink: sink, + code: GetBlockBodiesMsg, + want: BlockBodiesMsg, + numItems: len(hashes), data: &GetBlockBodiesPacket{ RequestId: id, GetBlockBodiesRequest: hashes, @@ -478,10 +489,11 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ id := rand.Uint64() req := &Request{ - id: id, - sink: sink, - code: 
GetReceiptsMsg, - want: ReceiptsMsg, + id: id, + sink: sink, + code: GetReceiptsMsg, + want: ReceiptsMsg, + numItems: len(hashes), data: &GetReceiptsPacket{ RequestId: id, GetReceiptsRequest: hashes, @@ -498,7 +510,15 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error { p.Log().Debug("Fetching batch of transactions", "count", len(hashes)) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id) + err := p.tracker.Track(tracker.Request{ + ID: id, + ReqCode: GetPooledTransactionsMsg, + RespCode: PooledTransactionsMsg, + Size: len(hashes), + }) + if err != nil { + return err + } return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{ RequestId: id, GetPooledTransactionsRequest: hashes, diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 60d8a2f6b9..84d9428d1f 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -49,6 +49,9 @@ var protocolLengths = map[uint]uint64{ETH68: 17, ETH69: 18} // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 +// This is the maximum number of transactions in a Transactions message. +const maxTransactionAnnouncements = 5000 + const ( StatusMsg = 0x00 NewBlockHashesMsg = 0x01 @@ -158,7 +161,9 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) { } // TransactionsPacket is the network packet for broadcasting new transactions. -type TransactionsPacket []*types.Transaction +type TransactionsPacket struct { + rlp.RawList[*types.Transaction] +} // GetBlockHeadersRequest represents a block header query. type GetBlockHeadersRequest struct { @@ -216,7 +221,7 @@ type BlockHeadersRequest []*types.Header // BlockHeadersPacket represents a block header response over with request ID wrapping. 
type BlockHeadersPacket struct { RequestId uint64 - BlockHeadersRequest + List rlp.RawList[*types.Header] } // BlockHeadersRLPResponse represents a block header response, to use when we already @@ -268,14 +273,11 @@ type GetBlockBodiesPacket struct { GetBlockBodiesRequest } -// BlockBodiesResponse is the network packet for block content distribution. -type BlockBodiesResponse []*BlockBody - // BlockBodiesPacket is the network packet for block content distribution with // request ID wrapping. type BlockBodiesPacket struct { RequestId uint64 - BlockBodiesResponse + List rlp.RawList[BlockBody] } // BlockBodiesRLPResponse is used for replying to block body requests, in cases @@ -289,27 +291,15 @@ type BlockBodiesRLPPacket struct { BlockBodiesRLPResponse } +// BlockBodiesResponse is the network packet for block content distribution. +type BlockBodiesResponse []BlockBody + // BlockBody represents the data content of a single block. type BlockBody struct { - Transactions []*types.Transaction // Transactions contained within a block - Uncles []*types.Header // Uncles contained within a block - Withdrawals []*types.Withdrawal `rlp:"optional"` // Withdrawals contained within a block - Sidecars types.BlobSidecars `rlp:"optional"` // Sidecars contained within a block -} - -// Unpack retrieves the transactions and uncles from the range packet and returns -// them in a split flat format that's more consistent with the internal data structures. 
-func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal, []types.BlobSidecars) { - var ( - txset = make([][]*types.Transaction, len(*p)) - uncleset = make([][]*types.Header, len(*p)) - withdrawalset = make([][]*types.Withdrawal, len(*p)) - sidecarset = make([]types.BlobSidecars, len(*p)) - ) - for i, body := range *p { - txset[i], uncleset[i], withdrawalset[i], sidecarset[i] = body.Transactions, body.Uncles, body.Withdrawals, body.Sidecars - } - return txset, uncleset, withdrawalset, sidecarset + Transactions rlp.RawList[*types.Transaction] + Uncles rlp.RawList[*types.Header] + Withdrawals *rlp.RawList[*types.Withdrawal] `rlp:"optional"` + Sidecars *rlp.RawList[*types.BlobSidecar] `rlp:"optional"` } // GetReceiptsRequest represents a block receipts query. @@ -328,15 +318,15 @@ type ReceiptsResponse []types.Receipts type ReceiptsList interface { *ReceiptList68 | *ReceiptList69 setBuffers(*receiptListBuffers) - EncodeForStorage() rlp.RawValue - types.DerivableList + EncodeForStorage() (rlp.RawValue, error) + Derivable() types.DerivableList } // ReceiptsPacket is the network packet for block receipts distribution with // request ID wrapping. type ReceiptsPacket[L ReceiptsList] struct { RequestId uint64 - List []L + List rlp.RawList[L] } // ReceiptsRLPResponse is used for receipts, when we already have it encoded @@ -371,7 +361,7 @@ type PooledTransactionsResponse []*types.Transaction // with request ID wrapping. 
type PooledTransactionsPacket struct { RequestId uint64 - PooledTransactionsResponse + List rlp.RawList[*types.Transaction] } // PooledTransactionsRLPResponse is the network packet for transaction distribution, used @@ -427,8 +417,8 @@ func (*NewPooledTransactionHashesPacket) Kind() byte { return NewPooledTransac func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" } func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg } -func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" } -func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg } +func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } +func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } func (*GetReceiptsRequest) Name() string { return "GetReceipts" } func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index 8a2559a6c5..e37d72dcd6 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -78,34 +78,34 @@ func TestEmptyMessages(t *testing.T) { for i, msg := range []any{ // Headers GetBlockHeadersPacket{1111, nil}, - BlockHeadersPacket{1111, nil}, // Bodies GetBlockBodiesPacket{1111, nil}, - BlockBodiesPacket{1111, nil}, BlockBodiesRLPPacket{1111, nil}, // Receipts GetReceiptsPacket{1111, nil}, // Transactions GetPooledTransactionsPacket{1111, nil}, - PooledTransactionsPacket{1111, nil}, PooledTransactionsRLPPacket{1111, nil}, // Headers - BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})}, + BlockHeadersPacket{1111, encodeRL([]*types.Header{})}, // Bodies GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})}, - BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})}, + BlockBodiesPacket{1111, encodeRL([]BlockBody{})}, BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})}, // 
Receipts GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})}, - ReceiptsPacket[*ReceiptList68]{1111, []*ReceiptList68{}}, - ReceiptsPacket[*ReceiptList69]{1111, []*ReceiptList69{}}, + ReceiptsPacket[*ReceiptList68]{1111, encodeRL([]*ReceiptList68{})}, + ReceiptsPacket[*ReceiptList69]{1111, encodeRL([]*ReceiptList69{})}, // Transactions GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})}, - PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})}, + PooledTransactionsPacket{1111, encodeRL([]*types.Transaction{})}, PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})}, } { - if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { + have, err := rlp.EncodeToBytes(msg) + if err != nil { + t.Errorf("test %d, type %T, error: %v", i, msg, err) + } else if !bytes.Equal(have, want) { t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want) } } @@ -116,7 +116,7 @@ func TestMessages(t *testing.T) { // Some basic structs used during testing var ( header *types.Header - blockBody *BlockBody + blockBody BlockBody blockBodyRlp rlp.RawValue txs []*types.Transaction txRlps []rlp.RawValue @@ -150,9 +150,9 @@ func TestMessages(t *testing.T) { } } // init the block body data, both object and rlp form - blockBody = &BlockBody{ - Transactions: txs, - Uncles: []*types.Header{header}, + blockBody = BlockBody{ + Transactions: encodeRL(txs), + Uncles: encodeRL([]*types.Header{header}), } blockBodyRlp, err = rlp.EncodeToBytes(blockBody) if err != nil { @@ -211,7 +211,7 @@ func TestMessages(t *testing.T) { common.FromHex("ca820457c682270f050580"), }, { - BlockHeadersPacket{1111, BlockHeadersRequest{header}}, + BlockHeadersPacket{1111, encodeRL([]*types.Header{header})}, 
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { @@ -219,7 +219,7 @@ func TestMessages(t *testing.T) { common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})}, + BlockBodiesPacket{1111, encodeRL([]BlockBody{blockBody})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { // Identical to non-rlp-shortcut version @@ -231,7 +231,7 @@ func TestMessages(t *testing.T) { common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - ReceiptsPacket[*ReceiptList68]{1111, []*ReceiptList68{NewReceiptList68(receipts)}}, + ReceiptsPacket[*ReceiptList68]{1111, encodeRL([]*ReceiptList68{NewReceiptList68(receipts)})}, 
common.FromHex("f902e6820457f902e0f902ddf901688082014db9010000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000004000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ffb9016f01f9016b018201bcb9010000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000001000000000000000000000000000000000000000000000000040000000000000000000000000004000000000000000000000000000000000000000000000000000000008000400000000000000000000000000000000000000000000000000000000000000000000000000000040f862f860940000000000000000000000000000000000000022f842a00000000000000000000000000000000000000000000000000000000000005668a0000000000000000000000000000000000000000000000000000000000000977386020f0f0f0608"), }, { @@ -240,7 +240,7 @@ func TestMessages(t *testing.T) { 
common.FromHex("f902e6820457f902e0f902ddf901688082014db9010000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000004000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ffb9016f01f9016b018201bcb9010000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000001000000000000000000000000000000000000000000000000040000000000000000000000000004000000000000000000000000000000000000000000000000000000008000400000000000000000000000000000000000000000000000000000000000000000000000000000040f862f860940000000000000000000000000000000000000022f842a00000000000000000000000000000000000000000000000000000000000005668a0000000000000000000000000000000000000000000000000000000000000977386020f0f0f0608"), }, { - ReceiptsPacket[*ReceiptList69]{1111, []*ReceiptList69{NewReceiptList69(receipts)}}, + ReceiptsPacket[*ReceiptList69]{1111, encodeRL([]*ReceiptList69{NewReceiptList69(receipts)})}, 
common.FromHex("f8da820457f8d5f8d3f866808082014df85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100fff86901018201bcf862f860940000000000000000000000000000000000000022f842a00000000000000000000000000000000000000000000000000000000000005668a0000000000000000000000000000000000000000000000000000000000000977386020f0f0f0608"), }, { @@ -248,7 +248,7 @@ func TestMessages(t *testing.T) { common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)}, + PooledTransactionsPacket{1111, encodeRL(txs)}, common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, { diff --git a/eth/protocols/eth/receipt.go b/eth/protocols/eth/receipt.go index 45c4766b17..06956df9f2 100644 --- a/eth/protocols/eth/receipt.go +++ b/eth/protocols/eth/receipt.go @@ -50,8 +50,8 @@ func newReceipt(tr *types.Receipt) Receipt { } // decode68 parses a receipt in the eth/68 network encoding. -func (r *Receipt) decode68(buf *receiptListBuffers, s *rlp.Stream) error { - k, size, err := s.Kind() +func (r *Receipt) decode68(b []byte) error { + k, content, _, err := rlp.Split(b) if err != nil { return err } @@ -59,65 +59,84 @@ func (r *Receipt) decode68(buf *receiptListBuffers, s *rlp.Stream) error { *r = Receipt{} if k == rlp.List { // Legacy receipt. 
- return r.decodeInnerList(s, false, true) + return r.decodeInnerList(b, false, true) } // Typed receipt. - if size < 2 || size > maxReceiptSize { - return fmt.Errorf("invalid receipt size %d", size) + if len(content) < 2 || len(content) > maxReceiptSize { + return fmt.Errorf("invalid receipt size %d", len(content)) } - buf.tmp.Reset() - buf.tmp.Grow(int(size)) - payload := buf.tmp.Bytes()[:int(size)] - if err := s.ReadBytes(payload); err != nil { - return err - } - r.TxType = payload[0] - s2 := rlp.NewStream(bytes.NewReader(payload[1:]), 0) - return r.decodeInnerList(s2, false, true) + r.TxType = content[0] + return r.decodeInnerList(content[1:], false, true) } // decode69 parses a receipt in the eth/69 network encoding. -func (r *Receipt) decode69(s *rlp.Stream) error { +func (r *Receipt) decode69(b []byte) error { *r = Receipt{} - return r.decodeInnerList(s, true, false) + return r.decodeInnerList(b, true, false) } // decodeDatabase parses a receipt in the basic database encoding. -func (r *Receipt) decodeDatabase(txType byte, s *rlp.Stream) error { +func (r *Receipt) decodeDatabase(txType byte, b []byte) error { *r = Receipt{TxType: txType} - return r.decodeInnerList(s, false, false) + return r.decodeInnerList(b, false, false) } -func (r *Receipt) decodeInnerList(s *rlp.Stream, readTxType, readBloom bool) error { - _, err := s.List() +func (r *Receipt) decodeInnerList(input []byte, readTxType, readBloom bool) error { + input, _, err := rlp.SplitList(input) if err != nil { - return err + return fmt.Errorf("inner list: %v", err) } + + // txType if readTxType { - r.TxType, err = s.Uint8() + var txType uint64 + txType, input, err = rlp.SplitUint64(input) if err != nil { return fmt.Errorf("invalid txType: %w", err) } + if txType > 0x7f { + return fmt.Errorf("invalid txType: too large") + } + r.TxType = byte(txType) } - r.PostStateOrStatus, err = s.Bytes() + + // status + r.PostStateOrStatus, input, err = rlp.SplitString(input) if err != nil { return 
fmt.Errorf("invalid postStateOrStatus: %w", err) } - r.GasUsed, err = s.Uint64() + if len(r.PostStateOrStatus) > 1 && len(r.PostStateOrStatus) != 32 { + return fmt.Errorf("invalid postStateOrStatus length %d", len(r.PostStateOrStatus)) + } + + // gas + r.GasUsed, input, err = rlp.SplitUint64(input) if err != nil { return fmt.Errorf("invalid gasUsed: %w", err) } + + // bloom if readBloom { - var b types.Bloom - if err := s.ReadBytes(b[:]); err != nil { + var bloomBytes []byte + bloomBytes, input, err = rlp.SplitString(input) + if err != nil { return fmt.Errorf("invalid bloom: %v", err) } + if len(bloomBytes) != types.BloomByteLength { + return fmt.Errorf("invalid bloom length %d", len(bloomBytes)) + } } - r.Logs, err = s.Raw() + + // logs + _, rest, err := rlp.SplitList(input) if err != nil { return fmt.Errorf("invalid logs: %w", err) } - return s.ListEnd() + if len(rest) != 0 { + return fmt.Errorf("junk at end of receipt") + } + r.Logs = input + return nil } // encodeForStorage produces the the storage encoding, i.e. the result matches @@ -223,32 +242,45 @@ func initBuffers(buf **receiptListBuffers) { } // encodeForStorage encodes a list of receipts for the database. -func (buf *receiptListBuffers) encodeForStorage(rs []Receipt) rlp.RawValue { +func (buf *receiptListBuffers) encodeForStorage(rs rlp.RawList[Receipt], decode func([]byte, *Receipt) error) (rlp.RawValue, error) { var out bytes.Buffer w := &buf.enc w.Reset(&out) outer := w.List() - for _, receipts := range rs { - receipts.encodeForStorage(w) + it := rs.ContentIterator() + for it.Next() { + var receipt Receipt + if err := decode(it.Value(), &receipt); err != nil { + return nil, err + } + receipt.encodeForStorage(w) + } + if it.Err() != nil { + return nil, fmt.Errorf("bad list: %v", it.Err()) } w.ListEnd(outer) w.Flush() - return out.Bytes() + return out.Bytes(), nil } // ReceiptList68 is a block receipt list as downloaded by eth/68. // This also implements types.DerivableList for validation purposes. 
type ReceiptList68 struct { buf *receiptListBuffers - items []Receipt + items rlp.RawList[Receipt] } // NewReceiptList68 creates a receipt list. // This is slow, and exists for testing purposes. func NewReceiptList68(trs []*types.Receipt) *ReceiptList68 { - rl := &ReceiptList68{items: make([]Receipt, len(trs))} - for i, tr := range trs { - rl.items[i] = newReceipt(tr) + rl := new(ReceiptList68) + initBuffers(&rl.buf) + enc := rlp.NewEncoderBuffer(nil) + for _, tr := range trs { + r := newReceipt(tr) + r.encodeForNetwork68(rl.buf, &enc) + rl.items.AppendRaw(enc.ToBytes()) + enc.Reset(nil) } return rl } @@ -266,17 +298,12 @@ func blockReceiptsToNetwork68(blockReceipts, blockBody rlp.RawValue) ([]byte, er buf receiptListBuffers ) blockReceiptIter, _ := rlp.NewListIterator(blockReceipts) - innerReader := bytes.NewReader(nil) - innerStream := rlp.NewStream(innerReader, 0) w := rlp.NewEncoderBuffer(&out) outer := w.List() for i := 0; blockReceiptIter.Next(); i++ { - content := blockReceiptIter.Value() - innerReader.Reset(content) - innerStream.Reset(innerReader, uint64(len(content))) - var r Receipt txType, _ := nextTxType() - if err := r.decodeDatabase(txType, innerStream); err != nil { + var r Receipt + if err := r.decodeDatabase(txType, blockReceiptIter.Value()); err != nil { return nil, fmt.Errorf("invalid database receipt %d: %v", i, err) } r.encodeForNetwork68(&buf, &w) @@ -292,64 +319,51 @@ func (rl *ReceiptList68) setBuffers(buf *receiptListBuffers) { } // EncodeForStorage encodes the receipts for storage into the database. -func (rl *ReceiptList68) EncodeForStorage() rlp.RawValue { +func (rl *ReceiptList68) EncodeForStorage() (rlp.RawValue, error) { initBuffers(&rl.buf) - return rl.buf.encodeForStorage(rl.items) -} - -// Len implements types.DerivableList. 
-func (rl *ReceiptList68) Len() int { - return len(rl.items) + return rl.buf.encodeForStorage(rl.items, func(data []byte, r *Receipt) error { + return r.decode68(data) + }) } -// EncodeIndex implements types.DerivableList. -func (rl *ReceiptList68) EncodeIndex(i int, out *bytes.Buffer) { +// Derivable turns the receipts into a list that can derive the root hash. +func (rl *ReceiptList68) Derivable() types.DerivableList { initBuffers(&rl.buf) - rl.items[i].encodeForHash(rl.buf, out) + return newDerivableRawList(&rl.items, func(data []byte, outbuf *bytes.Buffer) { + var r Receipt + if r.decode68(data) == nil { + r.encodeForHash(rl.buf, outbuf) + } + }) } // DecodeRLP decodes a list of receipts from the network format. func (rl *ReceiptList68) DecodeRLP(s *rlp.Stream) error { - initBuffers(&rl.buf) - if _, err := s.List(); err != nil { - return err - } - for i := 0; s.MoreDataInList(); i++ { - var item Receipt - err := item.decode68(rl.buf, s) - if err != nil { - return fmt.Errorf("receipt %d: %v", i, err) - } - rl.items = append(rl.items, item) - } - return s.ListEnd() + return rl.items.DecodeRLP(s) } // EncodeRLP encodes the list into the network format of eth/68. -func (rl *ReceiptList68) EncodeRLP(_w io.Writer) error { - initBuffers(&rl.buf) - w := rlp.NewEncoderBuffer(_w) - outer := w.List() - for i := range rl.items { - rl.items[i].encodeForNetwork68(rl.buf, &w) - } - w.ListEnd(outer) - return w.Flush() +func (rl *ReceiptList68) EncodeRLP(w io.Writer) error { + return rl.items.EncodeRLP(w) } // ReceiptList69 is the block receipt list as downloaded by eth/69. // This implements types.DerivableList for validation purposes. type ReceiptList69 struct { buf *receiptListBuffers - items []Receipt + items rlp.RawList[Receipt] } // NewReceiptList69 creates a receipt list. // This is slow, and exists for testing purposes. 
func NewReceiptList69(trs []*types.Receipt) *ReceiptList69 { - rl := &ReceiptList69{items: make([]Receipt, len(trs))} - for i, tr := range trs { - rl.items[i] = newReceipt(tr) + rl := new(ReceiptList69) + enc := rlp.NewEncoderBuffer(nil) + for _, tr := range trs { + r := newReceipt(tr) + r.encodeForNetwork69(&enc) + rl.items.AppendRaw(enc.ToBytes()) + enc.Reset(nil) } return rl } @@ -360,47 +374,32 @@ func (rl *ReceiptList69) setBuffers(buf *receiptListBuffers) { } // EncodeForStorage encodes the receipts for storage into the database. -func (rl *ReceiptList69) EncodeForStorage() rlp.RawValue { +func (rl *ReceiptList69) EncodeForStorage() (rlp.RawValue, error) { initBuffers(&rl.buf) - return rl.buf.encodeForStorage(rl.items) -} - -// Len implements types.DerivableList. -func (rl *ReceiptList69) Len() int { - return len(rl.items) + return rl.buf.encodeForStorage(rl.items, func(data []byte, r *Receipt) error { + return r.decode69(data) + }) } -// EncodeIndex implements types.DerivableList. -func (rl *ReceiptList69) EncodeIndex(i int, out *bytes.Buffer) { +// Derivable turns the receipts into a list that can derive the root hash. +func (rl *ReceiptList69) Derivable() types.DerivableList { initBuffers(&rl.buf) - rl.items[i].encodeForHash(rl.buf, out) + return newDerivableRawList(&rl.items, func(data []byte, outbuf *bytes.Buffer) { + var r Receipt + if r.decode69(data) == nil { + r.encodeForHash(rl.buf, outbuf) + } + }) } // DecodeRLP decodes a list receipts from the network format. func (rl *ReceiptList69) DecodeRLP(s *rlp.Stream) error { - if _, err := s.List(); err != nil { - return err - } - for i := 0; s.MoreDataInList(); i++ { - var item Receipt - err := item.decode69(s) - if err != nil { - return fmt.Errorf("receipt %d: %v", i, err) - } - rl.items = append(rl.items, item) - } - return s.ListEnd() + return rl.items.DecodeRLP(s) } // EncodeRLP encodes the list into the network format of eth/69. 
-func (rl *ReceiptList69) EncodeRLP(_w io.Writer) error { - w := rlp.NewEncoderBuffer(_w) - outer := w.List() - for i := range rl.items { - rl.items[i].encodeForNetwork69(&w) - } - w.ListEnd(outer) - return w.Flush() +func (rl *ReceiptList69) EncodeRLP(w io.Writer) error { + return rl.items.EncodeRLP(w) } // blockReceiptsToNetwork69 takes a slice of rlp-encoded receipts, and transactions, diff --git a/eth/protocols/eth/receipt_test.go b/eth/protocols/eth/receipt_test.go index 3c73c07396..39a2728f7f 100644 --- a/eth/protocols/eth/receipt_test.go +++ b/eth/protocols/eth/receipt_test.go @@ -63,6 +63,18 @@ var receiptsTests = []struct { input: []types.ReceiptForStorage{{CumulativeGasUsed: 555, Status: 1, Logs: receiptsTestLogs2}}, txs: []*types.Transaction{types.NewTx(&types.AccessListTx{})}, }, + { + input: []types.ReceiptForStorage{ + {CumulativeGasUsed: 111, PostState: common.HexToHash("0x1111").Bytes(), Logs: receiptsTestLogs1}, + {CumulativeGasUsed: 222, Status: 0, Logs: receiptsTestLogs2}, + {CumulativeGasUsed: 333, Status: 1, Logs: nil}, + }, + txs: []*types.Transaction{ + types.NewTx(&types.LegacyTx{}), + types.NewTx(&types.AccessListTx{}), + types.NewTx(&types.DynamicFeeTx{}), + }, + }, } func init() { @@ -103,7 +115,10 @@ func TestReceiptList69(t *testing.T) { if err := rlp.DecodeBytes(network, &rl); err != nil { t.Fatalf("test[%d]: can't decode network receipts: %v", i, err) } - rlStorageEnc := rl.EncodeForStorage() + rlStorageEnc, err := rl.EncodeForStorage() + if err != nil { + t.Fatalf("test[%d]: error from EncodeForStorage: %v", i, err) + } if !bytes.Equal(rlStorageEnc, canonDB) { t.Fatalf("test[%d]: re-encoded receipts not equal\nhave: %x\nwant: %x", i, rlStorageEnc, canonDB) } @@ -113,7 +128,7 @@ func TestReceiptList69(t *testing.T) { } // compute root hash from ReceiptList69 and compare. 
- responseHash := types.DeriveSha(&rl, trie.NewStackTrie(nil)) + responseHash := types.DeriveSha(rl.Derivable(), trie.NewStackTrie(nil)) if responseHash != test.root { t.Fatalf("test[%d]: wrong root hash from ReceiptList69\nhave: %v\nwant: %v", i, responseHash, test.root) } @@ -140,7 +155,10 @@ func TestReceiptList68(t *testing.T) { if err := rlp.DecodeBytes(network, &rl); err != nil { t.Fatalf("test[%d]: can't decode network receipts: %v", i, err) } - rlStorageEnc := rl.EncodeForStorage() + rlStorageEnc, err := rl.EncodeForStorage() + if err != nil { + t.Fatalf("test[%d]: error from EncodeForStorage: %v", i, err) + } if !bytes.Equal(rlStorageEnc, canonDB) { t.Fatalf("test[%d]: re-encoded receipts not equal\nhave: %x\nwant: %x", i, rlStorageEnc, canonDB) } @@ -150,7 +168,7 @@ func TestReceiptList68(t *testing.T) { } // compute root hash from ReceiptList68 and compare. - responseHash := types.DeriveSha(&rl, trie.NewStackTrie(nil)) + responseHash := types.DeriveSha(rl.Derivable(), trie.NewStackTrie(nil)) if responseHash != test.root { t.Fatalf("test[%d]: wrong root hash from ReceiptList68\nhave: %v\nwant: %v", i, responseHash, test.root) } diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index 3249720f90..071a0419fb 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -31,6 +31,8 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/ethereum/go-ethereum/p2p/tracker" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/triedb/database" @@ -94,6 +96,7 @@ func MakeProtocols(backend Backend) []p2p.Protocol { Length: protocolLengths[version], Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error { + defer peer.Close() return Handle(backend, peer) }) 
}, @@ -149,7 +152,6 @@ func HandleMessage(backend Backend, peer *Peer) error { // Handle the message depending on its contents switch { case msg.Code == GetAccountRangeMsg: - // Decode the account retrieval request var req GetAccountRangePacket if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) @@ -165,23 +167,40 @@ func HandleMessage(backend Backend, peer *Peer) error { }) case msg.Code == AccountRangeMsg: - // A range of accounts arrived to one of our previous requests - res := new(AccountRangePacket) + res := new(accountRangeInput) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } + + // Check response validity. + if len := res.Proof.Len(); len > 128 { + return fmt.Errorf("AccountRange: invalid proof (length %d)", len) + } + tresp := tracker.Response{ID: res.ID, MsgCode: AccountRangeMsg, Size: len(res.Accounts.Content())} + if err := peer.tracker.Fulfil(tresp); err != nil { + return err + } + + // Decode. 
+ accounts, err := res.Accounts.Items() + if err != nil { + return fmt.Errorf("AccountRange: invalid accounts list: %v", err) + } + proof, err := res.Proof.Items() + if err != nil { + return fmt.Errorf("AccountRange: invalid proof: %v", err) + } + // Ensure the range is monotonically increasing - for i := 1; i < len(res.Accounts); i++ { - if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 { - return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:]) + for i := 1; i < len(accounts); i++ { + if bytes.Compare(accounts[i-1].Hash[:], accounts[i].Hash[:]) >= 0 { + return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, accounts[i-1].Hash[:], i, accounts[i].Hash[:]) } } - requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID) - return backend.Handle(peer, res) + return backend.Handle(peer, &AccountRangePacket{res.ID, accounts, proof}) case msg.Code == GetStorageRangesMsg: - // Decode the storage retrieval request var req GetStorageRangesPacket if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) @@ -197,25 +216,42 @@ func HandleMessage(backend Backend, peer *Peer) error { }) case msg.Code == StorageRangesMsg: - // A range of storage slots arrived to one of our previous requests - res := new(StorageRangesPacket) + res := new(storageRangesInput) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } + + // Check response validity. + if len := res.Proof.Len(); len > 128 { + return fmt.Errorf("StorageRangesMsg: invalid proof (length %d)", len) + } + tresp := tracker.Response{ID: res.ID, MsgCode: StorageRangesMsg, Size: len(res.Slots.Content())} + if err := peer.tracker.Fulfil(tresp); err != nil { + return fmt.Errorf("StorageRangesMsg: %w", err) + } + + // Decode. 
+ slotLists, err := res.Slots.Items() + if err != nil { + return fmt.Errorf("AccountRange: invalid accounts list: %v", err) + } + proof, err := res.Proof.Items() + if err != nil { + return fmt.Errorf("AccountRange: invalid proof: %v", err) + } + // Ensure the ranges are monotonically increasing - for i, slots := range res.Slots { + for i, slots := range slotLists { for j := 1; j < len(slots); j++ { if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 { return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:]) } } } - requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID) - return backend.Handle(peer, res) + return backend.Handle(peer, &StorageRangesPacket{res.ID, slotLists, proof}) case msg.Code == GetByteCodesMsg: - // Decode bytecode retrieval request var req GetByteCodesPacket if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) @@ -230,17 +266,25 @@ func HandleMessage(backend Backend, peer *Peer) error { }) case msg.Code == ByteCodesMsg: - // A batch of byte codes arrived to one of our previous requests - res := new(ByteCodesPacket) + res := new(byteCodesInput) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID) - return backend.Handle(peer, res) + length := res.Codes.Len() + tresp := tracker.Response{ID: res.ID, MsgCode: ByteCodesMsg, Size: length} + if err := peer.tracker.Fulfil(tresp); err != nil { + return fmt.Errorf("ByteCodes: %w", err) + } + + codes, err := res.Codes.Items() + if err != nil { + return fmt.Errorf("ByteCodes: %w", err) + } + + return backend.Handle(peer, &ByteCodesPacket{res.ID, codes}) case msg.Code == GetTrieNodesMsg: - // Decode trie node retrieval request var req GetTrieNodesPacket if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: 
%v", errDecode, msg, err) @@ -257,14 +301,21 @@ func HandleMessage(backend Backend, peer *Peer) error { }) case msg.Code == TrieNodesMsg: - // A batch of trie nodes arrived to one of our previous requests - res := new(TrieNodesPacket) + res := new(trieNodesInput) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID) - return backend.Handle(peer, res) + tresp := tracker.Response{ID: res.ID, MsgCode: TrieNodesMsg, Size: res.Nodes.Len()} + if err := peer.tracker.Fulfil(tresp); err != nil { + return fmt.Errorf("TrieNodes: %w", err) + } + nodes, err := res.Nodes.Items() + if err != nil { + return fmt.Errorf("TrieNodes: %w", err) + } + + return backend.Handle(peer, &TrieNodesPacket{res.ID, nodes}) default: return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code) @@ -512,21 +563,32 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s if reader == nil { reader, _ = triedb.StateReader(req.Root) } + // Retrieve trie nodes until the packet size limit is reached var ( - nodes [][]byte - bytes uint64 - loads int // Trie hash expansions to count database reads + outerIt = req.Paths.ContentIterator() + nodes [][]byte + bytes uint64 + loads int // Trie hash expansions to count database reads ) - for _, pathset := range req.Paths { - switch len(pathset) { + for outerIt.Next() { + innerIt, err := rlp.NewListIterator(outerIt.Value()) + if err != nil { + return nodes, err + } + + switch innerIt.Count() { case 0: // Ensure we penalize invalid requests return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest) case 1: // If we're only retrieving an account trie node, fetch it directly - blob, resolved, err := accTrie.GetNode(pathset[0]) + accKey := nextBytes(&innerIt) + if accKey == nil { + return nodes, fmt.Errorf("%w: invalid account node request", errBadRequest) + } + blob, resolved, err := accTrie.GetNode(accKey) loads += 
resolved // always account database reads, even for failures if err != nil { break @@ -535,33 +597,41 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s bytes += uint64(len(blob)) default: - var stRoot common.Hash - // Storage slots requested, open the storage trie and retrieve from there + accKey := nextBytes(&innerIt) + if accKey == nil { + return nodes, fmt.Errorf("%w: invalid account storage request", errBadRequest) + } + var stRoot common.Hash if reader == nil { // We don't have the requested state snapshotted yet (or it is stale), // but can look up the account via the trie instead. - account, err := accTrie.GetAccountByHash(common.BytesToHash(pathset[0])) + account, err := accTrie.GetAccountByHash(common.BytesToHash(accKey)) loads += 8 // We don't know the exact cost of lookup, this is an estimate if err != nil || account == nil { break } stRoot = account.Root } else { - account, err := reader.Account(common.BytesToHash(pathset[0])) + account, err := reader.Account(common.BytesToHash(accKey)) loads++ // always account database reads, even for failures if err != nil || account == nil { break } stRoot = common.BytesToHash(account.Root) } - id := trie.StorageTrieID(req.Root, common.BytesToHash(pathset[0]), stRoot) + + id := trie.StorageTrieID(req.Root, common.BytesToHash(accKey), stRoot) stTrie, err := trie.NewStateTrie(id, triedb) loads++ // always account database reads, even for failures if err != nil { break } - for _, path := range pathset[1:] { + for innerIt.Next() { + path, _, err := rlp.SplitString(innerIt.Value()) + if err != nil { + return nil, fmt.Errorf("%w: invalid storage key: %v", errBadRequest, err) + } blob, resolved, err := stTrie.GetNode(path) loads += resolved // always account database reads, even for failures if err != nil { @@ -584,6 +654,17 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s return nodes, nil } +func nextBytes(it *rlp.Iterator) []byte { + if !it.Next() { + 
return nil + } + content, _, err := rlp.SplitString(it.Value()) + if err != nil { + return nil + } + return content +} + // NodeInfo represents a short summary of the `snap` sub-protocol metadata // known about the host peer. type NodeInfo struct{} diff --git a/eth/protocols/snap/peer.go b/eth/protocols/snap/peer.go index c57931678c..0b96de4158 100644 --- a/eth/protocols/snap/peer.go +++ b/eth/protocols/snap/peer.go @@ -17,9 +17,13 @@ package snap import ( + "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/tracker" + "github.com/ethereum/go-ethereum/rlp" ) // Peer is a collection of relevant information we have about a `snap` peer. @@ -29,6 +33,7 @@ type Peer struct { *p2p.Peer // The embedded P2P package peer rw p2p.MsgReadWriter // Input/output streams for snap version uint // Protocol version negotiated + tracker *tracker.Tracker logger log.Logger // Contextual logger with the peer id injected } @@ -36,22 +41,26 @@ type Peer struct { // NewPeer creates a wrapper for a network connection and negotiated protocol // version. func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer { + cap := p2p.Cap{Name: ProtocolName, Version: version} id := p.ID().String() return &Peer{ id: id, Peer: p, rw: rw, version: version, + tracker: tracker.New(cap, id, 1*time.Minute), logger: log.New("peer", id[:8]), } } // NewFakePeer creates a fake snap peer without a backing p2p peer, for testing purposes. func NewFakePeer(version uint, id string, rw p2p.MsgReadWriter) *Peer { + cap := p2p.Cap{Name: ProtocolName, Version: version} return &Peer{ id: id, rw: rw, version: version, + tracker: tracker.New(cap, id, 1*time.Minute), logger: log.New("peer", id[:8]), } } @@ -71,63 +80,99 @@ func (p *Peer) Log() log.Logger { return p.logger } +// Close releases resources associated with the peer. 
+func (p *Peer) Close() { + p.tracker.Stop() +} + // RequestAccountRange fetches a batch of accounts rooted in a specific account // trie, starting with the origin. -func (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit common.Hash, bytes uint64) error { +func (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit common.Hash, bytes int) error { p.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes)) - requestTracker.Track(p.id, p.version, GetAccountRangeMsg, AccountRangeMsg, id) + err := p.tracker.Track(tracker.Request{ + ReqCode: GetAccountRangeMsg, + RespCode: AccountRangeMsg, + ID: id, + Size: 2 * bytes, + }) + if err != nil { + return err + } return p2p.Send(p.rw, GetAccountRangeMsg, &GetAccountRangePacket{ ID: id, Root: root, Origin: origin, Limit: limit, - Bytes: bytes, + Bytes: uint64(bytes), }) } // RequestStorageRanges fetches a batch of storage slots belonging to one or more // accounts. If slots from only one account is requested, an origin marker may also // be used to retrieve from there. 
-func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { +func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes int) error { if len(accounts) == 1 && origin != nil { p.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes)) } else { p.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes)) } - requestTracker.Track(p.id, p.version, GetStorageRangesMsg, StorageRangesMsg, id) + + p.tracker.Track(tracker.Request{ + ReqCode: GetStorageRangesMsg, + RespCode: StorageRangesMsg, + ID: id, + Size: 2 * bytes, + }) return p2p.Send(p.rw, GetStorageRangesMsg, &GetStorageRangesPacket{ ID: id, Root: root, Accounts: accounts, Origin: origin, Limit: limit, - Bytes: bytes, + Bytes: uint64(bytes), }) } // RequestByteCodes fetches a batch of bytecodes by hash. -func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { +func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes int) error { p.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes)) - requestTracker.Track(p.id, p.version, GetByteCodesMsg, ByteCodesMsg, id) + err := p.tracker.Track(tracker.Request{ + ReqCode: GetByteCodesMsg, + RespCode: ByteCodesMsg, + ID: id, + Size: len(hashes), // ByteCodes is limited by the length of the hash list. + }) + if err != nil { + return err + } return p2p.Send(p.rw, GetByteCodesMsg, &GetByteCodesPacket{ ID: id, Hashes: hashes, - Bytes: bytes, + Bytes: uint64(bytes), }) } // RequestTrieNodes fetches a batch of account or storage trie nodes rooted in -// a specific state trie. 
-func (p *Peer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error { +// a specific state trie. The `count` is the total count of paths being requested. +func (p *Peer) RequestTrieNodes(id uint64, root common.Hash, count int, paths []TrieNodePathSet, bytes int) error { p.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes)) - requestTracker.Track(p.id, p.version, GetTrieNodesMsg, TrieNodesMsg, id) + err := p.tracker.Track(tracker.Request{ + ReqCode: GetTrieNodesMsg, + RespCode: TrieNodesMsg, + ID: id, + Size: count, // TrieNodes is limited by number of items. + }) + if err != nil { + return err + } + encPaths, _ := rlp.EncodeToRawList(paths) return p2p.Send(p.rw, GetTrieNodesMsg, &GetTrieNodesPacket{ ID: id, Root: root, - Paths: paths, - Bytes: bytes, + Paths: encPaths, + Bytes: uint64(bytes), }) } diff --git a/eth/protocols/snap/protocol.go b/eth/protocols/snap/protocol.go index 0db206b081..25fe25822b 100644 --- a/eth/protocols/snap/protocol.go +++ b/eth/protocols/snap/protocol.go @@ -78,6 +78,12 @@ type GetAccountRangePacket struct { Bytes uint64 // Soft limit at which to stop returning data } +type accountRangeInput struct { + ID uint64 // ID of the request this is a response for + Accounts rlp.RawList[*AccountData] // List of consecutive accounts from the trie + Proof rlp.RawList[[]byte] // List of trie nodes proving the account range +} + // AccountRangePacket represents an account query response. 
type AccountRangePacket struct { ID uint64 // ID of the request this is a response for @@ -123,6 +129,12 @@ type GetStorageRangesPacket struct { Bytes uint64 // Soft limit at which to stop returning data } +type storageRangesInput struct { + ID uint64 // ID of the request this is a response for + Slots rlp.RawList[[]*StorageData] // Lists of consecutive storage slots for the requested accounts + Proof rlp.RawList[[]byte] // Merkle proofs for the *last* slot range, if it's incomplete +} + // StorageRangesPacket represents a storage slot query response. type StorageRangesPacket struct { ID uint64 // ID of the request this is a response for @@ -161,6 +173,11 @@ type GetByteCodesPacket struct { Bytes uint64 // Soft limit at which to stop returning data } +type byteCodesInput struct { + ID uint64 // ID of the request this is a response for + Codes rlp.RawList[[]byte] // Requested contract bytecodes +} + // ByteCodesPacket represents a contract bytecode query response. type ByteCodesPacket struct { ID uint64 // ID of the request this is a response for @@ -169,10 +186,10 @@ type ByteCodesPacket struct { // GetTrieNodesPacket represents a state trie node query. type GetTrieNodesPacket struct { - ID uint64 // Request ID to match up responses with - Root common.Hash // Root hash of the account trie to serve - Paths []TrieNodePathSet // Trie node hashes to retrieve the nodes for - Bytes uint64 // Soft limit at which to stop returning data + ID uint64 // Request ID to match up responses with + Root common.Hash // Root hash of the account trie to serve + Paths rlp.RawList[TrieNodePathSet] // Trie node hashes to retrieve the nodes for + Bytes uint64 // Soft limit at which to stop returning data } // TrieNodePathSet is a list of trie node paths to retrieve. A naive way to @@ -187,6 +204,11 @@ type GetTrieNodesPacket struct { // that a slot is accessed before the account path is fully expanded. 
type TrieNodePathSet [][]byte +type trieNodesInput struct { + ID uint64 // ID of the request this is a response for + Nodes rlp.RawList[[]byte] // Requested state trie nodes +} + // TrieNodesPacket represents a state trie node query response. type TrieNodesPacket struct { ID uint64 // ID of the request this is a response for diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index ec740b9eb0..8263932f2c 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -413,19 +413,19 @@ type SyncPeer interface { // RequestAccountRange fetches a batch of accounts rooted in a specific account // trie, starting with the origin. - RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error + RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes int) error // RequestStorageRanges fetches a batch of storage slots belonging to one or // more accounts. If slots from only one account is requested, an origin marker // may also be used to retrieve from there. - RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error + RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes int) error // RequestByteCodes fetches a batch of bytecodes by hash. - RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error + RequestByteCodes(id uint64, hashes []common.Hash, bytes int) error // RequestTrieNodes fetches a batch of account or storage trie nodes rooted in // a specific state trie. - RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error + RequestTrieNodes(id uint64, root common.Hash, count int, paths []TrieNodePathSet, bytes int) error // Log retrieves the peer's own contextual logger. 
Log() log.Logger @@ -1105,7 +1105,7 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac if cap < minRequestSize { // Don't bother with peers below a bare minimum performance cap = minRequestSize } - if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil { + if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, cap); err != nil { peer.Log().Debug("Failed to request account range", "err", err) s.scheduleRevertAccountRequest(req) } @@ -1363,7 +1363,7 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st if subtask != nil { origin, limit = req.origin[:], req.limit[:] } - if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil { + if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, cap); err != nil { log.Debug("Failed to request storage", "err", err) s.scheduleRevertStorageRequest(req) } @@ -1497,7 +1497,7 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai defer s.pend.Done() // Attempt to send the remote request and revert if it fails - if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil { + if err := peer.RequestTrieNodes(reqid, root, len(paths), pathsets, maxRequestSize); err != nil { log.Debug("Failed to request trienode healers", "err", err) s.scheduleRevertTrienodeHealRequest(req) } diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 713b358ff8..9530823cdb 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -119,10 +119,10 @@ func BenchmarkHashing(b *testing.B) { } type ( - accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error - storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error - 
trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error - codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error + accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap int) error + storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) error + trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap int) error + codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max int) error ) type testPeer struct { @@ -182,21 +182,21 @@ Trienode requests: %d `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests) } -func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { +func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes int) error { t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes)) t.nAccountRequests++ go t.accountRequestHandler(t, id, root, origin, limit, bytes) return nil } -func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error { +func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, count int, paths []TrieNodePathSet, bytes int) error { t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes)) t.nTrienodeRequests++ go t.trieRequestHandler(t, id, root, paths, bytes) return nil } -func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { +func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes int) error { 
t.nStorageRequests++ if len(accounts) == 1 && origin != nil { t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes)) @@ -207,7 +207,7 @@ func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts [] return nil } -func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { +func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes int) error { t.nBytecodeRequests++ t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes)) go t.codeRequestHandler(t, id, hashes, bytes) @@ -215,7 +215,7 @@ func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint6 } // defaultTrieRequestHandler is a well-behaving handler for trie healing requests -func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { +func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap int) error { // Pass the response var nodes [][]byte for _, pathset := range paths { @@ -244,7 +244,7 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, } // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests -func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { +func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap int) error { keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil { t.test.Errorf("Remote side rejected our delivery: %v", err) @@ -254,8 +254,8 @@ func defaultAccountRequestHandler(t *testPeer, 
id uint64, root common.Hash, orig return nil } -func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { - var size uint64 +func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap int) (keys []common.Hash, vals [][]byte, proofs [][]byte) { + var size int if limit == (common.Hash{}) { limit = common.MaxHash } @@ -266,7 +266,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H if bytes.Compare(origin[:], entry.k) <= 0 { keys = append(keys, common.BytesToHash(entry.k)) vals = append(vals, entry.v) - size += uint64(32 + len(entry.v)) + size += 32 + len(entry.v) } // If we've exceeded the request threshold, abort if bytes.Compare(entry.k, limit[:]) >= 0 { @@ -290,7 +290,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H } // defaultStorageRequestHandler is a well-behaving storage request handler -func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error { +func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max int) error { hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max) if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { t.test.Errorf("Remote side rejected our delivery: %v", err) @@ -299,7 +299,7 @@ func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Has return nil } -func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { +func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max int) error { var bytecodes [][]byte for _, h := range hashes { bytecodes = append(bytecodes, getCodeByHash(h)) @@ -311,8 +311,8 @@ 
func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max return nil } -func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { - var size uint64 +func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size int for _, account := range accounts { // The first account might start from a different origin and end sooner var originHash common.Hash @@ -338,7 +338,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm } keys = append(keys, common.BytesToHash(entry.k)) vals = append(vals, entry.v) - size += uint64(32 + len(entry.v)) + size += 32 + len(entry.v) if bytes.Compare(entry.k, limitHash[:]) >= 0 { break } @@ -377,8 +377,8 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm // createStorageRequestResponseAlwaysProve tests a cornercase, where the peer always // supplies the proof for the last account, even if it is 'complete'. 
-func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { - var size uint64 +func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max int) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size int max = max * 3 / 4 var origin common.Hash @@ -395,7 +395,7 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco } keys = append(keys, common.BytesToHash(entry.k)) vals = append(vals, entry.v) - size += uint64(32 + len(entry.v)) + size += 32 + len(entry.v) if size > max { exit = true } @@ -433,34 +433,34 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco } // emptyRequestAccountRangeFn is a rejects AccountRangeRequests -func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { +func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap int) error { t.remote.OnAccounts(t, requestId, nil, nil, nil) return nil } -func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { +func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap int) error { return nil } -func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { +func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap int) error { t.remote.OnTrieNodes(t, requestId, nil) return nil } -func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { +func 
nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap int) error { return nil } -func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { +func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) error { t.remote.OnStorage(t, requestId, nil, nil, nil) return nil } -func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { +func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) error { return nil } -func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { +func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) error { hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max) if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { t.test.Errorf("Remote side rejected our delivery: %v", err) @@ -475,7 +475,7 @@ func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common. 
// return nil //} -func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { +func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max int) error { var bytecodes [][]byte for _, h := range hashes { // Send back the hashes @@ -489,7 +489,7 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max return nil } -func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { +func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max int) error { var bytecodes [][]byte for _, h := range hashes[:1] { bytecodes = append(bytecodes, getCodeByHash(h)) @@ -503,11 +503,11 @@ func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max } // starvingStorageRequestHandler is somewhat well-behaving storage handler, but it caps the returned results to be very small -func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { +func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) error { return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500) } -func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { +func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap int) error { return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500) } @@ -515,7 +515,7 @@ func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Ha // return defaultAccountRequestHandler(t, requestId-1, root, origin, 500) //} -func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { +func 
corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap int) error { hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if len(proofs) > 0 { proofs = proofs[1:] @@ -529,7 +529,7 @@ func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Has } // corruptStorageRequestHandler doesn't provide good proofs -func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { +func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) error { hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max) if len(proofs) > 0 { proofs = proofs[1:] @@ -542,7 +542,7 @@ func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Has return nil } -func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { +func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) error { hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max) if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil { t.logger.Info("remote error on delivery (as expected)", "error", err) @@ -577,7 +577,7 @@ func testSyncBloatedProof(t *testing.T, scheme string) { source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap int) error { var ( keys []common.Hash vals [][]byte @@ -1165,7 +1165,7 
@@ func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) { var counter int syncer := setupSyncer( nodeScheme, - mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { + mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max int) error { counter++ return cappedCodeRequestHandler(t, id, hashes, max) }), @@ -1432,7 +1432,7 @@ func testSyncWithUnevenStorage(t *testing.T, scheme string) { source.accountValues = accounts source.setStorageTries(storageTries) source.storageValues = storageElems - source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max int) error { return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode } return source diff --git a/eth/protocols/snap/tracker.go b/eth/protocols/snap/tracker.go deleted file mode 100644 index 2cf59cc23a..0000000000 --- a/eth/protocols/snap/tracker.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package snap - -import ( - "time" - - "github.com/ethereum/go-ethereum/p2p/tracker" -) - -// requestTracker is a singleton tracker for request times. -var requestTracker = tracker.New(ProtocolName, time.Minute) diff --git a/go.mod b/go.mod index 46fbfe05e2..b5c17f5951 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/credentials v1.13.43 github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2 + github.com/bits-and-blooms/bitset v1.20.0 github.com/bnb-chain/fastssz v0.1.2 github.com/bnb-chain/ics23 v0.1.0 github.com/cespare/cp v1.1.1 @@ -17,6 +18,7 @@ require ( github.com/cockroachdb/pebble v1.1.5 github.com/cometbft/cometbft v0.37.0 github.com/consensys/gnark-crypto v0.18.1 + github.com/cosmos/iavl v0.12.0 github.com/crate-crypto/go-eth-kzg v1.4.0 github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a github.com/crate-crypto/go-kzg-4844 v1.1.0 @@ -73,22 +75,20 @@ require ( github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tendermint/go-amino v0.14.1 - github.com/tendermint/iavl v0.12.0 github.com/tendermint/tendermint v0.31.15 github.com/tidwall/wal v1.1.7 github.com/urfave/cli/v2 v2.27.5 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3 - github.com/willf/bitset v1.1.3 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.3.0 - golang.org/x/crypto v0.45.0 + golang.org/x/crypto v0.46.0 golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 - golang.org/x/sync v0.18.0 + golang.org/x/sync v0.19.0 golang.org/x/sys v0.40.0 - golang.org/x/text v0.31.0 + golang.org/x/text v0.32.0 golang.org/x/time v0.9.0 - golang.org/x/tools v0.38.0 - google.golang.org/protobuf v1.36.3 + golang.org/x/tools v0.39.0 + google.golang.org/protobuf v1.36.10 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v3 v3.0.1 gotest.tools v2.2.0+incompatible @@ -118,7 +118,6 @@ require ( 
github.com/aws/smithy-go v1.15.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect @@ -309,14 +308,14 @@ require ( go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/mod v0.29.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect - golang.org/x/term v0.37.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect - google.golang.org/grpc v1.69.4 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect + golang.org/x/term v0.38.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/grpc v1.79.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/apimachinery v0.30.4 // indirect diff --git a/go.sum b/go.sum index bf83181536..6d2d09194c 100644 --- a/go.sum +++ b/go.sum @@ -221,6 +221,8 @@ github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fj github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= github.com/cosmos/gogoproto v1.4.1 h1:WoyH+0/jbCTzpKNvyav5FL1ZTWsp1im1MxEpJEzKUB8= github.com/cosmos/gogoproto v1.4.1/go.mod h1:Ac9lzL4vFpBMcptJROQ6dQ4M3pOEK5Z/l0Q9p+LoCr4= 
+github.com/cosmos/iavl v0.12.0 h1:lcK28rWK6Q/+iKQlMksi/pKnqQZLIdQocSfazSR4fKE= +github.com/cosmos/iavl v0.12.0/go.mod h1:5paARKGkQVKis4t9KbBnXTKdzl+sdDAhR3rIp+iAZzs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= @@ -1114,8 +1116,6 @@ github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161/go.mod h1:wM7WEv github.com/templexxx/xor v0.0.0-20191217153810-f85b25db303b/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4= github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= -github.com/tendermint/iavl v0.12.0 h1:xcaFAr+ycqCj7WN1RzL2EfcBioRDOHcU1oWcg83K028= -github.com/tendermint/iavl v0.12.0/go.mod h1:EoKMMv++tDOL5qKKVnoIqtVPshRrEPeJ0WsgDOLAauM= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e/go.mod h1:Tu4lItkATkonrYuvtVjG0/rhy15qrNGNTjPdaphtZ/8= github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= @@ -1159,8 +1159,6 @@ github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3 h1:SxrDVSr+oXuT1 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3/go.mod h1:qiIimacW5NhVRy8o+YxWo9YrecXqDAKKbL0+sOa0SJ4= github.com/wealdtech/go-eth2-wallet-types/v2 v2.8.2 h1:264/meVYWt1wFw6Mtn+xwkZkXjID42gNra4rycoiDXI= github.com/wealdtech/go-eth2-wallet-types/v2 v2.8.2/go.mod h1:k6kmiKWSWBTd4OxFifTEkPaBLhZspnO2KFD5XJY9nqg= -github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8= -github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/wlynxg/anet 
v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= @@ -1267,8 +1265,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1308,8 +1306,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1364,8 +1362,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1377,8 +1375,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= 
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1393,8 +1391,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1483,8 +1481,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= -golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 
h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1493,8 +1491,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1509,8 +1507,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.32.0 
h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1580,12 +1578,14 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod 
h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -1661,10 +1661,10 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= -google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1688,8 +1688,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= 
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1700,8 +1700,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 
46a2de9f49..52ddcce568 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1383,6 +1383,7 @@ func (api *BlockChainAPI) replay(ctx context.Context, block *types.Block, accoun if posa, ok := api.b.Engine().(consensus.PoSA); ok { if isSystem, _ := posa.IsSystemTransaction(tx, block.Header()); isSystem { + msg.SkipTransactionChecks = true balance := statedb.GetBalance(consensus.SystemAddress) if balance.Cmp(common.U2560) > 0 { statedb.SetBalance(consensus.SystemAddress, uint256.NewInt(0), tracing.BalanceChangeUnspecified) diff --git a/miner/bid_simulator.go b/miner/bid_simulator.go index c46992ce1c..ec5ba0eae0 100644 --- a/miner/bid_simulator.go +++ b/miner/bid_simulator.go @@ -1079,6 +1079,12 @@ func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *para if sc.Version == types.BlobSidecarVersion1 { return errors.New("cell proof is not supported yet") } + + // Validate blob sidecar commitment hashes and KZG proofs. + if err := txpool.ValidateBlobTx(tx, env.header, nil); err != nil { + return err + } + // Checking against blob gas limit: It's kind of ugly to perform this check here, but there // isn't really a better place right now. The blob gas limit is checked at block validation time // and not during execution. 
This means core.ApplyTransaction will not return an error if the diff --git a/oss-fuzz.sh b/oss-fuzz.sh index 020b6fee27..6083ac6f74 100644 --- a/oss-fuzz.sh +++ b/oss-fuzz.sh @@ -35,7 +35,7 @@ function coverbuild { sed -i -e 's/TestFuzzCorpus/Test'$function'Corpus/' ./"${function,,}"_test.go cat << DOG > $OUT/$fuzzer -#/bin/sh +#!/bin/sh cd $OUT/$path go test -run Test${function}Corpus -v $tags -coverprofile \$1 -coverpkg $coverpkg diff --git a/p2p/tracker/tracker.go b/p2p/tracker/tracker.go index 5b72eb2b88..3a9135aa3b 100644 --- a/p2p/tracker/tracker.go +++ b/p2p/tracker/tracker.go @@ -18,52 +18,63 @@ package tracker import ( "container/list" + "errors" "fmt" "sync" "time" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/p2p" ) const ( - // trackedGaugeName is the prefix of the per-packet request tracking. trackedGaugeName = "p2p/tracked" - - // lostMeterName is the prefix of the per-packet request expirations. - lostMeterName = "p2p/lost" - - // staleMeterName is the prefix of the per-packet stale responses. - staleMeterName = "p2p/stale" - - // waitHistName is the prefix of the per-packet (req only) waiting time histograms. - waitHistName = "p2p/wait" + lostMeterName = "p2p/lost" + staleMeterName = "p2p/stale" + waitHistName = "p2p/wait" // maxTrackedPackets is a huge number to act as a failsafe on the number of // pending requests the node will track. It should never be hit unless an // attacker figures out a way to spin requests. - maxTrackedPackets = 100000 + maxTrackedPackets = 10000 ) -// request tracks sent network requests which have not yet received a response. 
-type request struct { - peer string - version uint // Protocol version +var ( + ErrNoMatchingRequest = errors.New("no matching request") + ErrTooManyItems = errors.New("response is larger than request allows") + ErrCollision = errors.New("request ID collision") + ErrCodeMismatch = errors.New("wrong response code for request") + ErrLimitReached = errors.New("request limit reached") + ErrStopped = errors.New("tracker stopped") +) - reqCode uint64 // Protocol message code of the request - resCode uint64 // Protocol message code of the expected response +// Request tracks sent network requests which have not yet received a response. +type Request struct { + ID uint64 // Request ID + Size int // Number/size of requested items + ReqCode uint64 // Protocol message code of the request + RespCode uint64 // Protocol message code of the expected response time time.Time // Timestamp when the request was made expire *list.Element // Expiration marker to untrack it } +type Response struct { + ID uint64 // Request ID of the response + MsgCode uint64 // Protocol message code + Size int // number/size of items in response +} + // Tracker is a pending network request tracker to measure how much time it takes // a remote peer to respond. 
type Tracker struct { - protocol string // Protocol capability identifier for the metrics - timeout time.Duration // Global timeout after which to drop a tracked packet + cap p2p.Cap // Protocol capability identifier for the metrics - pending map[uint64]*request // Currently pending requests + peer string // Peer ID + timeout time.Duration // Global timeout after which to drop a tracked packet + + pending map[uint64]*Request // Currently pending requests expire *list.List // Linked list tracking the expiration order wake *time.Timer // Timer tracking the expiration of the next item @@ -72,52 +83,53 @@ type Tracker struct { // New creates a new network request tracker to monitor how much time it takes to // fill certain requests and how individual peers perform. -func New(protocol string, timeout time.Duration) *Tracker { +func New(cap p2p.Cap, peerID string, timeout time.Duration) *Tracker { return &Tracker{ - protocol: protocol, - timeout: timeout, - pending: make(map[uint64]*request), - expire: list.New(), + cap: cap, + peer: peerID, + timeout: timeout, + pending: make(map[uint64]*Request), + expire: list.New(), } } // Track adds a network request to the tracker to wait for a response to arrive // or until the request it cancelled or times out. -func (t *Tracker) Track(peer string, version uint, reqCode uint64, resCode uint64, id uint64) { - if !metrics.Enabled() { - return - } +func (t *Tracker) Track(req Request) error { t.lock.Lock() defer t.lock.Unlock() + if t.expire == nil { + return ErrStopped + } + // If there's a duplicate request, we've just random-collided (or more probably, // we have a bug), report it. We could also add a metric, but we're not really // expecting ourselves to be buggy, so a noisy warning should be enough. 
- if _, ok := t.pending[id]; ok { - log.Error("Network request id collision", "protocol", t.protocol, "version", version, "code", reqCode, "id", id) - return + if _, ok := t.pending[req.ID]; ok { + log.Error("Network request id collision", "cap", t.cap, "code", req.ReqCode, "id", req.ID) + return ErrCollision } // If we have too many pending requests, bail out instead of leaking memory if pending := len(t.pending); pending >= maxTrackedPackets { - log.Error("Request tracker exceeded allowance", "pending", pending, "peer", peer, "protocol", t.protocol, "version", version, "code", reqCode) - return + log.Error("Request tracker exceeded allowance", "pending", pending, "peer", t.peer, "cap", t.cap, "code", req.ReqCode) + return ErrLimitReached } + // Id doesn't exist yet, start tracking it - t.pending[id] = &request{ - peer: peer, - version: version, - reqCode: reqCode, - resCode: resCode, - time: time.Now(), - expire: t.expire.PushBack(id), + req.time = time.Now() + req.expire = t.expire.PushBack(req.ID) + t.pending[req.ID] = &req + + if metrics.Enabled() { + t.trackedGauge(req.ReqCode).Inc(1) } - g := fmt.Sprintf("%s/%s/%d/%#02x", trackedGaugeName, t.protocol, version, reqCode) - metrics.GetOrRegisterGauge(g, nil).Inc(1) // If we've just inserted the first item, start the expiration timer if t.wake == nil { t.wake = time.AfterFunc(t.timeout, t.clean) } + return nil } // clean is called automatically when a preset time passes without a response @@ -142,11 +154,10 @@ func (t *Tracker) clean() { t.expire.Remove(head) delete(t.pending, id) - g := fmt.Sprintf("%s/%s/%d/%#02x", trackedGaugeName, t.protocol, req.version, req.reqCode) - metrics.GetOrRegisterGauge(g, nil).Dec(1) - - m := fmt.Sprintf("%s/%s/%d/%#02x", lostMeterName, t.protocol, req.version, req.reqCode) - metrics.GetOrRegisterMeter(m, nil).Mark(1) + if metrics.Enabled() { + t.trackedGauge(req.ReqCode).Dec(1) + t.lostMeter(req.ReqCode).Mark(1) + } } t.schedule() } @@ -161,45 +172,92 @@ func (t *Tracker) 
schedule() { t.wake = time.AfterFunc(time.Until(t.pending[t.expire.Front().Value.(uint64)].time.Add(t.timeout)), t.clean) } -// Fulfil fills a pending request, if any is available, reporting on various metrics. -func (t *Tracker) Fulfil(peer string, version uint, code uint64, id uint64) { - if !metrics.Enabled() { - return +// Stop reclaims resources of the tracker. +func (t *Tracker) Stop() { + t.lock.Lock() + defer t.lock.Unlock() + + if t.wake != nil { + t.wake.Stop() + t.wake = nil } + if metrics.Enabled() { + // Ensure metrics are decremented for pending requests. + counts := make(map[uint64]int64) + for _, req := range t.pending { + counts[req.ReqCode]++ + } + for code, count := range counts { + t.trackedGauge(code).Dec(count) + } + } + clear(t.pending) + t.expire = nil +} + +// Fulfil fills a pending request, if any is available, reporting on various metrics. +func (t *Tracker) Fulfil(resp Response) error { t.lock.Lock() defer t.lock.Unlock() // If it's a non existing request, track as stale response - req, ok := t.pending[id] + req, ok := t.pending[resp.ID] if !ok { - m := fmt.Sprintf("%s/%s/%d/%#02x", staleMeterName, t.protocol, version, code) - metrics.GetOrRegisterMeter(m, nil).Mark(1) - return + if metrics.Enabled() { + t.staleMeter(resp.MsgCode).Mark(1) + } + return ErrNoMatchingRequest } + // If the response is funky, it might be some active attack - if req.peer != peer || req.version != version || req.resCode != code { - log.Warn("Network response id collision", - "have", fmt.Sprintf("%s:%s/%d:%d", peer, t.protocol, version, code), - "want", fmt.Sprintf("%s:%s/%d:%d", peer, t.protocol, req.version, req.resCode), + if req.RespCode != resp.MsgCode { + log.Warn("Network response code collision", + "have", fmt.Sprintf("%s:%s/%d:%d", t.peer, t.cap.Name, t.cap.Version, resp.MsgCode), + "want", fmt.Sprintf("%s:%s/%d:%d", t.peer, t.cap.Name, t.cap.Version, req.RespCode), ) - return + return ErrCodeMismatch + } + if resp.Size > req.Size { + return 
ErrTooManyItems } + // Everything matches, mark the request serviced and meter it + wasHead := req.expire.Prev() == nil t.expire.Remove(req.expire) - delete(t.pending, id) - if req.expire.Prev() == nil { + delete(t.pending, req.ID) + if wasHead { if t.wake.Stop() { t.schedule() } } - g := fmt.Sprintf("%s/%s/%d/%#02x", trackedGaugeName, t.protocol, req.version, req.reqCode) - metrics.GetOrRegisterGauge(g, nil).Dec(1) - h := fmt.Sprintf("%s/%s/%d/%#02x", waitHistName, t.protocol, req.version, req.reqCode) + // Update request metrics. + if metrics.Enabled() { + t.trackedGauge(req.ReqCode).Dec(1) + t.waitHistogram(req.ReqCode).Update(time.Since(req.time).Microseconds()) + } + return nil +} + +func (t *Tracker) trackedGauge(code uint64) *metrics.Gauge { + name := fmt.Sprintf("%s/%s/%d/%#02x", trackedGaugeName, t.cap.Name, t.cap.Version, code) + return metrics.GetOrRegisterGauge(name, nil) +} + +func (t *Tracker) lostMeter(code uint64) *metrics.Meter { + name := fmt.Sprintf("%s/%s/%d/%#02x", lostMeterName, t.cap.Name, t.cap.Version, code) + return metrics.GetOrRegisterMeter(name, nil) +} + +func (t *Tracker) staleMeter(code uint64) *metrics.Meter { + name := fmt.Sprintf("%s/%s/%d/%#02x", staleMeterName, t.cap.Name, t.cap.Version, code) + return metrics.GetOrRegisterMeter(name, nil) +} + +func (t *Tracker) waitHistogram(code uint64) metrics.Histogram { + name := fmt.Sprintf("%s/%s/%d/%#02x", waitHistName, t.cap.Name, t.cap.Version, code) sampler := func() metrics.Sample { - return metrics.ResettingSample( - metrics.NewExpDecaySample(1028, 0.015), - ) + return metrics.ResettingSample(metrics.NewExpDecaySample(1028, 0.015)) } - metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(req.time).Microseconds()) + return metrics.GetOrRegisterHistogramLazy(name, nil, sampler) } diff --git a/p2p/tracker/tracker_test.go b/p2p/tracker/tracker_test.go new file mode 100644 index 0000000000..a37a59f70f --- /dev/null +++ b/p2p/tracker/tracker_test.go @@ -0,0 +1,64 @@ +// 
Copyright 2026 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package tracker + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/p2p" +) + +// This checks that metrics gauges for pending requests are decremented when a +// Tracker is stopped. +func TestMetricsOnStop(t *testing.T) { + metrics.Enable() + + cap := p2p.Cap{Name: "test", Version: 1} + tr := New(cap, "peer1", time.Minute) + + // Track some requests with different ReqCodes. 
+ var id uint64 + for i := 0; i < 3; i++ { + tr.Track(Request{ID: id, ReqCode: 0x01, RespCode: 0x02, Size: 1}) + id++ + } + for i := 0; i < 5; i++ { + tr.Track(Request{ID: id, ReqCode: 0x03, RespCode: 0x04, Size: 1}) + id++ + } + + gauge1 := tr.trackedGauge(0x01) + gauge2 := tr.trackedGauge(0x03) + + if gauge1.Snapshot().Value() != 3 { + t.Fatalf("gauge1 value mismatch: got %d, want 3", gauge1.Snapshot().Value()) + } + if gauge2.Snapshot().Value() != 5 { + t.Fatalf("gauge2 value mismatch: got %d, want 5", gauge2.Snapshot().Value()) + } + + tr.Stop() + + if gauge1.Snapshot().Value() != 0 { + t.Fatalf("gauge1 value after stop: got %d, want 0", gauge1.Snapshot().Value()) + } + if gauge2.Snapshot().Value() != 0 { + t.Fatalf("gauge2 value after stop: got %d, want 0", gauge2.Snapshot().Value()) + } +} diff --git a/params/config.go b/params/config.go index a7378767d7..8e9b40398a 100644 --- a/params/config.go +++ b/params/config.go @@ -250,10 +250,10 @@ var ( LorentzTime: newUint64(1745903100), // 2025-04-29 05:05:00 AM UTC MaxwellTime: newUint64(1751250600), // 2025-06-30 02:30:00 AM UTC FermiTime: newUint64(1768357800), // 2026-01-14 02:30:00 AM UTC - OsakaTime: nil, - MendelTime: nil, - BPO1Time: nil, // will be skipped in BSC - BPO2Time: nil, // will be skipped in BSC + OsakaTime: newUint64(1777343400), // 2026-04-28 02:30:00 AM UTC + MendelTime: newUint64(1777343400), // 2026-04-28 02:30:00 AM UTC + BPO1Time: nil, // will be skipped in BSC + BPO2Time: nil, // will be skipped in BSC AmsterdamTime: nil, PasteurTime: nil, diff --git a/rlp/encode.go b/rlp/encode.go index ed99275739..9d04e6324a 100644 --- a/rlp/encode.go +++ b/rlp/encode.go @@ -102,6 +102,29 @@ func EncodeToReader(val interface{}) (size int, r io.Reader, err error) { return buf.size(), &encReader{buf: buf}, nil } +// EncodeToRawList encodes val as an RLP list and returns it as a RawList. 
+func EncodeToRawList[T any](val []T) (RawList[T], error) { + if len(val) == 0 { + return RawList[T]{}, nil + } + + // Encode the value to an internal buffer. + buf := getEncBuffer() + defer encBufferPool.Put(buf) + if err := buf.encode(val); err != nil { + return RawList[T]{}, err + } + + // Create the RawList. RawList assumes the initial list header is padded + // 9 bytes, so we have to determine the offset where the value should be + // placed. + contentSize := buf.lheads[0].size + bytes := make([]byte, contentSize+9) + offset := 9 - headsize(uint64(contentSize)) + buf.copyTo(bytes[offset:]) + return RawList[T]{enc: bytes, length: len(val)}, nil +} + type listhead struct { offset int // index of this header in string data size int // total size of encoded data (including list headers) diff --git a/rlp/iterator.go b/rlp/iterator.go index 05567fa05b..9e41cec947 100644 --- a/rlp/iterator.go +++ b/rlp/iterator.go @@ -16,31 +16,35 @@ package rlp -type listIterator struct { - data []byte - next []byte - err error +// Iterator is an iterator over the elements of an encoded container. +type Iterator struct { + data []byte + next []byte + offset int + err error } -// NewListIterator creates an iterator for the (list) represented by data -func NewListIterator(data RawValue) (*listIterator, error) { +// NewListIterator creates an iterator for the (list) represented by data. +func NewListIterator(data RawValue) (Iterator, error) { k, t, c, err := readKind(data) if err != nil { - return nil, err + return Iterator{}, err } if k != List { - return nil, ErrExpectedList - } - it := &listIterator{ - data: data[t : t+c], + return Iterator{}, ErrExpectedList } + it := newIterator(data[t:t+c], int(t)) return it, nil } +func newIterator(data []byte, initialOffset int) Iterator { + return Iterator{data: data, offset: initialOffset} +} + // Next forwards the iterator one step. // Returns true if there is a next item or an error occurred on this step (check Err()). 
// On parse error, the iterator is marked finished and subsequent calls return false. -func (it *listIterator) Next() bool { +func (it *Iterator) Next() bool { if len(it.data) == 0 { return false } @@ -52,17 +56,34 @@ func (it *listIterator) Next() bool { it.data = nil return true } - it.next = it.data[:t+c] - it.data = it.data[t+c:] + length := t + c + it.next = it.data[:length] + it.data = it.data[length:] + it.offset += int(length) it.err = nil return true } -// Value returns the current value -func (it *listIterator) Value() []byte { +// Value returns the current value. +func (it *Iterator) Value() []byte { return it.next } -func (it *listIterator) Err() error { +// Count returns the remaining number of items. +// Note this is O(n) and the result may be incorrect if the list data is invalid. +// The returned count is always an upper bound on the remaining items +// that will be visited by the iterator. +func (it *Iterator) Count() int { + count, _ := CountValues(it.data) + return count +} + +// Offset returns the offset of the current value into the list data. +func (it *Iterator) Offset() int { + return it.offset - len(it.next) +} + +// Err returns the error that caused Next to return false, if any. 
+func (it *Iterator) Err() error { return it.err } diff --git a/rlp/iterator_test.go b/rlp/iterator_test.go index a22aaec862..275d4371c7 100644 --- a/rlp/iterator_test.go +++ b/rlp/iterator_test.go @@ -17,6 +17,7 @@ package rlp import ( + "io" "testing" "github.com/ethereum/go-ethereum/common/hexutil" @@ -38,14 +39,25 @@ func TestIterator(t *testing.T) { t.Fatal("expected two elems, got zero") } txs := it.Value() + if offset := it.Offset(); offset != 3 { + t.Fatal("wrong offset", offset, "want 3") + } + // Check that uncles exist if !it.Next() { t.Fatal("expected two elems, got one") } + if offset := it.Offset(); offset != 219 { + t.Fatal("wrong offset", offset, "want 219") + } + txit, err := NewListIterator(txs) if err != nil { t.Fatal(err) } + if c := txit.Count(); c != 2 { + t.Fatal("wrong Count:", c) + } var i = 0 for txit.Next() { if txit.err != nil { @@ -57,3 +69,65 @@ func TestIterator(t *testing.T) { t.Errorf("count wrong, expected %d got %d", i, exp) } } + +func TestIteratorErrors(t *testing.T) { + tests := []struct { + input []byte + wantCount int // expected Count before iterating + wantErr error + }{ + // Second item string header claims 3 bytes content, but only 2 remain. + {unhex("C4 01 83AABB"), 2, ErrValueTooLarge}, + // Second item truncated: B9 requires 2 size bytes, none available. + {unhex("C2 01 B9"), 2, io.ErrUnexpectedEOF}, + // 0x05 should be encoded directly, not as 81 05. + {unhex("C3 01 8105"), 2, ErrCanonSize}, + // Long-form string header B8 used for 1-byte content (< 56). + {unhex("C4 01 B801AA"), 2, ErrCanonSize}, + // Long-form list header F8 used for 1-byte content (< 56). 
+ {unhex("C4 01 F80101"), 2, ErrCanonSize}, + } + for _, tt := range tests { + it, err := NewListIterator(tt.input) + if err != nil { + t.Fatal("NewListIterator error:", err) + } + if c := it.Count(); c != tt.wantCount { + t.Fatalf("%x: Count = %d, want %d", tt.input, c, tt.wantCount) + } + n := 0 + for it.Next() { + if it.Err() != nil { + break + } + n++ + } + if wantN := tt.wantCount - 1; n != wantN { + t.Fatalf("%x: got %d valid items, want %d", tt.input, n, wantN) + } + if it.Err() != tt.wantErr { + t.Fatalf("%x: got error %v, want %v", tt.input, it.Err(), tt.wantErr) + } + if it.Next() { + t.Fatalf("%x: Next returned true after error", tt.input) + } + } +} + +func FuzzIteratorCount(f *testing.F) { + examples := [][]byte{unhex("010203"), unhex("018142"), unhex("01830202")} + for _, e := range examples { + f.Add(e) + } + f.Fuzz(func(t *testing.T, in []byte) { + it := newIterator(in, 0) + count := it.Count() + i := 0 + for it.Next() { + i++ + } + if i != count { + t.Fatalf("%x: count %d not equal to %d iterations", in, count, i) + } + }) +} diff --git a/rlp/raw.go b/rlp/raw.go index cec90346a1..ee57cdfd37 100644 --- a/rlp/raw.go +++ b/rlp/raw.go @@ -17,8 +17,10 @@ package rlp import ( + "fmt" "io" "reflect" + "slices" ) // RawValue represents an encoded RLP value and can be used to delay @@ -28,6 +30,144 @@ type RawValue []byte var rawValueType = reflect.TypeFor[RawValue]() +// RawList represents an encoded RLP list. +type RawList[T any] struct { + // The list is stored in encoded form. + // Note this buffer has some special properties: + // + // - if the buffer is nil, it's the zero value, representing + // an empty list. + // - if the buffer is non-nil, it must have a length of at least + // 9 bytes, which is reserved padding for the encoded list header. + // The remaining bytes, enc[9:], store the content bytes of the list. + // + // The implementation code mostly works with the Content method because it + // returns something valid either way. 
+ enc []byte + + // length holds the number of items in the list. + length int +} + +// Content returns the RLP-encoded data of the list. +// This does not include the list-header. +// The return value is a direct reference to the internal buffer, not a copy. +func (r *RawList[T]) Content() []byte { + if r.enc == nil { + return nil + } else { + return r.enc[9:] + } +} + +// EncodeRLP writes the encoded list to the writer. +func (r RawList[T]) EncodeRLP(w io.Writer) error { + _, err := w.Write(r.Bytes()) + return err +} + +// Bytes returns the RLP encoding of the list. +// Note the return value aliases the internal buffer. +func (r *RawList[T]) Bytes() []byte { + if r == nil || r.enc == nil { + return []byte{0xC0} // zero value encodes as empty list + } + n := puthead(r.enc, 0xC0, 0xF7, uint64(len(r.Content()))) + copy(r.enc[9-n:], r.enc[:n]) + return r.enc[9-n:] +} + +// DecodeRLP decodes the list. This does not perform validation of the items! +func (r *RawList[T]) DecodeRLP(s *Stream) error { + k, size, err := s.Kind() + if err != nil { + return err + } + if k != List { + return fmt.Errorf("%w for %T", ErrExpectedList, r) + } + enc := make([]byte, 9+size) + if err := s.readFull(enc[9:]); err != nil { + return err + } + n, err := CountValues(enc[9:]) + if err != nil { + if err == ErrValueTooLarge { + return ErrElemTooLarge + } + return err + } + *r = RawList[T]{enc: enc, length: n} + return nil +} + +// Items decodes and returns all items in the list. +func (r *RawList[T]) Items() ([]T, error) { + items := make([]T, r.Len()) + it := r.ContentIterator() + for i := 0; it.Next(); i++ { + if err := DecodeBytes(it.Value(), &items[i]); err != nil { + return items[:i], err + } + } + return items, nil +} + +// Len returns the number of items in the list. +func (r *RawList[T]) Len() int { + return r.length +} + +// Size returns the encoded size of the list. 
+func (r *RawList[T]) Size() uint64 { + return ListSize(uint64(len(r.Content()))) +} + +// ContentIterator returns an iterator over the content of the list. +// Note the offsets returned by iterator.Offset are relative to the +// Content bytes of the list. +func (r *RawList[T]) ContentIterator() Iterator { + return newIterator(r.Content(), 0) +} + +// Append adds an item to the end of the list. +func (r *RawList[T]) Append(item T) error { + if r.enc == nil { + r.enc = make([]byte, 9) + } + + eb := getEncBuffer() + defer encBufferPool.Put(eb) + + if err := eb.encode(item); err != nil { + return err + } + prevEnd := len(r.enc) + end := prevEnd + eb.size() + r.enc = slices.Grow(r.enc, eb.size())[:end] + eb.copyTo(r.enc[prevEnd:end]) + r.length++ + return nil +} + +// AppendRaw adds an encoded item to the list. +// The given byte slice must contain exactly one RLP value. +func (r *RawList[T]) AppendRaw(b []byte) error { + _, tagsize, contentsize, err := readKind(b) + if err != nil { + return err + } + if tagsize+contentsize != uint64(len(b)) { + return fmt.Errorf("rlp: input has trailing bytes in AppendRaw") + } + if r.enc == nil { + r.enc = make([]byte, 9) + } + r.enc = append(r.enc, b...) + r.length++ + return nil +} + // StringSize returns the encoded size of a string. 
func StringSize(s string) uint64 { switch n := len(s); n { @@ -145,7 +285,7 @@ func CountValues(b []byte) (int, error) { for ; len(b) > 0; i++ { _, tagsize, size, err := readKind(b) if err != nil { - return 0, err + return i + 1, err } b = b[tagsize+size:] } diff --git a/rlp/raw_test.go b/rlp/raw_test.go index 7b3255eca3..e80b427182 100644 --- a/rlp/raw_test.go +++ b/rlp/raw_test.go @@ -19,11 +19,259 @@ package rlp import ( "bytes" "errors" + "fmt" "io" + "reflect" "testing" "testing/quick" ) +type rawListTest[T any] struct { + input string + content string + items []T + length int +} + +func (test rawListTest[T]) name() string { + return fmt.Sprintf("%T-%d", *new(T), test.length) +} + +func (test rawListTest[T]) run(t *testing.T) { + // check decoding and properties + input := unhex(test.input) + inputSize := len(input) + var rl RawList[T] + if err := DecodeBytes(input, &rl); err != nil { + t.Fatal("decode failed:", err) + } + if l := rl.Len(); l != test.length { + t.Fatalf("wrong Len %d, want %d", l, test.length) + } + if sz := rl.Size(); sz != uint64(inputSize) { + t.Fatalf("wrong Size %d, want %d", sz, inputSize) + } + items, err := rl.Items() + if err != nil { + t.Fatal("Items failed:", err) + } + if !reflect.DeepEqual(items, test.items) { + t.Fatal("wrong items:", items) + } + if !bytes.Equal(rl.Content(), unhex(test.content)) { + t.Fatalf("wrong Content %x, want %s", rl.Content(), test.content) + } + if !bytes.Equal(rl.Bytes(), unhex(test.input)) { + t.Fatalf("wrong Bytes %x, want %s", rl.Bytes(), test.input) + } + + // check iterator + it := rl.ContentIterator() + i := 0 + for it.Next() { + var item T + if err := DecodeBytes(it.Value(), &item); err != nil { + t.Fatalf("item %d decode error: %v", i, err) + } + if !reflect.DeepEqual(item, items[i]) { + t.Fatalf("iterator has wrong item %v at %d", item, i) + } + i++ + } + if i != test.length { + t.Fatalf("iterator produced %d values, want %d", i, test.length) + } + if it.Err() != nil { + t.Fatalf("iterator 
error: %v", it.Err()) + } + + // check encoding round trip + output, err := EncodeToBytes(&rl) + if err != nil { + t.Fatal("encode error:", err) + } + if !bytes.Equal(output, unhex(test.input)) { + t.Fatalf("encoding does not round trip: %x", output) + } + + // check EncodeToRawList on items produces same bytes + encRL, err := EncodeToRawList(test.items) + if err != nil { + t.Fatal("EncodeToRawList error:", err) + } + encRLOutput, err := EncodeToBytes(&encRL) + if err != nil { + t.Fatal("EncodeToBytes of encoded list failed:", err) + } + if !bytes.Equal(encRLOutput, output) { + t.Fatalf("wrong encoding of EncodeToRawList result: %x", encRLOutput) + } +} + +func TestRawList(t *testing.T) { + tests := []interface { + name() string + run(t *testing.T) + }{ + rawListTest[uint64]{ + input: "C0", + content: "", + items: []uint64{}, + length: 0, + }, + rawListTest[uint64]{ + input: "C3010203", + content: "010203", + items: []uint64{1, 2, 3}, + length: 3, + }, + rawListTest[simplestruct]{ + input: "C6C20102C20304", + content: "C20102C20304", + items: []simplestruct{{1, "\x02"}, {3, "\x04"}}, + length: 2, + }, + rawListTest[string]{ + input: "F83C836161618362626283636363836464648365656583666666836767678368686883696969836A6A6A836B6B6B836C6C6C836D6D6D836E6E6E836F6F6F", + content: "836161618362626283636363836464648365656583666666836767678368686883696969836A6A6A836B6B6B836C6C6C836D6D6D836E6E6E836F6F6F", + items: []string{"aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo"}, + length: 15, + }, + } + + for _, test := range tests { + t.Run(test.name(), test.run) + } +} + +func TestRawListEmpty(t *testing.T) { + // zero value list + var rl RawList[uint64] + b, _ := EncodeToBytes(&rl) + if !bytes.Equal(b, unhex("C0")) { + t.Fatalf("empty RawList has wrong encoding %x", b) + } + if rl.Len() != 0 { + t.Fatalf("empty list has Len %d", rl.Len()) + } + if rl.Size() != 1 { + t.Fatalf("empty list has Size %d", rl.Size()) + } + if 
len(rl.Content()) > 0 { + t.Fatalf("empty list has non-empty Content") + } + if !bytes.Equal(rl.Bytes(), []byte{0xC0}) { + t.Fatalf("empty list has wrong encoding") + } + + // nil pointer + var nilptr *RawList[uint64] + b, _ = EncodeToBytes(nilptr) + if !bytes.Equal(b, unhex("C0")) { + t.Fatalf("nil pointer to RawList has wrong encoding %x", b) + } +} + +// This checks that *RawList works in an 'optional' context. +func TestRawListOptional(t *testing.T) { + type foo struct { + L *RawList[uint64] `rlp:"optional"` + } + // nil pointer encoding + var empty foo + b, _ := EncodeToBytes(empty) + if !bytes.Equal(b, unhex("C0")) { + t.Fatalf("nil pointer to RawList has wrong encoding %x", b) + } + // decoding + var dec foo + if err := DecodeBytes(unhex("C0"), &dec); err != nil { + t.Fatal(err) + } + if dec.L != nil { + t.Fatal("rawlist was decoded as non-nil") + } +} + +func TestRawListAppend(t *testing.T) { + var rl RawList[simplestruct] + + v1 := simplestruct{1, "one"} + v2 := simplestruct{2, "two"} + if err := rl.Append(v1); err != nil { + t.Fatal("append 1 failed:", err) + } + if err := rl.Append(v2); err != nil { + t.Fatal("append 2 failed:", err) + } + + if rl.Len() != 2 { + t.Fatalf("wrong Len %d", rl.Len()) + } + if rl.Size() != 13 { + t.Fatalf("wrong Size %d", rl.Size()) + } + if !bytes.Equal(rl.Content(), unhex("C501836F6E65 C5028374776F")) { + t.Fatalf("wrong Content %x", rl.Content()) + } + encoded, _ := EncodeToBytes(&rl) + if !bytes.Equal(encoded, unhex("CC C501836F6E65 C5028374776F")) { + t.Fatalf("wrong encoding %x", encoded) + } +} + +func TestRawListAppendRaw(t *testing.T) { + var rl RawList[uint64] + + if err := rl.AppendRaw(unhex("01")); err != nil { + t.Fatal("AppendRaw(01) failed:", err) + } + if err := rl.AppendRaw(unhex("820102")); err != nil { + t.Fatal("AppendRaw(820102) failed:", err) + } + if rl.Len() != 2 { + t.Fatalf("wrong Len %d after valid appends", rl.Len()) + } + + if err := rl.AppendRaw(nil); err == nil { + t.Fatal("AppendRaw(nil) should 
fail") + } + if err := rl.AppendRaw(unhex("0102")); err == nil { + t.Fatal("AppendRaw(0102) should fail due to trailing bytes") + } + if err := rl.AppendRaw(unhex("8201")); err == nil { + t.Fatal("AppendRaw(8201) should fail due to truncated value") + } + if rl.Len() != 2 { + t.Fatalf("wrong Len %d after invalid appends, want 2", rl.Len()) + } +} + +func TestRawListDecodeInvalid(t *testing.T) { + tests := []struct { + input string + err error + }{ + // Single item with non-canonical size (0x81 wrapping byte <= 0x7F). + {input: "C28142", err: ErrCanonSize}, + // Single item claiming more bytes than available in the list. + {input: "C484020202", err: ErrElemTooLarge}, + // Two items, second has non-canonical size. + {input: "C3018142", err: ErrCanonSize}, + // Two items, second claims more bytes than remain in the list. + {input: "C401830202", err: ErrElemTooLarge}, + // Item is a sub-list whose declared size exceeds available bytes. + {input: "C3C40102", err: ErrElemTooLarge}, + } + for _, test := range tests { + var rl RawList[RawValue] + err := DecodeBytes(unhex(test.input), &rl) + if !errors.Is(err, test.err) { + t.Errorf("input %s: error mismatch: got %v, want %v", test.input, err, test.err) + } + } +} + func TestCountValues(t *testing.T) { tests := []struct { input string // note: spaces in input are stripped by unhex @@ -40,9 +288,9 @@ func TestCountValues(t *testing.T) { {"820101 820202 8403030303 04", 4, nil}, // size errors - {"8142", 0, ErrCanonSize}, - {"01 01 8142", 0, ErrCanonSize}, - {"02 84020202", 0, ErrValueTooLarge}, + {"8142", 1, ErrCanonSize}, + {"01 01 8142", 3, ErrCanonSize}, + {"02 84020202", 2, ErrValueTooLarge}, { input: "A12000BF49F440A1CD0527E4D06E2765654C0F56452257516D793A9B8D604DCFDF2AB853F851808D10000000000000000000000000A056E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421A0C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470", diff --git a/trie/node.go b/trie/node.go index 65e5c003e1..6316884bf6 100644 --- 
a/trie/node.go +++ b/trie/node.go @@ -165,11 +165,14 @@ func decodeNodeUnsafe(hash, buf []byte) (node, error) { if err != nil { return nil, fmt.Errorf("decode error: %v", err) } - switch c, _ := rlp.CountValues(elems); c { - case 2: + c, err := rlp.CountValues(elems) + switch { + case err != nil: + return nil, fmt.Errorf("invalid node list: %v", err) + case c == 2: n, err := decodeShort(hash, elems) return n, wrapError(err, "short") - case 17: + case c == 17: n, err := decodeFull(hash, elems) return n, wrapError(err, "full") default: diff --git a/version/version.go b/version/version.go index e6bc50c436..86a22968c0 100644 --- a/version/version.go +++ b/version/version.go @@ -19,6 +19,6 @@ package version const ( Major = 1 // Major version component of the current release Minor = 7 // Minor version component of the current release - Patch = 1 // Patch version component of the current release + Patch = 2 // Patch version component of the current release Meta = "" // Version metadata to append to the version string )