diff --git a/.github/workflows/qa-rpc-integration-tests-gnosis.yml b/.github/workflows/qa-rpc-integration-tests-gnosis.yml index 9fbd229b498..46cca6bdaec 100644 --- a/.github/workflows/qa-rpc-integration-tests-gnosis.yml +++ b/.github/workflows/qa-rpc-integration-tests-gnosis.yml @@ -19,7 +19,7 @@ on: jobs: gnosis-rpc-integ-tests: - if: false # Temporarily disabled + if: false # Temporarily disabled concurrency: group: >- ${{ diff --git a/.github/workflows/qa-rpc-integration-tests-polygon.yml b/.github/workflows/qa-rpc-integration-tests-polygon.yml index 00fc970664c..95f8288756c 100644 --- a/.github/workflows/qa-rpc-integration-tests-polygon.yml +++ b/.github/workflows/qa-rpc-integration-tests-polygon.yml @@ -19,7 +19,7 @@ on: jobs: bor-mainnet-rpc-integ-tests: - if: false # Temporarily disabled + if: false # Temporarily disabled concurrency: group: >- ${{ diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index 6070da57a48..432bf56bcdb 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -19,7 +19,7 @@ on: jobs: mainnet-rpc-integ-tests: - if: false # Temporarily disabled + if: false # Temporarily disabled concurrency: group: >- ${{ diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8e08e507234..8ec6fdd19da 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -201,105 +201,10 @@ jobs: compression-level: 0 if-no-files-found: error - test-release: - name: test on ${{ matrix.id }} - if: ${{ ! inputs.skip_tests }} - runs-on: [ self-hosted, qa, Release, "${{ matrix.runner-arch }}" ] - timeout-minutes: 2800 # 2 days - needs: [ build-release ] - strategy: - matrix: - include: - - id: linux/amd64 - runner-arch: X64 - artifact: linux_amd64 - - id: linux/arm64 - runner-arch: ARM64 - artifact: linux_arm64 - - steps: - - - name: Cleanup working directory - run: rm -drfv * - - - name: Set up Python - uses: actions/setup-python@v6 - with: - python-version: '3.12' - - - name: Download artifact ${{ env.APPLICATION }}_${{ inputs.release_version }}_${{ matrix.artifact }}.tar - uses: actions/download-artifact@v5 - with: - name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_${{ matrix.artifact }}.tar - path: . - - - name: Extract artifact ${{ env.APPLICATION }}_${{ inputs.release_version }}_${{ matrix.artifact }}.tar - env: - RELEASE_VERSION: ${{ inputs.release_version }} - run: | - pwd - ls -l ${{ env.APPLICATION }}_${RELEASE_VERSION}_${{ matrix.artifact }}.tar - tar xvf ${{ env.APPLICATION }}_${RELEASE_VERSION}_${{ matrix.artifact }}.tar - ls -lR - - - name: Fast checkout git repository erigontech/erigon-qa - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 ## 5.0.0 - with: - token: ${{ secrets.ORG_GITHUB_ERIGONTECH_ERIGON_QA_READ }} - repository: erigontech/erigon-qa - fetch-depth: 1 - ref: main - path: erigon-qa - - - name: Run QA Tests - env: - RELEASE_VERSION: ${{ inputs.release_version }} - run: | - cd ./erigon-qa/test_system - pwd - ls -lao - pip3 install -r requirements.txt - ln -s $(pwd)/base_library $(pwd)/qa-tests/tip-tracking/base_library - echo "DEBUG -- content of directory $(pwd) :" - ls -l - echo "DEBUG -- content of directory $(pwd)/qa-tests/tip-tracking/" - ls -l $(pwd)/qa-tests/tip-tracking/ - echo "DEBUG -- content of directory GITHUB_WORKSPACE ${GITHUB_WORKSPACE} :" - ls -l ${GITHUB_WORKSPACE} - echo "DEBUG -- end." 
- rm -rf ${RUNNER_WORKSPACE}/erigon-data || true - mkdir ${RUNNER_WORKSPACE}/erigon-data - # Run Erigon, wait sync and check ability to maintain sync - python3 qa-tests/tip-tracking/run_and_check_tip_tracking.py \ - ${GITHUB_WORKSPACE}/${{ env.APPLICATION }}_${RELEASE_VERSION}_${{ matrix.artifact }} \ - ${RUNNER_WORKSPACE}/erigon-data ${{ env.TEST_TRACKING_TIME_SECONDS }} ${{ env.TEST_TOTAL_TIME_SECONDS }} ${{ env.APPLICATION_VERSION }} ${{ env.TEST_CHAIN }} - # Capture monitoring script exit status - test_exit_status=$? - # Save the subsection reached status - echo "test_executed=true" >> "$GITHUB_OUTPUT" - # Check test runner script exit status - if [ $test_exit_status -eq 0 ]; then - echo "Tests completed successfully" - echo "TEST_RESULT=success" >> "$GITHUB_OUTPUT" - else - echo "Error detected during tests" - echo "TEST_RESULT=failure" >> "$GITHUB_OUTPUT" - fi - - - name: Upload Downloader Torrent Client Status - if: always() - uses: actions/upload-artifact@v4 - with: - name: torrent-client-status-${{ matrix.artifact }} - path: torrent-client-status.txt - - - name: Cleanup working directory - run: rm -drfv * - build-debian-pkg: name: Debian packages - needs: [ build-release, test-release ] - if: always() && contains(needs.build-release.result, 'success') && !contains(needs.test-release.result, 'failure') + needs: [ build-release ] + if: always() && contains(needs.build-release.result, 'success') uses: ./.github/workflows/reusable-release-build-debian-pkg.yml with: application: ${{ needs.build-release.outputs.application }} @@ -307,11 +212,11 @@ jobs: publish-docker-image: - needs: [ build-release, test-release ] - if: always() && contains(needs.build-release.result, 'success') && !contains(needs.test-release.result, 'failure') + needs: [ build-release ] + if: always() && contains(needs.build-release.result, 'success') runs-on: ubuntu-latest timeout-minutes: 30 - name: Docker image + name: Docker image steps: @@ -422,8 +327,8 @@ jobs: In-case-of-failure: name: "In case of failure: remove remote git tag pointing to the new version." - needs: [ publish-release, build-release, test-release, build-debian-pkg, publish-docker-image ] - if: always() && !contains(needs.build-release.result, 'success') && contains(needs.test-release.result, 'failure') && !contains(needs.publish-release.result, 'success') && !contains(needs.build-debian-pkg.result, 'success') && !contains(needs.publish-docker-image.result, 'success') + needs: [ publish-release, build-release, build-debian-pkg, publish-docker-image ] + if: always() && !contains(needs.build-release.result, 'success') && !contains(needs.publish-release.result, 'success') && !contains(needs.build-debian-pkg.result, 'success') && !contains(needs.publish-docker-image.result, 'success') runs-on: ubuntu-latest env: RELEASE_VERSION: ${{ inputs.release_version }} diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index b7957109e64..c5009bd9506 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -1397,6 +1397,7 @@ func newSync(ctx context.Context, db kv.TemporalRwDB, miningConfig *buildercfg.M maxBlockBroadcastPeers, false, /* disableBlockDownload */ false, /* enableWitProtocol */ + nil, /* bridgeReader */ logger, ) if err != nil { diff --git a/core/blockchain.go b/core/blockchain.go index a6cf0b54be2..042f6af0237 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -209,6 +209,7 @@ func ExecuteBlockEphemerally( logs = append(logs, receipt.Logs...) 
} + // PIP-74 state-sync receipt handling. stateSyncReceipt := &types.Receipt{} if chainConfig.Consensus == chain.BorConsensus && len(blockLogs) > 0 { slices.SortStableFunc(blockLogs, func(i, j *types.Log) int { return cmp.Compare(i.Index, j.Index) }) diff --git a/eth/backend.go b/eth/backend.go index d47f8813d5a..c22eaf2db9d 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -717,6 +717,14 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } } + // Create bridge.Reader for P2P state sync receipts. + // When Erigon nodes serve receipts to peers via P2P, they need bridge.Reader + // to include state sync transaction receipts in the response. + var bridgeReaderForP2P *bridge.Reader + if bridgeStore != nil && chainConfig.Bor != nil { + bridgeReaderForP2P = bridge.NewReader(bridgeStore, logger, chainConfig.Bor.StateReceiverContractAddress()) + } + sentryMcDisableBlockDownload := chainConfig.Bor != nil || config.ElBlockDownloaderV2 backend.sentriesClient, err = sentry_multi_client.NewMultiClient( backend.chainDB, @@ -731,6 +739,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger maxBlockBroadcastPeers, sentryMcDisableBlockDownload, stack.Config().P2P.EnableWitProtocol, + bridgeReaderForP2P, logger, ) if err != nil { diff --git a/execution/stages/blockchain_test.go b/execution/stages/blockchain_test.go index df5829efd0a..01e61d2d80f 100644 --- a/execution/stages/blockchain_test.go +++ b/execution/stages/blockchain_test.go @@ -307,7 +307,9 @@ func testReorgLong(t *testing.T) { // Tests that reorganising a short difficult chain after a long easy one // overwrites the canonical numbers and links in the database. -func TestReorgShortBlocks(t *testing.T) { testReorgShort(t) } +func TestReorgShortBlocks(t *testing.T) { + testReorgShort(t) +} func testReorgShort(t *testing.T) { t.Parallel() diff --git a/execution/stages/mock/mock_sentry.go b/execution/stages/mock/mock_sentry.go index 04c1bab80a7..78f287ea3f0 100644 --- a/execution/stages/mock/mock_sentry.go +++ b/execution/stages/mock/mock_sentry.go @@ -323,7 +323,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK PeerId: gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}), // "12345" BlockSnapshots: allSnapshots, BlockReader: br, - ReceiptsReader: receipts.NewGenerator(br, engine, 5*time.Second), + ReceiptsReader: receipts.NewGenerator(br, engine, 5*time.Second, nil), HistoryV3: true, cfg: cfg, } @@ -442,6 +442,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK maxBlockBroadcastPeers, false, /* disableBlockDownload */ false, /* enableWitProtocol */ + nil, /* bridgeReader */ logger, ) if err != nil { diff --git a/execution/types/receipt.go b/execution/types/receipt.go index 2be813c42e7..c4b50c483aa 100644 --- a/execution/types/receipt.go +++ b/execution/types/receipt.go @@ -580,9 +580,30 @@ func (e receiptEncoder69) EncodeRLP(w io.Writer) error { return e.r.EncodeRLP69( func (rs Receipts) EncodeRLP69(w io.Writer) error { encs := make([]receiptEncoder69, len(rs)) + n := len(rs) + for i := range rs { - encs[i] = receiptEncoder69{r: rs[i]} + // Copy the receipt reference. + r := rs[i] + + // Only the last receipt can be a state-sync tx. + if i == n-1 { + // Post-Madhugiri, the state-sync transaction is a typed tx (StateSyncTxType) + // and must keep its type for eth/69 encoding. Only apply the pre-HF heuristic + // when the receipt is not already the typed state-sync receipt. 
+ if r.Type != StateSyncTxType { + // Match the ReadStateSyncReceiptByHash logic for pre-HF: + // It's a state-sync transaction if the cumulative gas is zero, or + // the cumulative gas is equal to the previous one (zero gas usage) + if r.CumulativeGasUsed == 0 || (n >= 2 && r.CumulativeGasUsed == rs[n-2].CumulativeGasUsed) { + r.Type = LegacyTxType + } + } + } + + encs[i] = receiptEncoder69{r: r} } + return rlp.Encode(w, encs) } diff --git a/node/nodecfg/defaults.go b/node/nodecfg/defaults.go index db310032a37..cb6861dda32 100644 --- a/node/nodecfg/defaults.go +++ b/node/nodecfg/defaults.go @@ -48,7 +48,7 @@ var DefaultConfig = Config{ WSModules: []string{"net", "web3"}, P2P: p2p.Config{ ListenAddr: ":30303", - ProtocolVersion: []uint{direct.ETH68, direct.ETH69}, // Keep eth/68 in first index for Hive tests + ProtocolVersion: []uint{direct.ETH69, direct.ETH68}, MaxPeers: 32, MaxPendingPeers: 1000, NAT: nat.Any(), diff --git a/p2p/protocols/eth/protocol.go b/p2p/protocols/eth/protocol.go index f3ea9c6e66a..67a0dbd2689 100644 --- a/p2p/protocols/eth/protocol.go +++ b/p2p/protocols/eth/protocol.go @@ -181,6 +181,7 @@ type StatusPacket struct { type StatusPacket69 struct { ProtocolVersion uint32 NetworkID uint64 + TD *big.Int Genesis common.Hash ForkID forkid.ID MinimumBlock, LatestBlock uint64 diff --git a/p2p/sentry/eth_handshake.go b/p2p/sentry/eth_handshake.go index a5c3f0aa7e0..1ca17decc59 100644 --- a/p2p/sentry/eth_handshake.go +++ b/p2p/sentry/eth_handshake.go @@ -172,10 +172,12 @@ func encodeStatusPacket(status *sentryproto.StatusData, version uint) eth.Status } func encodeStatusPacket69(status *sentryproto.StatusData, version uint) eth.StatusPacket69 { + ourTD := gointerfaces.ConvertH256ToUint256Int(status.TotalDifficulty) genesisHash := gointerfaces.ConvertH256ToHash(status.ForkData.Genesis) return eth.StatusPacket69{ ProtocolVersion: uint32(version), NetworkID: status.NetworkId, + TD: ourTD.ToBig(), Genesis: genesisHash, ForkID: forkid.NewIDFromForks(status.ForkData.HeightForks, status.ForkData.TimeForks, genesisHash, status.MaxBlockHeight, status.MaxBlockTime), MinimumBlock: status.MinimumBlockHeight, diff --git a/p2p/sentry/sentry_multi_client/sentry_multi_client.go b/p2p/sentry/sentry_multi_client/sentry_multi_client.go index 5804be8994d..0413d5f5810 100644 --- a/p2p/sentry/sentry_multi_client/sentry_multi_client.go +++ b/p2p/sentry/sentry_multi_client/sentry_multi_client.go @@ -294,6 +294,12 @@ type MultiClient struct { var _ eth.ReceiptsGetter = new(receipts.Generator) // compile-time interface-check +// bridgeReader interface for reading bridge events (state sync). +type bridgeReader interface { + Events(ctx context.Context, blockHash common.Hash, blockNum uint64) ([]*types.Message, error) + EventTxnLookup(ctx context.Context, borTxHash common.Hash) (uint64, bool, error) +} + func NewMultiClient( db kv.TemporalRoDB, chainConfig *chain.Config, @@ -307,6 +313,7 @@ func NewMultiClient( maxBlockBroadcastPeers func(*types.Header) uint, disableBlockDownload bool, enableWitProtocol bool, + bridgeReader bridgeReader, logger log.Logger, ) (*MultiClient, error) { // header downloader @@ -348,6 +355,13 @@ func NewMultiClient( witnessBuffer = stagedsync.NewWitnessBuffer() } + // Create BorGenerator for state sync receipts if bridgeReader is provided. + // This is required for Erigon to include state sync transaction receipts in P2P GetReceipts responses. 
+ var borGenerator *receipts.BorGenerator + if bridgeReader != nil { + borGenerator = receipts.NewBorGenerator(blockReader, engine, bridgeReader) + } + cs := &MultiClient{ Hd: hd, Bd: bd, @@ -364,7 +378,7 @@ func NewMultiClient( disableBlockDownload: disableBlockDownload, logger: logger, getReceiptsActiveGoroutineNumber: semaphore.NewWeighted(1), - ethApiWrapper: receipts.NewGenerator(blockReader, engine, 5*time.Minute), + ethApiWrapper: receipts.NewGenerator(blockReader, engine, 5*time.Minute, borGenerator), } return cs, nil diff --git a/rpc/jsonrpc/eth_api.go b/rpc/jsonrpc/eth_api.go index 035df699d3e..430987b2b7c 100644 --- a/rpc/jsonrpc/eth_api.go +++ b/rpc/jsonrpc/eth_api.go @@ -157,6 +157,8 @@ func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader serv panic(err) } + borReceiptGenerator := receipts.NewBorGenerator(blockReader, engine, bridgeReader) + return &BaseAPI{ filters: f, stateCache: stateCache, @@ -166,8 +168,8 @@ func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader serv _txNumReader: blockReader.TxnumReader(context.Background()), evmCallTimeout: evmCallTimeout, _engine: engine, - receiptsGenerator: receipts.NewGenerator(blockReader, engine, evmCallTimeout), - borReceiptGenerator: receipts.NewBorGenerator(blockReader, engine), + receiptsGenerator: receipts.NewGenerator(blockReader, engine, evmCallTimeout, borReceiptGenerator), + borReceiptGenerator: borReceiptGenerator, dirs: dirs, bridgeReader: bridgeReader, } diff --git a/rpc/jsonrpc/eth_receipts.go b/rpc/jsonrpc/eth_receipts.go index 1241b601fd1..3ca248f01f0 100644 --- a/rpc/jsonrpc/eth_receipts.go +++ b/rpc/jsonrpc/eth_receipts.go @@ -553,18 +553,25 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, numberOrHash rpc.Block if err != nil { return nil, fmt.Errorf("getReceipts error: %w", err) } - result := make([]map[string]interface{}, 0, len(receipts)) - for _, receipt := range receipts { - txn := block.Transactions()[receipt.TransactionIndex] - result = append(result, ethutils.MarshalReceipt(receipt, txn, chainConfig, block.HeaderNoCopy(), txn.Hash(), true, true)) - } - if chainConfig.Bor == nil { - return result, nil + numReceipts := len(receipts) + numTxs := len(block.Transactions()) + result := make([]map[string]interface{}, 0, numReceipts) + + for _, receipt := range receipts { + // State-sync receipts' TransactionIndex is equal to numTxs. + if int(receipt.TransactionIndex) == numTxs { + // This is a state-sync transaction receipt. + result = append(result, ethutils.MarshalReceipt(receipt, bortypes.NewBorTransaction(), chainConfig, block.HeaderNoCopy(), receipt.TxHash, false, true)) + } else { + // This is a normal transaction receipt. 
+ txn := block.Transactions()[receipt.TransactionIndex] + result = append(result, ethutils.MarshalReceipt(receipt, txn, chainConfig, block.HeaderNoCopy(), txn.Hash(), true, true)) + } } var borTx types.Transaction = bortypes.NewBorTransaction() - if chainConfig.Bor.IsMadhugiri(blockNum) && len(receipts)+1 == len(block.Transactions()) { + if chainConfig.Bor != nil && chainConfig.Bor.IsMadhugiri(blockNum) && len(receipts)+1 == len(block.Transactions()) { borTx = block.Transactions()[len(block.Transactions())-1] if borTx.Type() != types.StateSyncTxType { return result, nil diff --git a/rpc/jsonrpc/receipts/bor_receipts_generator.go b/rpc/jsonrpc/receipts/bor_receipts_generator.go index 150d7eebec7..c7a8688d27d 100644 --- a/rpc/jsonrpc/receipts/bor_receipts_generator.go +++ b/rpc/jsonrpc/receipts/bor_receipts_generator.go @@ -22,14 +22,19 @@ import ( "github.com/erigontech/erigon/turbo/transactions" ) +type bridgeReader interface { + Events(ctx context.Context, blockHash common.Hash, blockNum uint64) ([]*types.Message, error) + EventTxnLookup(ctx context.Context, borTxHash common.Hash) (uint64, bool, error) +} + type BorGenerator struct { receiptCache *lru.Cache[common.Hash, *types.Receipt] blockReader services.FullBlockReader engine consensus.EngineReader + bridgeReader bridgeReader } -func NewBorGenerator(blockReader services.FullBlockReader, - engine consensus.EngineReader) *BorGenerator { +func NewBorGenerator(blockReader services.FullBlockReader, engine consensus.EngineReader, bridgeReader bridgeReader) *BorGenerator { receiptCache, err := lru.New[common.Hash, *types.Receipt](receiptsCacheLimit) if err != nil { panic(err) @@ -39,6 +44,7 @@ func NewBorGenerator(blockReader services.FullBlockReader, receiptCache: receiptCache, blockReader: blockReader, engine: engine, + bridgeReader: bridgeReader, } } @@ -83,7 +89,7 @@ func (g *BorGenerator) GenerateBorReceipt(ctx context.Context, tx kv.TemporalTx, txHash = bortypes.ComputeBorTxHash(block.NumberU64(), block.Hash()) } - receipt, err := applyBorTransaction(msgs, evm, gp, ibs, block.Number(), block.Hash(), txHash, uint(txIndex), cumGasUsedInLastBlock, uint(logIdxAfterTx), rawtemporaldb.ReceiptStoresFirstLogIdx(tx)) + receipt, err := applyBorTransaction(chainConfig, msgs, evm, gp, ibs, block.Number(), block.Hash(), txHash, uint(txIndex), cumGasUsedInLastBlock, uint(logIdxAfterTx), rawtemporaldb.ReceiptStoresFirstLogIdx(tx)) if err != nil { return nil, err } @@ -143,14 +149,22 @@ func getBorLogs(msgs []*types.Message, evm *vm.EVM, gp *core.GasPool, ibs *state return receiptLogs, nil } -func applyBorTransaction(msgs []*types.Message, evm *vm.EVM, gp *core.GasPool, ibs *state.IntraBlockState, blockNumber *big.Int, blockHash common.Hash, txHash common.Hash, txIndex uint, cumulativeGasUsed uint64, logIdxAfterTx uint, receiptWithFirstLogIdx bool) (*types.Receipt, error) { +func applyBorTransaction(chainConfig *chain.Config, msgs []*types.Message, evm *vm.EVM, gp *core.GasPool, ibs *state.IntraBlockState, blockNumber *big.Int, blockHash common.Hash, txHash common.Hash, txIndex uint, cumulativeGasUsed uint64, logIdxAfterTx uint, receiptWithFirstLogIdx bool) (*types.Receipt, error) { receiptLogs, err := getBorLogs(msgs, evm, gp, ibs, blockNumber.Uint64(), blockHash, txHash, txIndex, logIdxAfterTx, receiptWithFirstLogIdx) if err != nil { return nil, err } + var receiptType uint8 + if chainConfig.Bor.IsMadhugiri(blockNumber.Uint64()) { + receiptType = types.StateSyncTxType + } else { + receiptType = types.LegacyTxType + } + + // Default to legacy type for 
pre-Madhugiri hardfork behavior; callers may override for post-Madhugiri hardfork. receipt := types.Receipt{ - Type: 0, + Type: receiptType, CumulativeGasUsed: cumulativeGasUsed, TxHash: txHash, GasUsed: 0, diff --git a/rpc/jsonrpc/receipts/handler_test.go b/rpc/jsonrpc/receipts/handler_test.go index 962fbb98053..6951b3751c3 100644 --- a/rpc/jsonrpc/receipts/handler_test.go +++ b/rpc/jsonrpc/receipts/handler_test.go @@ -29,10 +29,14 @@ import ( "github.com/stretchr/testify/require" "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/empty" "github.com/erigontech/erigon-lib/crypto" "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/core" + "github.com/erigontech/erigon/db/kv" "github.com/erigontech/erigon/db/rawdb" + "github.com/erigontech/erigon/db/state" "github.com/erigontech/erigon/execution/chain" "github.com/erigontech/erigon/execution/chain/params" "github.com/erigontech/erigon/execution/rlp" @@ -298,7 +302,7 @@ func TestGetBlockReceipts(t *testing.T) { } // Assemble the test environment m := mockWithGenerator(t, 4, generator) - receiptsGetter := receipts.NewGenerator(m.BlockReader, m.Engine, time.Minute) + receiptsGetter := receipts.NewGenerator(m.BlockReader, m.Engine, time.Minute, nil) // Collect the hashes to request, and the response to expect var ( hashes []common.Hash @@ -342,7 +346,231 @@ func TestGetBlockReceipts(t *testing.T) { require.Equal(t, expect, sent.Data) } -// newTestBackend creates a chain with a number of explicitly defined blocks and +func TestReadStateSyncReceiptByHash_Found(t *testing.T) { + m := mock.Mock(t) + // Enable receipt cache domain + m.DB.(state.HasAgg).Agg().(*state.Aggregator).EnableDomain(kv.RCacheDomain) + + txRw, err := m.DB.BeginTemporalRw(m.Ctx) + require.NoError(t, err) + defer txRw.Rollback() + ctx := m.Ctx + br := m.BlockReader + txNumReader := br.TxnumReader(ctx) + + // Create a fake (legacy) transaction + tx := types.NewTransaction( + 0, + common.HexToAddress("0x01"), + uint256.NewInt(0), + 21000, + uint256.NewInt(1), + nil, + ) + body := &types.Body{Transactions: types.Transactions{tx}} + header := &types.Header{ + Number: big.NewInt(1), + TxHash: types.DeriveSha(types.Transactions{tx}), + UncleHash: empty.UncleHash, + ReceiptHash: empty.RootHash, + } + + // Write header and body to DB + require.NoError(t, rawdb.WriteCanonicalHash(txRw, header.Hash(), 1)) + require.NoError(t, rawdb.WriteHeader(txRw, header)) + require.NoError(t, rawdb.WriteBody(txRw, header.Hash(), 1, body)) + + // Create a state-sync receipt (Type == 0 as legacy, CumulativeGasUsed == 0) + ssr := &types.Receipt{ + Type: 0, + CumulativeGasUsed: 0, + TxHash: tx.Hash(), + BlockHash: header.Hash(), + BlockNumber: header.Number, + TransactionIndex: 0, + Status: types.ReceiptStatusSuccessful, + } + ssr.Bloom = types.CreateBloom(types.Receipts{ssr}) + + // Insert into the receipt cache + sd, err := state.NewSharedDomains(txRw, log.New()) + require.NoError(t, err) + defer sd.Close() + + base, err := txNumReader.Min(txRw, 1) + require.NoError(t, err) + + require.NoError(t, rawdb.WriteReceiptCacheV2(sd.AsPutDel(txRw), nil, base)) + require.NoError(t, rawdb.WriteReceiptCacheV2(sd.AsPutDel(txRw), ssr, base+1)) + require.NoError(t, rawdb.WriteReceiptCacheV2(sd.AsPutDel(txRw), nil, base+2)) + + _, err = sd.ComputeCommitment(ctx, true, header.Number.Uint64(), base+2, "flush") + require.NoError(t, err) + require.NoError(t, sd.Flush(ctx, txRw)) + + // Build the block + b := 
types.NewBlockFromStorage(header.Hash(), header, body.Transactions, body.Uncles, nil) + require.NotNil(t, b) + + // Assert state-sync receipt is found + got, err := rawdb.ReadReceiptsCacheV2(txRw, b, txNumReader) + require.NoError(t, err) + require.NotNil(t, got) + require.Equal(t, got.Len(), 1) + + r := got[0] + + require.Equal(t, tx.Hash(), r.TxHash) + require.Equal(t, header.Hash(), r.BlockHash) + require.Equal(t, header.Number.Uint64(), r.BlockNumber.Uint64()) +} + +func TestReadStateSyncReceiptByHash_NoStateSync(t *testing.T) { + m := mock.Mock(t) + m.DB.(state.HasAgg).Agg().(*state.Aggregator).EnableDomain(kv.RCacheDomain) + + txRw, err := m.DB.BeginTemporalRw(m.Ctx) + require.NoError(t, err) + defer txRw.Rollback() + + ctx := m.Ctx + br := m.BlockReader + txNumReader := br.TxnumReader(ctx) + + // Single transaction block + tx := types.NewTransaction(0, common.HexToAddress("0x02"), uint256.NewInt(0), 21000, uint256.NewInt(1), nil) + body := &types.Body{Transactions: types.Transactions{tx}} + + header := &types.Header{ + Number: big.NewInt(2), + TxHash: types.DeriveSha(types.Transactions(body.Transactions)), + UncleHash: empty.UncleHash, + ReceiptHash: empty.RootHash, + } + + require.NoError(t, rawdb.WriteCanonicalHash(txRw, header.Hash(), 2)) + require.NoError(t, rawdb.WriteHeader(txRw, header)) + require.NoError(t, rawdb.WriteBody(txRw, header.Hash(), 2, body)) + + // No state-sync receipt (CumulativeGasUsed != 0) + norm := &types.Receipt{ + Type: 0, + CumulativeGasUsed: 21000, + TxHash: tx.Hash(), + BlockHash: header.Hash(), + BlockNumber: header.Number, + TransactionIndex: 0, + Status: types.ReceiptStatusSuccessful, + } + norm.Bloom = types.CreateBloom(types.Receipts{norm}) + + sd, err := state.NewSharedDomains(txRw, log.New()) + require.NoError(t, err) + defer sd.Close() + + base, err := txNumReader.Min(txRw, 1) + require.NoError(t, err) + + require.NoError(t, rawdb.WriteReceiptCacheV2(sd.AsPutDel(txRw), norm, base)) + require.NoError(t, sd.Flush(ctx, txRw)) + + b := types.NewBlockFromStorage(header.Hash(), header, body.Transactions, body.Uncles, nil) + require.NotNil(t, b) + + // Expect nil (no state-sync) + got, err := rawdb.ReadReceiptsCacheV2(txRw, b, txNumReader) + require.NoError(t, err) + require.Equal(t, got.Len(), 0) +} + +func TestReadStateSyncReceiptByHash_EqualGasUsedStateSync(t *testing.T) { + m := mock.Mock(t) + m.DB.(state.HasAgg).Agg().(*state.Aggregator).EnableDomain(kv.RCacheDomain) + + txRw, err := m.DB.BeginTemporalRw(m.Ctx) + require.NoError(t, err) + defer txRw.Rollback() + + ctx := m.Ctx + br := m.BlockReader + txNumReader := br.TxnumReader(ctx) + + // Create two transactions: one "normal" and one state-sync + tx1 := types.NewTransaction(0, common.HexToAddress("0x10"), uint256.NewInt(0), 21000, uint256.NewInt(1), nil) + tx2 := types.NewTransaction(1, common.HexToAddress("0x11"), uint256.NewInt(0), 21000, uint256.NewInt(1), nil) + body := &types.Body{Transactions: types.Transactions{tx1, tx2}} + + header := &types.Header{ + Number: big.NewInt(3), + TxHash: types.DeriveSha(types.Transactions(body.Transactions)), + UncleHash: empty.UncleHash, + ReceiptHash: empty.RootHash, + } + + require.NoError(t, rawdb.WriteCanonicalHash(txRw, header.Hash(), 3)) + require.NoError(t, rawdb.WriteHeader(txRw, header)) + require.NoError(t, rawdb.WriteBody(txRw, header.Hash(), 3, body)) + + // Receipt 1: normal tx + r1 := &types.Receipt{ + Type: 0, + CumulativeGasUsed: 21000, + TxHash: tx1.Hash(), + BlockHash: header.Hash(), + BlockNumber: header.Number, + TransactionIndex: 0, + 
Status: types.ReceiptStatusSuccessful, + } + // Receipt 2: equal cumulative gas used (state-sync) + r2 := &types.Receipt{ + Type: 0, + CumulativeGasUsed: 21000, // same as r1 + TxHash: tx2.Hash(), + BlockHash: header.Hash(), + BlockNumber: header.Number, + TransactionIndex: 1, + Status: types.ReceiptStatusSuccessful, + } + r1.Bloom = types.CreateBloom(types.Receipts{r1}) + r2.Bloom = types.CreateBloom(types.Receipts{r2}) + + // Insert into receipts cache domain + sd, err := state.NewSharedDomains(txRw, log.New()) + require.NoError(t, err) + defer sd.Close() + + base, err := txNumReader.Min(txRw, 1) + require.NoError(t, err) + + // Write receipts + require.NoError(t, rawdb.WriteReceiptCacheV2(sd.AsPutDel(txRw), r1, base)) + require.NoError(t, rawdb.WriteReceiptCacheV2(sd.AsPutDel(txRw), r2, base+1)) + require.NoError(t, rawdb.WriteReceiptCacheV2(sd.AsPutDel(txRw), nil, base+2)) + + // Compute commitment with base+2 to include both receipts (base and base+1) + _, err = sd.ComputeCommitment(ctx, true, header.Number.Uint64(), base+2, "flush") + require.NoError(t, err) + require.NoError(t, sd.Flush(ctx, txRw)) + + // Build the block + b := types.NewBlockFromStorage(header.Hash(), header, body.Transactions, body.Uncles, nil) + require.NotNil(t, b) + + // Expect ssTx found: receipt r2 + got, err := rawdb.ReadReceiptsCacheV2(txRw, b, txNumReader) + require.NoError(t, err) + require.NotNil(t, got) + require.Equal(t, got.Len(), 2) + + r := got[1] + + require.Equal(t, tx2.Hash(), r.TxHash) + require.Equal(t, r2.CumulativeGasUsed, r.CumulativeGasUsed) + require.Equal(t, header.Hash(), r.BlockHash) + require.Equal(t, header.Number.Uint64(), r.BlockNumber.Uint64()) +} + +// mockWithGenerator creates a chain with a number of explicitly defined blocks and // wraps it into a mock backend. func mockWithGenerator(t *testing.T, blocks int, generator func(int, *core.BlockGen)) *mock.MockSentry { m := mock.MockWithGenesis(t, &types.Genesis{ diff --git a/rpc/jsonrpc/receipts/receipts_generator.go b/rpc/jsonrpc/receipts/receipts_generator.go index dfa12091e2e..e421613fa91 100644 --- a/rpc/jsonrpc/receipts/receipts_generator.go +++ b/rpc/jsonrpc/receipts/receipts_generator.go @@ -46,6 +46,8 @@ type Generator struct { blockReader services.FullBlockReader txNumReader rawdbv3.TxNumsReader engine consensus.EngineReader + + borGenerator *BorGenerator // to handle state-sync receipts only if needed } type ReceiptEnv struct { @@ -63,7 +65,7 @@ var ( receiptsCacheTrace = dbg.EnvBool("R_LRU_TRACE", false) ) -func NewGenerator(blockReader services.FullBlockReader, engine consensus.EngineReader, evmTimeout time.Duration) *Generator { +func NewGenerator(blockReader services.FullBlockReader, engine consensus.EngineReader, evmTimeout time.Duration, borGenerator *BorGenerator) *Generator { receiptsCache, err := lru.New[common.Hash, types.Receipts](receiptsCacheLimit) //TODO: is handling both of them a good idea though...? if err != nil { panic(err) @@ -88,6 +90,8 @@ func NewGenerator(blockReader services.FullBlockReader, engine consensus.EngineR blockExecMutex: &loaderMutex[common.Hash]{}, txnExecMutex: &loaderMutex[common.Hash]{}, + + borGenerator: borGenerator, } } @@ -282,10 +286,10 @@ func (g *Generator) GetReceipt(ctx context.Context, cfg *chain.Config, tx kv.Tem return receipt, nil } +// GetReceipts regenerates or loads receipts for a given block (including state-sync synthetic receipt if needed). 
func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.TemporalTx, block *types.Block) (types.Receipts, error) { blockHash := block.Hash() - //if can find in DB - then don't need store in `receiptsCache` - because DB it's already kind-of cache (small, mmaped, hot file) var receiptsFromDB types.Receipts var txCount = len(block.Transactions()) if txCount > 0 { @@ -302,10 +306,11 @@ func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Te } }() - mu := g.blockExecMutex.lock(blockHash) // parallel requests of same blockNum will executed only once + mu := g.blockExecMutex.lock(blockHash) // parallel requests of the same blockNum will be executed only once defer g.blockExecMutex.unlock(mu, blockHash) - if receipts, ok := g.receiptsCache.Get(blockHash); ok { - return receipts, nil + + if cachedReceipts, ok := g.receiptsCache.Get(blockHash); ok { + return cachedReceipts, nil } if !rpcDisableRCache { @@ -334,6 +339,7 @@ func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Te ctx, cancel := context.WithTimeout(ctx, g.evmTimeout) defer cancel() + // Handle normal transactions for i, txn := range block.Transactions() { select { case <-ctx.Done(): @@ -373,6 +379,22 @@ func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Te } } + // PIP-74: state-sync receipt handling. + if g.borGenerator != nil && cfg.Bor != nil { + // Extract state-sync events from block. + events, err := g.extractBorEvents(ctx, block) + if err != nil { + return nil, fmt.Errorf("ReceiptGen.GetReceipts: failed to extract bor events for block %d: %w", block.NumberU64(), err) + } + if len(events) > 0 { + borReceipt, err := g.borGenerator.GenerateBorReceipt(ctx, tx, block, events, cfg) + if err != nil { + return nil, fmt.Errorf("ReceiptGen.GetReceipts: failed to generate bor receipt for block %d: %w", block.NumberU64(), err) + } + receipts = append(receipts, borReceipt) + } + } + g.addToCacheReceipts(block.HeaderNoCopy(), receipts) return receipts, nil } @@ -442,6 +464,14 @@ func (g *Generator) GetReceiptsGasUsed(tx kv.TemporalTx, block *types.Block, txN return receipts, nil } +func (g *Generator) extractBorEvents(ctx context.Context, block *types.Block) ([]*types.Message, error) { + events, err := g.borGenerator.bridgeReader.Events(ctx, block.Hash(), block.NumberU64()) + if err != nil { + return nil, err + } + return events, nil +} + type loaderMutex[K comparable] struct { sync.Map }
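Illustrative note (not part of the patch above): the EncodeRLP69 change in execution/types/receipt.go relies on a heuristic — only the last receipt of a block can be a pre-Madhugiri state-sync receipt, and it is recognised by a zero cumulative gas or a cumulative gas equal to the previous receipt's, while a post-Madhugiri receipt already carries StateSyncTxType and is left alone. The standalone sketch below mirrors that logic with simplified local types; the type constants, helper name, and values are placeholders for illustration, not erigon's definitions.

package main

import "fmt"

const (
	legacyTxType    = 0x00
	stateSyncTxType = 0x7f // placeholder value, for illustration only
)

type receipt struct {
	Type              uint8
	CumulativeGasUsed uint64
}

// encodingTypeFor69 returns the type the i-th receipt should carry in an eth/69
// encoding. Only the final receipt is ever reclassified, and only when it is not
// already a typed (post-Madhugiri) state-sync receipt.
func encodingTypeFor69(rs []*receipt, i int) uint8 {
	r := rs[i]
	n := len(rs)
	if i != n-1 || r.Type == stateSyncTxType {
		return r.Type
	}
	// Pre-Madhugiri heuristic: the state-sync tx consumes no gas, so its
	// cumulative gas is zero or equal to the previous receipt's.
	if r.CumulativeGasUsed == 0 || (n >= 2 && r.CumulativeGasUsed == rs[n-2].CumulativeGasUsed) {
		return legacyTxType
	}
	return r.Type
}

func main() {
	rs := []*receipt{
		{Type: 2, CumulativeGasUsed: 21000},
		{Type: 2, CumulativeGasUsed: 21000}, // no extra gas used: treated as state-sync
	}
	fmt.Println(encodingTypeFor69(rs, 0), encodingTypeFor69(rs, 1)) // prints: 2 0
}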
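Also illustrative: in the reworked GetBlockReceipts, the synthetic state-sync receipt produced by the generator is appended after the block's own transactions, so its TransactionIndex equals the body transaction count and it is marshalled against its own bor tx hash rather than a body transaction; post-Madhugiri, where the state-sync tx is a real typed transaction at the end of the body, the existing borTx branch still applies. The sketch below shows only the index rule, with simplified local types rather than erigon's.

package main

import "fmt"

type receipt struct {
	TransactionIndex int
	TxHash           string
}

// classify reports, per receipt, whether it belongs to a body transaction or is
// the appended state-sync receipt (TransactionIndex == number of body txs).
func classify(receipts []receipt, txCount int) []string {
	out := make([]string, 0, len(receipts))
	for _, r := range receipts {
		if r.TransactionIndex == txCount {
			out = append(out, "state-sync receipt, hash "+r.TxHash) // no body tx to pair with
		} else {
			out = append(out, fmt.Sprintf("receipt for body tx %d", r.TransactionIndex))
		}
	}
	return out
}

func main() {
	// A block with two body transactions plus one synthetic state-sync receipt.
	rs := []receipt{{0, "0xaa"}, {1, "0xbb"}, {2, "0xbor"}}
	for _, line := range classify(rs, 2) {
		fmt.Println(line)
	}
}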
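Finally, a possible test double for the new bridgeReader parameter. The method set below is copied from the interface declarations added in sentry_multi_client.go and bor_receipts_generator.go; the stub type, its package name, and the exact import paths are assumptions for illustration and are not part of the patch. A test that wants the state-sync receipt path exercised could pass such a stub where mock_sentry.go and handler_test.go currently pass nil (to NewMultiClient, NewGenerator, or NewBorGenerator).

package receipts_test

import (
	"context"

	"github.com/erigontech/erigon-lib/common"     // path as seen in the handler_test.go imports above
	"github.com/erigontech/erigon/execution/types" // assumed path, matching execution/types/receipt.go
)

// stubBridgeReader is a hypothetical test double for the unexported bridgeReader
// interfaces introduced in this change: it serves canned state-sync events per block hash.
type stubBridgeReader struct {
	events map[common.Hash][]*types.Message
}

func (s *stubBridgeReader) Events(_ context.Context, blockHash common.Hash, _ uint64) ([]*types.Message, error) {
	return s.events[blockHash], nil
}

func (s *stubBridgeReader) EventTxnLookup(_ context.Context, _ common.Hash) (uint64, bool, error) {
	return 0, false, nil
}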